Compare commits

..

2 Commits

Author SHA1 Message Date
Devin AI
95d91d2561 fix: resolve lint and type-checker issues in AgentOps integration
- Fix event handler signatures to match expected (source, event) pattern
- Change 'arguments' to 'tool_args' for tool usage events
- Update TaskEvaluationEvent to use correct attributes (evaluation_type, task)
- Fix corrupted tests/__init__.py file
- Add explicit re-export in src third_party __init__.py

Co-Authored-By: João <joao@crewai.com>
2025-08-18 17:58:59 +00:00
Devin AI
5afe3921d2 feat: restore AgentOps documentation and integration
- Add AgentOps optional dependency to pyproject.toml
- Implement AgentOpsListener with event handling for crew kickoff, tool usage, and task evaluation
- Add comprehensive AgentOps documentation in English, Portuguese, and Korean
- Update docs.json navigation to include AgentOps in observability sections
- Add agentops.log to .gitignore
- Include comprehensive tests for AgentOps integration with mocking
- Ensure graceful handling when AgentOps package is not installed

Addresses issue #3348 by restoring AgentOps integration removed in PR #3334

Co-Authored-By: João <joao@crewai.com>
2025-08-18 17:47:12 +00:00
60 changed files with 874 additions and 4373 deletions

3
.gitignore vendored
View File

@@ -27,3 +27,6 @@ plan.md
conceptual_plan.md
build_image
chromadb-*.lock
# AgentOps
agentops.log

View File

@@ -226,6 +226,7 @@
"group": "Observability",
"pages": [
"en/observability/overview",
"en/observability/agentops",
"en/observability/arize-phoenix",
"en/observability/langdb",
"en/observability/langfuse",
@@ -341,12 +342,11 @@
"groups": [
{
"group": "Getting Started",
"pages": [
"en/api-reference/introduction",
"en/api-reference/inputs",
"en/api-reference/kickoff",
"en/api-reference/status"
]
"pages": ["en/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
}
]
},
@@ -566,6 +566,7 @@
"group": "Observabilidade",
"pages": [
"pt-BR/observability/overview",
"pt-BR/observability/agentops",
"pt-BR/observability/arize-phoenix",
"pt-BR/observability/langdb",
"pt-BR/observability/langfuse",
@@ -681,12 +682,11 @@
"groups": [
{
"group": "Começando",
"pages": [
"pt-BR/api-reference/introduction",
"pt-BR/api-reference/inputs",
"pt-BR/api-reference/kickoff",
"pt-BR/api-reference/status"
]
"pages": ["pt-BR/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
}
]
},
@@ -914,6 +914,7 @@
"group": "Observability",
"pages": [
"ko/observability/overview",
"ko/observability/agentops",
"ko/observability/arize-phoenix",
"ko/observability/langdb",
"ko/observability/langfuse",
@@ -1028,12 +1029,11 @@
"groups": [
{
"group": "시작 안내",
"pages": [
"ko/api-reference/introduction",
"ko/api-reference/inputs",
"ko/api-reference/kickoff",
"ko/api-reference/status"
]
"pages": ["ko/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
}
]
},
@@ -1084,10 +1084,6 @@
"indexing": "all"
},
"redirects": [
{
"source": "/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/introduction",
"destination": "/en/introduction"
@@ -1140,18 +1136,6 @@
"source": "/api-reference/:path*",
"destination": "/en/api-reference/:path*"
},
{
"source": "/en/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/pt-BR/api-reference",
"destination": "/pt-BR/api-reference/introduction"
},
{
"source": "/ko/api-reference",
"destination": "/ko/api-reference/introduction"
},
{
"source": "/examples/:path*",
"destination": "/en/examples/:path*"

View File

@@ -1,7 +0,0 @@
---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
---

View File

@@ -1,7 +0,0 @@
---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
---

View File

@@ -1,7 +0,0 @@
---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
---

View File

@@ -21,17 +21,13 @@ To use the training feature, follow these steps:
3. Run the following command:
```shell
crewai train -n <n_iterations> -f <filename.pkl>
crewai train -n <n_iterations> <filename> (optional)
```
<Tip>
Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
</Tip>
<Note>
If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
</Note>
### Training your Crew programmatically
### Training Your Crew Programmatically
To train your crew programmatically, use the following steps:
@@ -55,65 +51,19 @@ except Exception as e:
raise Exception(f"An error occurred while training the crew: {e}")
```
## How trained data is used by agents
### Key Points to Note
CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
### Training data flow
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
```mermaid
flowchart TD
A["Start training<br/>CLI: crewai train -n -f<br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
subgraph "Iterations"
direction LR
C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
D --> E["improved_output"]
E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
end
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
B --> C
F --> G{"More iterations?"}
G -- "Yes" --> C
G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]
H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]
J --> K["Normal (non-training) runs"]
K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
L --> M["Append to prompt<br/>for consistent improvements"]
```
### During training runs
- On each iteration, the system records for every agent:
- `initial_output`: the agent's first answer
- `human_feedback`: your inline feedback when prompted
- `improved_output`: the agent's follow-up answer after feedback
- This data is stored in a working file named `training_data.pkl` keyed by the agent's internal ID and iteration.
- While training is active, the agent automatically appends your prior human feedback to its prompt to enforce those instructions on subsequent attempts within the training session.
Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.
### After training completes
- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
- `suggestions`: clear, actionable instructions distilled from your feedback and the difference between initial/improved outputs
- `quality`: a 0–10 score capturing improvement
- `final_summary`: a step-by-step set of action items for future tasks
- These consolidated results are saved to the filename you pass to `train(...)` (default via CLI is `trained_agents_data.pkl`). Entries are keyed by the agent's `role` so they can be applied across sessions.
- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.
### File summary
- `training_data.pkl` (ephemeral, per-session):
- Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
- Purpose: capture raw data and human feedback during training
- Location: saved in the current working directory (CWD)
- `trained_agents_data.pkl` (or your custom filename):
- Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
- Purpose: persist consolidated guidance for future runs
- Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
Happy training with CrewAI! 🚀
## Small Language Model Considerations
@@ -179,18 +129,3 @@ flowchart TD
</Warning>
</Tab>
</Tabs>
### Key Points to Note
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running, or adjust the loader in code.
- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.

View File

@@ -35,22 +35,6 @@ crewai tool install <tool-name>
This installs the tool and adds it to `pyproject.toml`.
You can use the tool by importing it and adding it to your agents:
```python
from your_tool.tool import YourTool
custom_tool = YourTool()
researcher = Agent(
role='Market Research Analyst',
goal='Provide up-to-date market analysis of the AI industry',
backstory='An expert analyst with a keen eye for market trends.',
tools=[custom_tool],
verbose=True
)
```
## Creating and Publishing Tools
To create a new tool project:

View File

@@ -0,0 +1,184 @@
---
title: "AgentOps Integration"
description: "Monitor and analyze your CrewAI agents with AgentOps observability platform"
---
# AgentOps Integration
AgentOps is a powerful observability platform designed specifically for AI agents. It provides comprehensive monitoring, analytics, and debugging capabilities for your CrewAI crews.
## Features
- **Real-time Monitoring**: Track agent performance and behavior in real-time
- **Session Replay**: Review complete agent sessions with detailed execution traces
- **Performance Analytics**: Analyze crew efficiency, tool usage, and task completion rates
- **Error Tracking**: Identify and debug issues in agent workflows
- **Cost Tracking**: Monitor LLM usage and associated costs
- **Team Collaboration**: Share insights and collaborate on agent optimization
## Installation
Install AgentOps alongside CrewAI:
```bash
pip install crewai[agentops]
```
Or install AgentOps separately:
```bash
pip install agentops
```
## Setup
1. **Get your API Key**: Sign up at [AgentOps](https://agentops.ai) and get your API key
2. **Configure your environment**: Set your AgentOps API key as an environment variable:
```bash
export AGENTOPS_API_KEY="your-api-key-here"
```
3. **Initialize AgentOps**: Add this to your CrewAI script:
```python
import agentops
from crewai import Agent, Task, Crew
# Initialize AgentOps
agentops.init()
# Your CrewAI code here
agent = Agent(
role="Data Analyst",
goal="Analyze data and provide insights",
backstory="You are an expert data analyst...",
)
task = Task(
description="Analyze the sales data and provide insights",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
)
# Run your crew
result = crew.kickoff()
# End the AgentOps session
agentops.end_session("Success")
```
## Automatic Integration
CrewAI automatically integrates with AgentOps when the library is installed. The integration captures:
- **Crew Kickoff Events**: Start and completion of crew executions
- **Tool Usage**: All tool calls and their results
- **Task Evaluations**: Task performance metrics and feedback
- **Error Events**: Any errors that occur during execution
## Configuration Options
You can customize the AgentOps integration:
```python
import agentops
# Configure AgentOps with custom settings
agentops.init(
api_key="your-api-key",
tags=["production", "data-analysis"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## Viewing Your Data
1. **Dashboard**: Visit the AgentOps dashboard to view your agent sessions
2. **Session Details**: Click on any session to see detailed execution traces
3. **Analytics**: Use the analytics tab to identify performance trends
4. **Errors**: Monitor the errors tab for debugging information
## Best Practices
- **Tag Your Sessions**: Use meaningful tags to organize your agent runs
- **Monitor Costs**: Keep track of LLM usage and associated costs
- **Review Errors**: Regularly check for and address any errors
- **Optimize Performance**: Use analytics to identify bottlenecks and optimization opportunities
## Troubleshooting
### AgentOps Not Recording Data
1. Verify your API key is set correctly
2. Check that AgentOps is properly initialized
3. Ensure you're calling `agentops.end_session()` at the end of your script
### Missing Events
If some events aren't being captured:
1. Make sure you have the latest version of both CrewAI and AgentOps
2. Check that the AgentOps listener is properly registered
3. Review the logs for any error messages
## Example: Complete Integration
```python
import os
import agentops
from crewai import Agent, Task, Crew, Process
# Initialize AgentOps
agentops.init(
api_key=os.getenv("AGENTOPS_API_KEY"),
tags=["example", "tutorial"],
)
# Define your agents
researcher = Agent(
role="Research Specialist",
goal="Conduct thorough research on given topics",
backstory="You are an expert researcher with access to various tools...",
)
writer = Agent(
role="Content Writer",
goal="Create engaging content based on research",
backstory="You are a skilled writer who can transform research into compelling content...",
)
# Define your tasks
research_task = Task(
description="Research the latest trends in AI and machine learning",
agent=researcher,
)
writing_task = Task(
description="Write a blog post about AI trends based on the research",
agent=writer,
)
# Create and run your crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
process=Process.sequential,
)
try:
result = crew.kickoff()
print(result)
agentops.end_session("Success")
except Exception as e:
print(f"Error: {e}")
agentops.end_session("Fail")
```
This integration provides comprehensive observability for your CrewAI agents, helping you monitor, debug, and optimize your AI workflows.

View File

@@ -117,19 +117,4 @@ agent = Agent(
)
```
## **Max Usage Count**
You can set a maximum usage count for a tool to prevent it from being used more than a certain number of times.
By default, the max usage count is unlimited.
```python
from crewai_tools import FileReadTool
tool = FileReadTool(max_usage_count=5, ...)
```
Ready to explore? Pick a category above to discover tools that fit your use case!

View File

@@ -1,7 +0,0 @@
---
title: "GET /inputs"
description: "크루가 필요로 하는 입력 확인"
openapi: "/enterprise-api.ko.yaml GET /inputs"
---

View File

@@ -1,7 +0,0 @@
---
title: "POST /kickoff"
description: "크루 실행 시작"
openapi: "/enterprise-api.ko.yaml POST /kickoff"
---

View File

@@ -1,7 +0,0 @@
---
title: "GET /status/{kickoff_id}"
description: "실행 상태 조회"
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
---

View File

@@ -0,0 +1,131 @@
---
title: "AgentOps 통합"
description: "AgentOps 관찰 가능성 플랫폼으로 CrewAI 에이전트를 모니터링하고 분석하세요"
---
# AgentOps 통합
AgentOps는 AI 에이전트를 위해 특별히 설계된 강력한 관찰 가능성 플랫폼입니다. CrewAI 크루를 위한 포괄적인 모니터링, 분석 및 디버깅 기능을 제공합니다.
## 기능
- **실시간 모니터링**: 에이전트 성능과 동작을 실시간으로 추적
- **세션 재생**: 상세한 실행 추적과 함께 완전한 에이전트 세션 검토
- **성능 분석**: 크루 효율성, 도구 사용량 및 작업 완료율 분석
- **오류 추적**: 에이전트 워크플로우의 문제 식별 및 디버그
- **비용 추적**: LLM 사용량 및 관련 비용 모니터링
- **팀 협업**: 인사이트 공유 및 에이전트 최적화 협업
## 설치
CrewAI와 함께 AgentOps 설치:
```bash
pip install crewai[agentops]
```
또는 AgentOps를 별도로 설치:
```bash
pip install agentops
```
## 설정
1. **API 키 받기**: [AgentOps](https://agentops.ai)에 가입하고 API 키를 받으세요
2. **환경 구성**: AgentOps API 키를 환경 변수로 설정:
```bash
export AGENTOPS_API_KEY="여기에-api-키-입력"
```
3. **AgentOps 초기화**: CrewAI 스크립트에 다음을 추가:
```python
import agentops
from crewai import Agent, Task, Crew
# AgentOps 초기화
agentops.init()
# 여기에 CrewAI 코드
agent = Agent(
role="데이터 분석가",
goal="데이터를 분석하고 인사이트 제공",
backstory="당신은 전문 데이터 분석가입니다...",
)
task = Task(
description="판매 데이터를 분석하고 인사이트를 제공하세요",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
)
# 크루 실행
result = crew.kickoff()
# AgentOps 세션 종료
agentops.end_session("Success")
```
## 자동 통합
CrewAI는 라이브러리가 설치되면 AgentOps와 자동으로 통합됩니다. 통합은 다음을 캡처합니다:
- **크루 킥오프 이벤트**: 크루 실행의 시작과 완료
- **도구 사용**: 모든 도구 호출과 결과
- **작업 평가**: 작업 성능 메트릭과 피드백
- **오류 이벤트**: 실행 중 발생하는 모든 오류
## 구성 옵션
AgentOps 통합을 사용자 정의할 수 있습니다:
```python
import agentops
# 사용자 정의 설정으로 AgentOps 구성
agentops.init(
api_key="당신의-api-키",
tags=["프로덕션", "데이터-분석"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## 데이터 보기
1. **대시보드**: AgentOps 대시보드를 방문하여 에이전트 세션 보기
2. **세션 세부사항**: 세션을 클릭하여 상세한 실행 추적 보기
3. **분석**: 분석 탭을 사용하여 성능 트렌드 식별
4. **오류**: 디버깅 정보를 위해 오류 탭 모니터링
## 모범 사례
- **세션 태그 지정**: 의미 있는 태그를 사용하여 에이전트 실행 정리
- **비용 모니터링**: LLM 사용량과 관련 비용 추적
- **오류 검토**: 정기적으로 오류 확인 및 해결
- **성능 최적화**: 분석을 사용하여 병목 현상과 최적화 기회 식별
## 문제 해결
### AgentOps가 데이터를 기록하지 않음
1. API 키가 올바르게 설정되었는지 확인
2. AgentOps가 제대로 초기화되었는지 확인
3. 스크립트 끝에서 `agentops.end_session()`을 호출하는지 확인
### 누락된 이벤트
일부 이벤트가 캡처되지 않는 경우:
1. CrewAI와 AgentOps의 최신 버전이 있는지 확인
2. AgentOps 리스너가 제대로 등록되었는지 확인
3. 오류 메시지에 대한 로그 검토
이 통합은 CrewAI 에이전트에 대한 포괄적인 관찰 가능성을 제공하여 AI 워크플로우를 모니터링, 디버그 및 최적화하는 데 도움이 됩니다.

View File

@@ -1,7 +0,0 @@
---
title: "GET /inputs"
description: "Obter entradas necessárias para sua crew"
openapi: "/enterprise-api.pt-BR.yaml GET /inputs"
---

View File

@@ -1,7 +0,0 @@
---
title: "POST /kickoff"
description: "Iniciar a execução da crew"
openapi: "/enterprise-api.pt-BR.yaml POST /kickoff"
---

View File

@@ -1,7 +0,0 @@
---
title: "GET /status/{kickoff_id}"
description: "Obter o status da execução"
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
---

View File

@@ -0,0 +1,131 @@
---
title: "Integração AgentOps"
description: "Monitore e analise seus agentes CrewAI com a plataforma de observabilidade AgentOps"
---
# Integração AgentOps
AgentOps é uma poderosa plataforma de observabilidade projetada especificamente para agentes de IA. Ela fornece capacidades abrangentes de monitoramento, análise e depuração para suas crews CrewAI.
## Recursos
- **Monitoramento em Tempo Real**: Acompanhe o desempenho e comportamento dos agentes em tempo real
- **Replay de Sessão**: Revise sessões completas de agentes com rastreamentos detalhados de execução
- **Análise de Desempenho**: Analise eficiência da crew, uso de ferramentas e taxas de conclusão de tarefas
- **Rastreamento de Erros**: Identifique e depure problemas em fluxos de trabalho de agentes
- **Rastreamento de Custos**: Monitore o uso de LLM e custos associados
- **Colaboração em Equipe**: Compartilhe insights e colabore na otimização de agentes
## Instalação
Instale o AgentOps junto com o CrewAI:
```bash
pip install crewai[agentops]
```
Ou instale o AgentOps separadamente:
```bash
pip install agentops
```
## Configuração
1. **Obtenha sua Chave API**: Cadastre-se no [AgentOps](https://agentops.ai) e obtenha sua chave API
2. **Configure seu ambiente**: Defina sua chave API do AgentOps como uma variável de ambiente:
```bash
export AGENTOPS_API_KEY="sua-chave-api-aqui"
```
3. **Inicialize o AgentOps**: Adicione isso ao seu script CrewAI:
```python
import agentops
from crewai import Agent, Task, Crew
# Inicializar AgentOps
agentops.init()
# Seu código CrewAI aqui
agent = Agent(
role="Analista de Dados",
goal="Analisar dados e fornecer insights",
backstory="Você é um analista de dados especialista...",
)
task = Task(
description="Analise os dados de vendas e forneça insights",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
)
# Execute sua crew
result = crew.kickoff()
# Finalize a sessão AgentOps
agentops.end_session("Success")
```
## Integração Automática
O CrewAI se integra automaticamente com o AgentOps quando a biblioteca está instalada. A integração captura:
- **Eventos de Kickoff da Crew**: Início e conclusão de execuções da crew
- **Uso de Ferramentas**: Todas as chamadas de ferramentas e seus resultados
- **Avaliações de Tarefas**: Métricas de desempenho de tarefas e feedback
- **Eventos de Erro**: Quaisquer erros que ocorram durante a execução
## Opções de Configuração
Você pode personalizar a integração do AgentOps:
```python
import agentops
# Configure AgentOps com configurações personalizadas
agentops.init(
api_key="sua-chave-api",
tags=["producao", "analise-dados"],
auto_start_session=True,
instrument_llm_calls=True,
)
```
## Visualizando Seus Dados
1. **Dashboard**: Visite o dashboard do AgentOps para ver suas sessões de agentes
2. **Detalhes da Sessão**: Clique em qualquer sessão para ver rastreamentos detalhados de execução
3. **Análises**: Use a aba de análises para identificar tendências de desempenho
4. **Erros**: Monitore a aba de erros para informações de depuração
## Melhores Práticas
- **Marque Suas Sessões**: Use tags significativas para organizar suas execuções de agentes
- **Monitore Custos**: Acompanhe o uso de LLM e custos associados
- **Revise Erros**: Verifique e resolva regularmente quaisquer erros
- **Otimize Desempenho**: Use análises para identificar gargalos e oportunidades de otimização
## Solução de Problemas
### AgentOps Não Está Gravando Dados
1. Verifique se sua chave API está definida corretamente
2. Verifique se o AgentOps está inicializado adequadamente
3. Certifique-se de estar chamando `agentops.end_session()` no final do seu script
### Eventos Ausentes
Se alguns eventos não estão sendo capturados:
1. Certifique-se de ter a versão mais recente do CrewAI e AgentOps
2. Verifique se o listener do AgentOps está registrado adequadamente
3. Revise os logs para quaisquer mensagens de erro
Esta integração fornece observabilidade abrangente para seus agentes CrewAI, ajudando você a monitorar, depurar e otimizar seus fluxos de trabalho de IA.

View File

@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.62.1"]
tools = ["crewai-tools~=0.62.0"]
embeddings = [
"tiktoken~=0.8.0"
]
@@ -68,6 +68,7 @@ docling = [
aisuite = [
"aisuite>=0.1.10",
]
agentops = ["agentops==0.3.18"]
[tool.uv]
dev-dependencies = [

View File

@@ -54,7 +54,7 @@ def _track_install_async():
_track_install_async()
__version__ = "0.165.1"
__version__ = "0.159.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.165.1,<1.0.0"
"crewai[tools]>=0.159.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.165.1,<1.0.0",
"crewai[tools]>=0.159.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.165.1"
"crewai[tools]>=0.159.0"
]
[tool.crewai]

View File

@@ -44,9 +44,8 @@ def migrate_pyproject(input_file, output_file):
]
new_pyproject["project"]["requires-python"] = poetry_data.get("python")
else:
# If it's already in the new format, just copy the project and tool sections
# If it's already in the new format, just copy the project section
new_pyproject["project"] = pyproject_data.get("project", {})
new_pyproject["tool"] = pyproject_data.get("tool", {})
# Migrate or copy dependencies
if "dependencies" in new_pyproject["project"]:

View File

@@ -79,6 +79,7 @@ from crewai.utilities.events.listeners.tracing.trace_listener import (
from crewai.utilities.events.listeners.tracing.utils import (
is_tracing_enabled,
on_first_execution_tracing_confirmation,
)
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
@@ -285,6 +286,8 @@ class Crew(FlowTrackable, BaseModel):
self._cache_handler = CacheHandler()
event_listener = EventListener()
if on_first_execution_tracing_confirmation():
self.tracing = True
if is_tracing_enabled() or self.tracing:
trace_listener = TraceCollectionListener()
@@ -636,7 +639,6 @@ class Crew(FlowTrackable, BaseModel):
self._inputs = inputs
self._interpolate_inputs(inputs)
self._set_tasks_callbacks()
self._set_allow_crewai_trigger_context_for_first_task()
i18n = I18N(prompt_file=self.prompt_file)
@@ -1506,18 +1508,3 @@ class Crew(FlowTrackable, BaseModel):
"""Reset crew and agent knowledge storage."""
for ks in knowledges:
ks.reset()
def _set_allow_crewai_trigger_context_for_first_task(self):
crewai_trigger_payload = self._inputs and self._inputs.get(
"crewai_trigger_payload"
)
able_to_inject = (
self.tasks and self.tasks[0].allow_crewai_trigger_context is None
)
if (
self.process == Process.sequential
and crewai_trigger_payload
and able_to_inject
):
self.tasks[0].allow_crewai_trigger_context = True

View File

@@ -40,6 +40,7 @@ from crewai.utilities.events.listeners.tracing.trace_listener import (
)
from crewai.utilities.events.listeners.tracing.utils import (
is_tracing_enabled,
on_first_execution_tracing_confirmation,
)
from crewai.utilities.printer import Printer
@@ -478,7 +479,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
# Initialize state with initial values
self._state = self._create_initial_state()
self.tracing = tracing
if is_tracing_enabled() or self.tracing:
if (
on_first_execution_tracing_confirmation()
or is_tracing_enabled()
or self.tracing
):
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
# Apply any additional kwargs
@@ -913,52 +918,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Triggers execution of any listeners waiting on this start method
- Part of the flow's initialization sequence
- Skips execution if method was already completed (e.g., after reload)
- Automatically injects crewai_trigger_payload if available in flow inputs
"""
if start_method_name in self._completed_methods:
last_output = self._method_outputs[-1] if self._method_outputs else None
await self._execute_listeners(start_method_name, last_output)
return
method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method)
result = await self._execute_method(
start_method_name, enhanced_method
start_method_name, self._methods[start_method_name]
)
await self._execute_listeners(start_method_name, result)
def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
def prepare_kwargs(*args, **kwargs):
inputs = baggage.get_baggage("flow_inputs") or {}
trigger_payload = inputs.get("crewai_trigger_payload")
sig = inspect.signature(original_method)
accepts_trigger_payload = "crewai_trigger_payload" in sig.parameters
if trigger_payload is not None and accepts_trigger_payload:
kwargs["crewai_trigger_payload"] = trigger_payload
elif trigger_payload is not None:
self._log_flow_event(
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
color="yellow"
)
return args, kwargs
if asyncio.iscoroutinefunction(original_method):
async def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return await original_method(*args, **kwargs)
else:
def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return original_method(*args, **kwargs)
enhanced_method.__name__ = original_method.__name__
enhanced_method.__doc__ = original_method.__doc__
return enhanced_method
async def _execute_method(
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
) -> Any:

View File

@@ -11,6 +11,7 @@ import chromadb.errors
from chromadb.api import ClientAPI
from chromadb.api.types import OneOrMany
from chromadb.config import Settings
from pydantic.warnings import PydanticDeprecatedSince211
import warnings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
@@ -90,6 +91,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
warnings.filterwarnings(
"ignore",
category=PydanticDeprecatedSince211,
message=r".*'model_fields'.*is deprecated.*",
module=r"^chromadb(\.|$)",
)

View File

@@ -13,6 +13,7 @@ from crewai.utilities.chromadb import create_persistent_client
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
from crewai.utilities.paths import db_storage_path
import warnings
from pydantic.warnings import PydanticDeprecatedSince211
@contextlib.contextmanager
@@ -67,6 +68,7 @@ class RAGStorage(BaseRAGStorage):
# TODO: Remove this once we upgrade chromadb to at least 1.0.8.
warnings.filterwarnings(
"ignore",
category=PydanticDeprecatedSince211,
message=r".*'model_fields'.*is deprecated.*",
module=r"^chromadb(\.|$)",
)

View File

@@ -1 +0,0 @@
"""Core abstract base classes and protocols for RAG systems."""

View File

@@ -1,433 +0,0 @@
"""Protocol for vector database client implementations."""
from abc import abstractmethod
from typing import Any, Protocol, runtime_checkable, TypedDict, Annotated
from typing_extensions import Unpack, Required
from crewai.rag.types import (
EmbeddingFunction,
BaseRecord,
SearchResult,
)
class BaseCollectionParams(TypedDict):
"""Base parameters for collection operations.
Attributes:
collection_name: The name of the collection/index to operate on.
"""
collection_name: Required[
Annotated[
str,
"Name of the collection/index. Implementations may have specific constraints (e.g., character limits, allowed characters, case sensitivity).",
]
]
class BaseCollectionAddParams(BaseCollectionParams):
"""Parameters for adding documents to a collection.
Extends BaseCollectionParams with document-specific fields.
Attributes:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dictionaries containing document data.
"""
documents: list[BaseRecord]
class BaseCollectionSearchParams(BaseCollectionParams, total=False):
    """Parameters for searching within a collection.

    Extends BaseCollectionParams with search-specific optional fields.
    All fields except collection_name and query are optional.

    Attributes:
        query: The text query to search for (required).
        limit: Maximum number of results to return.
        metadata_filter: Filter results by metadata fields.
        score_threshold: Minimum similarity score for results (0-1).
    """

    # The only required search field besides collection_name.
    query: Required[str]
    # Optional knobs (total=False makes these omit-able).
    limit: int
    metadata_filter: dict[str, Any]
    score_threshold: float
@runtime_checkable
class BaseClient(Protocol):
    """Protocol for vector store client implementations.

    This protocol defines the interface that all vector store client implementations
    must follow. It provides a consistent API for storing and retrieving
    documents with their vector embeddings across different vector database
    backends (e.g., Qdrant, ChromaDB, Weaviate). Implementing classes should
    handle connection management, data persistence, and vector similarity
    search operations specific to their backend.

    Implementation Guidelines:
        Implementations should accept BaseClientParams in their constructor to allow
        passing pre-configured client instances:

            class MyVectorClient:
                def __init__(self, client: Any | None = None, **kwargs):
                    if client:
                        self.client = client
                    else:
                        self.client = self._create_default_client(**kwargs)

    Notes:
        This protocol replaces the former BaseRAGStorage abstraction,
        providing a cleaner interface for vector store operations.

    Attributes:
        embedding_function: Callable that takes a list of text strings
            and returns a list of embedding vectors. Implementations
            should always provide a default embedding function.
        client: The underlying vector database client instance. This could be
            passed via BaseClientParams during initialization or created internally.
    """

    # Backend-specific client object (type depends on the implementation).
    client: Any
    # Maps text to embedding vectors; implementations supply a default.
    embedding_function: EmbeddingFunction

    @abstractmethod
    def create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Create a new collection/index in the vector database.

        Keyword Args:
            collection_name: The name of the collection to create. Must be unique within
                the vector database instance.

        Raises:
            ValueError: If collection name already exists.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    async def acreate_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Create a new collection/index in the vector database asynchronously.

        Keyword Args:
            collection_name: The name of the collection to create. Must be unique within
                the vector database instance.

        Raises:
            ValueError: If collection name already exists.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    def get_or_create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> Any:
        """Get an existing collection or create it if it doesn't exist.

        This method provides a convenient way to ensure a collection exists
        without having to check for its existence first.

        Keyword Args:
            collection_name: The name of the collection to get or create.

        Returns:
            A collection object whose type depends on the backend implementation.
            This could be a collection reference, ID, or client object.

        Raises:
            ValueError: If unable to create the collection.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    async def aget_or_create_collection(
        self, **kwargs: Unpack[BaseCollectionParams]
    ) -> Any:
        """Get an existing collection or create it if it doesn't exist asynchronously.

        Keyword Args:
            collection_name: The name of the collection to get or create.

        Returns:
            A collection object whose type depends on the backend implementation.

        Raises:
            ValueError: If unable to create the collection.
            ConnectionError: If unable to connect to the vector database backend.
        """
        ...

    @abstractmethod
    def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
        """Add documents with their embeddings to a collection.

        This method performs an upsert operation - if a document with the same ID
        already exists, it will be updated with the new content and metadata.
        Implementations should handle embedding generation internally based on
        the configured embedding function.

        Keyword Args:
            collection_name: The name of the collection to add documents to.
            documents: List of BaseRecord dicts containing:
                - content: The text content (required)
                - doc_id: Optional unique identifier (auto-generated from content hash if missing)
                - metadata: Optional metadata dictionary
                Embeddings will be generated automatically.

        Raises:
            ValueError: If collection doesn't exist or documents list is empty.
            TypeError: If documents are not BaseRecord dict instances.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> from crewai.rag.types import BaseRecord
            >>> client = ChromaDBClient()
            >>>
            >>> records: list[BaseRecord] = [
            ...     {
            ...         "content": "Machine learning basics",
            ...         "metadata": {"source": "file3", "topic": "ML"}
            ...     },
            ...     {
            ...         "doc_id": "custom_id",
            ...         "content": "Deep learning fundamentals",
            ...         "metadata": {"source": "file4", "topic": "DL"}
            ...     }
            ... ]
            >>> client.add_documents(collection_name="my_docs", documents=records)
            >>>
            >>> records_with_id: list[BaseRecord] = [
            ...     {
            ...         "doc_id": "nlp_001",
            ...         "content": "Advanced NLP techniques",
            ...         "metadata": {"source": "file5", "topic": "NLP"}
            ...     }
            ... ]
            >>> client.add_documents(collection_name="my_docs", documents=records_with_id)
        """
        ...

    @abstractmethod
    async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
        """Add documents with their embeddings to a collection asynchronously.

        Implementations should handle embedding generation internally based on
        the configured embedding function.

        Keyword Args:
            collection_name: The name of the collection to add documents to.
            documents: List of BaseRecord dicts containing:
                - content: The text content (required)
                - doc_id: Optional unique identifier (auto-generated from content hash if missing)
                - metadata: Optional metadata dictionary
                Embeddings will be generated automatically.

        Raises:
            ValueError: If collection doesn't exist or documents list is empty.
            TypeError: If documents are not BaseRecord dict instances.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> from crewai.rag.types import BaseRecord
            >>>
            >>> async def add_documents():
            ...     client = ChromaDBClient()
            ...
            ...     records: list[BaseRecord] = [
            ...         {
            ...             "doc_id": "doc2",
            ...             "content": "Async operations in Python",
            ...             "metadata": {"source": "file2", "topic": "async"}
            ...         }
            ...     ]
            ...     await client.aadd_documents(collection_name="my_docs", documents=records)
            ...
            >>> asyncio.run(add_documents())
        """
        ...

    @abstractmethod
    def search(
        self, **kwargs: Unpack[BaseCollectionSearchParams]
    ) -> list[SearchResult]:
        """Search for similar documents using a query.

        Performs a vector similarity search to find the most similar documents
        to the provided query.

        Keyword Args:
            collection_name: The name of the collection to search in.
            query: The text query to search for. The implementation handles
                embedding generation internally.
            limit: Maximum number of results to return. Defaults to 10.
            metadata_filter: Optional metadata filter to apply to the search. The exact
                format depends on the backend, but typically supports equality
                and range queries on metadata fields.
            score_threshold: Optional minimum similarity score threshold. Only
                results with scores >= this threshold will be returned. The
                score interpretation depends on the distance metric used.

        Returns:
            A list of SearchResult dictionaries ordered by similarity score in
            descending order. Each result contains:
                - id: Document ID
                - content: Document text content
                - metadata: Document metadata
                - score: Similarity score (0-1, higher is better)

        Raises:
            ValueError: If collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>>
            >>> results = client.search(
            ...     collection_name="my_docs",
            ...     query="What is machine learning?",
            ...     limit=5,
            ...     metadata_filter={"source": "file1"},
            ...     score_threshold=0.7
            ... )
            >>> for result in results:
            ...     print(f"{result['id']}: {result['score']:.2f}")
        """
        ...

    @abstractmethod
    async def asearch(
        self, **kwargs: Unpack[BaseCollectionSearchParams]
    ) -> list[SearchResult]:
        """Search for similar documents using a query asynchronously.

        Keyword Args:
            collection_name: The name of the collection to search in.
            query: The text query to search for. The implementation handles
                embedding generation internally.
            limit: Maximum number of results to return. Defaults to 10.
            metadata_filter: Optional metadata filter to apply to the search.
            score_threshold: Optional minimum similarity score threshold.

        Returns:
            A list of SearchResult dictionaries ordered by similarity score.

        Raises:
            ValueError: If collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def search_documents():
            ...     client = ChromaDBClient()
            ...     results = await client.asearch(
            ...         collection_name="my_docs",
            ...         query="Python programming best practices",
            ...         limit=5,
            ...         metadata_filter={"source": "file1"},
            ...         score_threshold=0.7
            ...     )
            ...     for result in results:
            ...         print(f"{result['id']}: {result['score']:.2f}")
            ...
            >>> asyncio.run(search_documents())
        """
        ...

    @abstractmethod
    def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Delete a collection and all its data.

        This operation is irreversible and will permanently remove all documents,
        embeddings, and metadata associated with the collection.

        Keyword Args:
            collection_name: The name of the collection to delete.

        Raises:
            ValueError: If the collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>> client.delete_collection(collection_name="old_docs")
            >>> print("Collection 'old_docs' deleted successfully")
        """
        ...

    @abstractmethod
    async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
        """Delete a collection and all its data asynchronously.

        Keyword Args:
            collection_name: The name of the collection to delete.

        Raises:
            ValueError: If the collection doesn't exist.
            ConnectionError: If unable to connect to the vector database backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def delete_old_collection():
            ...     client = ChromaDBClient()
            ...     await client.adelete_collection(collection_name="old_docs")
            ...     print("Collection 'old_docs' deleted successfully")
            ...
            >>> asyncio.run(delete_old_collection())
        """
        ...

    @abstractmethod
    def reset(self) -> None:
        """Reset the vector database by deleting all collections and data.

        This method provides a way to completely clear the vector database,
        removing all collections and their contents. Use with caution as
        this operation is irreversible.

        Raises:
            ConnectionError: If unable to connect to the vector database backend.
            PermissionError: If the operation is not allowed by the backend.

        Example:
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>> client = ChromaDBClient()
            >>> client.reset()
            >>> print("Vector database completely reset - all data deleted")
        """
        ...

    @abstractmethod
    async def areset(self) -> None:
        """Reset the vector database by deleting all collections and data asynchronously.

        Raises:
            ConnectionError: If unable to connect to the vector database backend.
            PermissionError: If the operation is not allowed by the backend.

        Example:
            >>> import asyncio
            >>> from crewai.rag.chromadb.client import ChromaDBClient
            >>>
            >>> async def reset_database():
            ...     client = ChromaDBClient()
            ...     await client.areset()
            ...     print("Vector database completely reset - all data deleted")
            ...
            >>> asyncio.run(reset_database())
        """
        ...

View File

@@ -1,30 +0,0 @@
"""Base provider protocol for vector database client creation."""
from abc import ABC
from typing import Any, Protocol, runtime_checkable, Union
from pydantic import BaseModel, Field
from crewai.rag.types import EmbeddingFunction
from crewai.rag.embeddings.types import EmbeddingOptions
class BaseProviderOptions(BaseModel, ABC):
    """Base configuration for all provider options."""

    # Identifier used to select which client implementation to build.
    client_type: str = Field(..., description="Type of client to create")
    # Either declarative options for a built-in provider or a ready-made callable.
    embedding_config: Union[EmbeddingOptions, EmbeddingFunction, None] = Field(
        default=None,
        description="Embedding configuration - either options for built-in providers or a custom function",
    )
    # Opaque pass-through for provider-specific settings.
    options: Any = Field(
        default=None, description="Additional provider-specific options"
    )
@runtime_checkable
class BaseProvider(Protocol):
    """Protocol for vector database client providers.

    A provider is a callable factory: given provider options, it returns a
    configured client instance whose concrete type depends on the backend.
    """

    def __call__(self, options: BaseProviderOptions) -> Any:
        """Create and return a configured client instance."""
        ...

View File

@@ -1,148 +0,0 @@
"""Minimal embedding function factory for CrewAI."""
import os
from chromadb import EmbeddingFunction
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
AmazonBedrockEmbeddingFunction,
)
from chromadb.utils.embedding_functions.cohere_embedding_function import (
CohereEmbeddingFunction,
)
from chromadb.utils.embedding_functions.google_embedding_function import (
GooglePalmEmbeddingFunction,
GoogleGenerativeAiEmbeddingFunction,
GoogleVertexEmbeddingFunction,
)
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
HuggingFaceEmbeddingFunction,
)
from chromadb.utils.embedding_functions.instructor_embedding_function import (
InstructorEmbeddingFunction,
)
from chromadb.utils.embedding_functions.jina_embedding_function import (
JinaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.ollama_embedding_function import (
OllamaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.onnx_mini_lm_l6_v2 import ONNXMiniLM_L6_V2
from chromadb.utils.embedding_functions.open_clip_embedding_function import (
OpenCLIPEmbeddingFunction,
)
from chromadb.utils.embedding_functions.openai_embedding_function import (
OpenAIEmbeddingFunction,
)
from chromadb.utils.embedding_functions.roboflow_embedding_function import (
RoboflowEmbeddingFunction,
)
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import (
SentenceTransformerEmbeddingFunction,
)
from chromadb.utils.embedding_functions.text2vec_embedding_function import (
Text2VecEmbeddingFunction,
)
from crewai.rag.embeddings.types import EmbeddingOptions
def get_embedding_function(
    config: EmbeddingOptions | dict | None = None,
) -> EmbeddingFunction:
    """Get embedding function - delegates to ChromaDB.

    Args:
        config: Optional configuration - either an EmbeddingOptions object or a dict with:
            - provider: The embedding provider to use (default: "openai")
            - Any other provider-specific parameters

    Returns:
        EmbeddingFunction instance ready for use with ChromaDB

    Raises:
        ValueError: If the requested provider is not one of the supported names.

    Supported providers:
        - openai: OpenAI embeddings (default)
        - cohere: Cohere embeddings
        - ollama: Ollama local embeddings
        - huggingface: HuggingFace embeddings
        - sentence-transformer: Local sentence transformers
        - instructor: Instructor embeddings for specialized tasks
        - google-palm: Google PaLM embeddings
        - google-generativeai: Google Generative AI embeddings
        - google-vertex: Google Vertex AI embeddings
        - amazon-bedrock: AWS Bedrock embeddings
        - jina: Jina AI embeddings
        - roboflow: Roboflow embeddings for vision tasks
        - openclip: OpenCLIP embeddings for multimodal tasks
        - text2vec: Text2Vec embeddings
        - onnx: ONNX MiniLM-L6-v2 (no API key needed, included with ChromaDB)

    Examples:
        # Use default OpenAI with retry logic
        >>> embedder = get_embedding_function()

        # Use Cohere with dict
        >>> embedder = get_embedding_function({
        ...     "provider": "cohere",
        ...     "api_key": "your-key",
        ...     "model_name": "embed-english-v3.0"
        ... })

        # Use with EmbeddingOptions
        >>> embedder = get_embedding_function(
        ...     EmbeddingOptions(provider="sentence-transformer", model_name="all-MiniLM-L6-v2")
        ... )

        # Use Ollama for local embeddings
        >>> embedder = get_embedding_function({
        ...     "provider": "ollama",
        ...     "model_name": "nomic-embed-text"
        ... })

        # Use ONNX (no API key needed)
        >>> embedder = get_embedding_function({
        ...     "provider": "onnx"
        ... })
    """
    if config is None:
        # Default: OpenAI's small embedding model, key taken from the environment.
        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )

    # Normalize config into a plain dict of constructor kwargs (copy so the
    # caller's dict is never mutated by the pop() below).
    if isinstance(config, EmbeddingOptions):
        config_dict = config.model_dump(exclude_none=True)
    else:
        config_dict = config.copy()

    # BUGFIX: EmbeddingOptions.api_key is a pydantic SecretStr, and
    # model_dump() preserves SecretStr instances instead of revealing the
    # secret. Provider constructors expect plain strings, so unwrap any
    # secret-like value before forwarding it.
    for key, value in list(config_dict.items()):
        if hasattr(value, "get_secret_value"):
            config_dict[key] = value.get_secret_value()

    provider = config_dict.pop("provider", "openai")

    # Registry mapping provider names to ChromaDB embedding function classes.
    embedding_functions = {
        "openai": OpenAIEmbeddingFunction,
        "cohere": CohereEmbeddingFunction,
        "ollama": OllamaEmbeddingFunction,
        "huggingface": HuggingFaceEmbeddingFunction,
        "sentence-transformer": SentenceTransformerEmbeddingFunction,
        "instructor": InstructorEmbeddingFunction,
        "google-palm": GooglePalmEmbeddingFunction,
        "google-generativeai": GoogleGenerativeAiEmbeddingFunction,
        "google-vertex": GoogleVertexEmbeddingFunction,
        "amazon-bedrock": AmazonBedrockEmbeddingFunction,
        "jina": JinaEmbeddingFunction,
        "roboflow": RoboflowEmbeddingFunction,
        "openclip": OpenCLIPEmbeddingFunction,
        "text2vec": Text2VecEmbeddingFunction,
        "onnx": ONNXMiniLM_L6_V2,
    }

    if provider not in embedding_functions:
        raise ValueError(
            f"Unsupported provider: {provider}. "
            f"Available providers: {list(embedding_functions.keys())}"
        )

    # Remaining keys are passed straight through to the provider constructor.
    return embedding_functions[provider](**config_dict)

View File

@@ -1,62 +0,0 @@
"""Type definitions for the embeddings module."""
from typing import Literal
from pydantic import BaseModel, Field, SecretStr
from crewai.rag.types import EmbeddingFunction
EmbeddingProvider = Literal[
"openai",
"cohere",
"ollama",
"huggingface",
"sentence-transformer",
"instructor",
"google-palm",
"google-generativeai",
"google-vertex",
"amazon-bedrock",
"jina",
"roboflow",
"openclip",
"text2vec",
"onnx",
]
"""Supported embedding providers.
These correspond to the embedding functions available in ChromaDB's
embedding_functions module. Each provider has specific requirements
and configuration options.
"""
class EmbeddingOptions(BaseModel):
    """Configuration options for embedding providers.

    Generic attributes that can be passed to get_embedding_function
    to configure various embedding providers.
    """

    # Which provider to use; constrained to the EmbeddingProvider literal set.
    provider: EmbeddingProvider = Field(
        ..., description="Embedding provider name (e.g., 'openai', 'cohere', 'onnx')"
    )
    # Provider-specific model identifier; None lets the provider pick a default.
    model_name: str | None = Field(
        default=None, description="Model name for the embedding provider"
    )
    # Stored as SecretStr so the key is not exposed in repr/logs.
    api_key: SecretStr | None = Field(
        default=None, description="API key for the embedding provider"
    )
class EmbeddingConfig(BaseModel):
    """Configuration wrapper for embedding functions.

    Accepts either a pre-configured EmbeddingFunction or EmbeddingOptions
    to create one. This provides flexibility in how embeddings are configured.

    Attributes:
        function: Either a callable EmbeddingFunction or EmbeddingOptions to create one
    """

    function: EmbeddingFunction | EmbeddingOptions

View File

@@ -1,50 +0,0 @@
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""
from collections.abc import Callable, Mapping
from typing import TypeAlias, TypedDict, Any
from typing_extensions import Required
class BaseRecord(TypedDict, total=False):
    """A typed dictionary representing a document record.

    Attributes:
        doc_id: Optional unique identifier for the document. If not provided,
            a content-based ID will be generated using SHA256 hash.
        content: The text content of the document (required)
        metadata: Optional metadata associated with the document
    """

    doc_id: str
    # Required even though the class is total=False.
    content: Required[str]
    # Either a single metadata mapping or a list of mappings of scalar values.
    metadata: (
        Mapping[str, str | int | float | bool]
        | list[Mapping[str, str | int | float | bool]]
    )
# A dense embedding vector of floats.
DenseVector: TypeAlias = list[float]
# A vector of integers; NOTE(review): exact use (e.g. sparse indices vs token
# ids) is not visible here -- confirm against callers.
IntVector: TypeAlias = list[int]
# Any callable that maps input text(s) to embedding vector(s).
EmbeddingFunction: TypeAlias = Callable[..., Any]
class SearchResult(TypedDict):
    """Standard search result format for vector store queries.

    This provides a consistent interface for search results across different
    vector store implementations. Each implementation should convert their
    native result format to this standard format.

    Attributes:
        id: Unique identifier of the document
        content: The text content of the document
        metadata: Optional metadata associated with the document
        score: Similarity score (higher is better, typically between 0 and 1)
    """

    id: str
    content: str
    metadata: dict[str, Any]
    score: float

View File

@@ -72,10 +72,6 @@ class Task(BaseModel):
output_pydantic: Pydantic model for task output.
security_config: Security configuration including fingerprinting.
tools: List of tools/resources limited for task execution.
allow_crewai_trigger_context: Optional flag to control crewai_trigger_payload injection.
None (default): Auto-inject for first task only.
True: Always inject trigger payload for this task.
False: Never inject trigger payload, even for first task.
"""
__hash__ = object.__hash__ # type: ignore
@@ -167,10 +163,6 @@ class Task(BaseModel):
end_time: Optional[datetime.datetime] = Field(
default=None, description="End time of the task execution"
)
allow_crewai_trigger_context: Optional[bool] = Field(
default=None,
description="Whether this task should append 'Trigger Payload: {crewai_trigger_payload}' to the task description when crewai_trigger_payload exists in crew inputs.",
)
model_config = {"arbitrary_types_allowed": True}
@field_validator("guardrail")
@@ -556,23 +548,12 @@ class Task(BaseModel):
str: The formatted prompt string containing the task description,
expected output, and optional markdown formatting instructions.
"""
description = self.description
should_inject = self.allow_crewai_trigger_context
if should_inject and self.agent:
crew = getattr(self.agent, 'crew', None)
if crew and hasattr(crew, '_inputs') and crew._inputs:
trigger_payload = crew._inputs.get("crewai_trigger_payload")
if trigger_payload is not None:
description += f"\n\nTrigger Payload: {trigger_payload}"
tasks_slices = [description]
tasks_slices = [self.description]
output = self.i18n.slice("expected_output").format(
expected_output=self.expected_output
)
tasks_slices = [description, output]
tasks_slices = [self.description, output]
if self.markdown:
markdown_instruction = """Your final answer MUST be formatted in Markdown syntax.

View File

@@ -14,14 +14,12 @@ from pydantic import BaseModel as PydanticBaseModel
from crewai.tools.structured_tool import CrewStructuredTool
class EnvVar(BaseModel):
name: str
description: str
required: bool = True
default: Optional[str] = None
class BaseTool(BaseModel, ABC):
class _ArgsSchemaPlaceholder(PydanticBaseModel):
pass
@@ -110,7 +108,7 @@ class BaseTool(BaseModel, ABC):
def to_structured_tool(self) -> CrewStructuredTool:
"""Convert this tool to a CrewStructuredTool instance."""
self._set_args_schema()
structured_tool = CrewStructuredTool(
return CrewStructuredTool(
name=self.name,
description=self.description,
args_schema=self.args_schema,
@@ -119,8 +117,6 @@ class BaseTool(BaseModel, ABC):
max_usage_count=self.max_usage_count,
current_usage_count=self.current_usage_count,
)
structured_tool._original_tool = self
return structured_tool
@classmethod
def from_langchain(cls, tool: Any) -> "BaseTool":
@@ -280,9 +276,7 @@ def to_langchain(
return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
def tool(
*args, result_as_answer: bool = False, max_usage_count: int | None = None
) -> Callable:
def tool(*args, result_as_answer: bool = False, max_usage_count: int | None = None) -> Callable:
"""
Decorator to create a tool from a function.

View File

@@ -10,11 +10,6 @@ from pydantic import BaseModel, Field, create_model
from crewai.utilities.logger import Logger
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from crewai.tools.base_tool import BaseTool
class CrewStructuredTool:
"""A structured tool that can operate on any number of inputs.
@@ -23,8 +18,6 @@ class CrewStructuredTool:
that integrates better with CrewAI's ecosystem.
"""
_original_tool: BaseTool | None = None
def __init__(
self,
name: str,
@@ -54,7 +47,6 @@ class CrewStructuredTool:
self.result_as_answer = result_as_answer
self.max_usage_count = max_usage_count
self.current_usage_count = current_usage_count
self._original_tool = None
# Validate the function signature matches the schema
self._validate_function_signature()
@@ -227,8 +219,6 @@ class CrewStructuredTool:
"""
parsed_args = self._parse_args(input)
self._increment_usage_count()
if inspect.iscoroutinefunction(self.func):
return await self.func(**parsed_args, **kwargs)
else:
@@ -252,8 +242,6 @@ class CrewStructuredTool:
"""Main method for tool execution."""
parsed_args = self._parse_args(input)
self._increment_usage_count()
if inspect.iscoroutinefunction(self.func):
result = asyncio.run(self.func(**parsed_args, **kwargs))
return result
@@ -265,12 +253,6 @@ class CrewStructuredTool:
return result
def _increment_usage_count(self) -> None:
"""Increment the usage count."""
self.current_usage_count += 1
if self._original_tool is not None:
self._original_tool.current_usage_count = self.current_usage_count
@property
def args(self) -> dict:
"""Get the tool's input arguments schema."""

View File

@@ -1,10 +1,9 @@
import os
import re
import portalocker
from chromadb import PersistentClient
from hashlib import md5
from typing import Optional
from crewai.utilities.paths import db_storage_path
MIN_COLLECTION_LENGTH = 3
MAX_COLLECTION_LENGTH = 63
@@ -28,9 +27,7 @@ def is_ipv4_pattern(name: str) -> bool:
return bool(IPV4_PATTERN.match(name))
def sanitize_collection_name(
name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH
) -> str:
def sanitize_collection_name(name: Optional[str], max_collection_length: int = MAX_COLLECTION_LENGTH) -> str:
"""
Sanitize a collection name to meet ChromaDB requirements:
1. 3-63 characters long
@@ -75,8 +72,7 @@ def create_persistent_client(path: str, **kwargs):
concurrent creations. Works for both multi-threads and multi-processes
environments.
"""
lock_id = md5(path.encode(), usedforsecurity=False).hexdigest()
lockfile = os.path.join(db_storage_path(), f"chromadb-{lock_id}.lock")
lockfile = f"chromadb-{md5(path.encode(), usedforsecurity=False).hexdigest()}.lock"
with portalocker.Lock(lockfile):
client = PersistentClient(path=path, **kwargs)

View File

@@ -67,6 +67,7 @@ from .memory_events import (
# events
from .event_listener import EventListener
from .third_party.agentops_listener import agentops_listener
__all__ = [
"EventListener",
@@ -121,4 +122,5 @@ __all__ = [
"ToolSelectionErrorEvent",
"ToolUsageEvent",
"ToolValidateInputErrorEvent",
"agentops_listener",
]

View File

@@ -161,10 +161,8 @@ class EventListener(BaseEventListener):
def on_task_started(source, event: TaskStartedEvent):
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span
# Pass both task ID and task name (if set)
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id, task_name
self.formatter.current_crew_tree, source.id
)
@crewai_event_bus.on(TaskCompletedEvent)
@@ -175,14 +173,11 @@ class EventListener(BaseEventListener):
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"completed",
task_name
)
@crewai_event_bus.on(TaskFailedEvent)
@@ -193,14 +188,11 @@ class EventListener(BaseEventListener):
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"failed",
task_name
)
# ----------- AGENT EVENTS -----------

View File

@@ -40,8 +40,6 @@ class TraceBatch:
class TraceBatchManager:
"""Single responsibility: Manage batches and event buffering"""
is_current_batch_ephemeral: bool = False
def __init__(self):
try:
self.plus_api = PlusAPI(api_key=get_auth_token())
@@ -64,7 +62,6 @@ class TraceBatchManager:
user_context=user_context, execution_metadata=execution_metadata
)
self.event_buffer.clear()
self.is_current_batch_ephemeral = use_ephemeral
self.record_start_time("execution")
self._initialize_backend_batch(user_context, execution_metadata, use_ephemeral)
@@ -139,7 +136,7 @@ class TraceBatchManager:
"""Add event to buffer"""
self.event_buffer.append(trace_event)
def _send_events_to_backend(self):
def _send_events_to_backend(self, ephemeral: bool = True):
"""Send buffered events to backend"""
if not self.plus_api or not self.trace_batch_id or not self.event_buffer:
return
@@ -159,7 +156,7 @@ class TraceBatchManager:
response = (
self.plus_api.send_ephemeral_trace_events(self.trace_batch_id, payload)
if self.is_current_batch_ephemeral
if ephemeral
else self.plus_api.send_trace_events(self.trace_batch_id, payload)
)
@@ -173,14 +170,15 @@ class TraceBatchManager:
except Exception as e:
logger.error(f"❌ Error sending events to backend: {str(e)}")
def finalize_batch(self) -> Optional[TraceBatch]:
def finalize_batch(self, ephemeral: bool = True) -> Optional[TraceBatch]:
"""Finalize batch and return it for sending"""
if not self.current_batch:
return None
if self.event_buffer:
self._send_events_to_backend()
self._finalize_backend_batch()
self._send_events_to_backend(ephemeral)
self._finalize_backend_batch(ephemeral)
self.current_batch.events = self.event_buffer.copy()
@@ -189,13 +187,12 @@ class TraceBatchManager:
self.current_batch = None
self.event_buffer.clear()
self.trace_batch_id = None
self.is_current_batch_ephemeral = False
self._cleanup_batch_data()
return finalized_batch
def _finalize_backend_batch(self):
def _finalize_backend_batch(self, ephemeral: bool = True):
"""Send batch finalization to backend"""
if not self.plus_api or not self.trace_batch_id:
return
@@ -213,7 +210,7 @@ class TraceBatchManager:
self.plus_api.finalize_ephemeral_trace_batch(
self.trace_batch_id, payload
)
if self.is_current_batch_ephemeral
if ephemeral
else self.plus_api.finalize_trace_batch(self.trace_batch_id, payload)
)
@@ -222,7 +219,7 @@ class TraceBatchManager:
console = Console()
return_link = (
f"{CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}"
if not self.is_current_batch_ephemeral and access_code is None
if not ephemeral and access_code
else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
)
panel = Panel(

View File

@@ -166,7 +166,7 @@ class TraceCollectionListener(BaseEventListener):
@event_bus.on(CrewKickoffCompletedEvent)
def on_crew_completed(source, event):
self._handle_trace_event("crew_kickoff_completed", source, event)
self.batch_manager.finalize_batch()
self.batch_manager.finalize_batch(ephemeral=True)
@event_bus.on(CrewKickoffFailedEvent)
def on_crew_failed(source, event):

View File

@@ -0,0 +1 @@
from .agentops_listener import agentops_listener as agentops_listener

View File

@@ -0,0 +1,137 @@
import logging
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.utilities.events.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffStartedEvent,
)
from crewai.utilities.events.task_events import TaskEvaluationEvent
from crewai.utilities.events.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageStartedEvent,
)
logger = logging.getLogger(__name__)
class AgentOpsListener(BaseEventListener):
    """Bridges CrewAI bus events to the optional AgentOps observability SDK.

    The ``agentops`` package is resolved once at construction time; when it is
    not installed, ``self.agentops`` stays ``None`` and every handler is a
    silent no-op, so CrewAI works unchanged without the dependency.

    Fix over the previous revision: a session is now also ended when the crew
    kickoff *fails* (``CrewKickoffFailedEvent``). Previously only the success
    path ended the session, leaving an AgentOps session dangling on failure.
    This mirrors ``TraceCollectionListener``, which handles the same event.
    """

    def __init__(self):
        # Attribute must exist before super().__init__(), which may trigger
        # setup_listeners() and therefore reads self.agentops.
        self.agentops = None
        try:
            import agentops

            self.agentops = agentops
            logger.info("AgentOps integration enabled")
        except ImportError:
            logger.debug("AgentOps not installed, skipping AgentOps integration")
        super().__init__()

    def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
        """Register bus handlers; registers nothing when AgentOps is absent.

        Args:
            crewai_event_bus: The CrewAI event bus to attach handlers to.
        """
        if self.agentops is None:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event):
            self._handle_crew_kickoff_started(source, event)

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event):
            self._handle_crew_kickoff_completed(source, event)

        @crewai_event_bus.on(CrewKickoffFailedEvent)
        def on_crew_kickoff_failed(source, event):
            self._handle_crew_kickoff_failed(source, event)

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event):
            self._handle_tool_usage_started(source, event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event):
            self._handle_tool_usage_error(source, event)

        @crewai_event_bus.on(TaskEvaluationEvent)
        def on_task_evaluation(source, event):
            self._handle_task_evaluation(source, event)

    def _handle_crew_kickoff_started(self, source, event: CrewKickoffStartedEvent):
        """Start an AgentOps session when a crew kickoff begins."""
        if self.agentops is None:
            return
        try:
            # NOTE(review): passing a Configuration into start_session follows
            # the prior revision; confirm against the installed AgentOps SDK
            # version, as its session API has changed across releases.
            self.agentops.start_session(
                tags=["crewai", "crew_kickoff"],
                config=self.agentops.Configuration(
                    auto_start_session=False,
                    instrument_llm_calls=True,
                ),
            )
            logger.debug("AgentOps session started for crew kickoff")
        except Exception as e:
            # Observability must never break the crew run — log and move on.
            logger.warning(f"Failed to start AgentOps session: {e}")

    def _handle_crew_kickoff_completed(self, source, event: CrewKickoffCompletedEvent):
        """End the AgentOps session with a success end-state."""
        if self.agentops is None:
            return
        try:
            self.agentops.end_session("Success")
            logger.debug("AgentOps session ended for crew kickoff completion")
        except Exception as e:
            logger.warning(f"Failed to end AgentOps session: {e}")

    def _handle_crew_kickoff_failed(self, source, event: CrewKickoffFailedEvent):
        """End the AgentOps session with a failure end-state.

        Without this, a failed kickoff would leave the session open forever.
        """
        if self.agentops is None:
            return
        try:
            # "Fail" is an AgentOps end-state string — confirm supported
            # values against the installed SDK version.
            self.agentops.end_session("Fail")
            logger.debug("AgentOps session ended for crew kickoff failure")
        except Exception as e:
            logger.warning(f"Failed to end AgentOps session: {e}")

    def _handle_tool_usage_started(self, source, event: ToolUsageStartedEvent):
        """Record a tool invocation (name and arguments) as an ActionEvent."""
        if self.agentops is None:
            return
        try:
            self.agentops.record(
                self.agentops.ActionEvent(
                    action_type="tool_usage",
                    params={
                        "tool_name": event.tool_name,
                        "tool_args": event.tool_args,
                    },
                )
            )
            logger.debug(f"AgentOps recorded tool usage: {event.tool_name}")
        except Exception as e:
            logger.warning(f"Failed to record tool usage in AgentOps: {e}")

    def _handle_tool_usage_error(self, source, event: ToolUsageErrorEvent):
        """Record a tool failure as an ErrorEvent with the offending args."""
        if self.agentops is None:
            return
        try:
            self.agentops.record(
                self.agentops.ErrorEvent(
                    message=f"Tool usage error: {event.error}",
                    error_type="ToolUsageError",
                    details={
                        "tool_name": event.tool_name,
                        "tool_args": event.tool_args,
                    },
                )
            )
            logger.debug(f"AgentOps recorded tool usage error: {event.tool_name}")
        except Exception as e:
            logger.warning(f"Failed to record tool usage error in AgentOps: {e}")

    def _handle_task_evaluation(self, source, event: TaskEvaluationEvent):
        """Record a task evaluation (type and stringified task) as an ActionEvent."""
        if self.agentops is None:
            return
        try:
            self.agentops.record(
                self.agentops.ActionEvent(
                    action_type="task_evaluation",
                    params={
                        "evaluation_type": event.evaluation_type,
                        # str() guards against non-serializable Task objects.
                        "task": str(event.task) if event.task else None,
                    },
                )
            )
            logger.debug(f"AgentOps recorded task evaluation: {event.evaluation_type}")
        except Exception as e:
            logger.warning(f"Failed to record task evaluation in AgentOps: {e}")


# Module-level singleton registered by the third-party listeners package.
agentops_listener = AgentOpsListener()

View File

@@ -220,22 +220,14 @@ class ConsoleFormatter:
return tree
def create_task_branch(
self, crew_tree: Optional[Tree], task_id: str, task_name: Optional[str] = None
self, crew_tree: Optional[Tree], task_id: str
) -> Optional[Tree]:
"""Create and initialize a task branch."""
if not self.verbose:
return None
task_content = Text()
# Display task name if available, otherwise just the ID
if task_name:
task_content.append("📋 Task: ", style="yellow bold")
task_content.append(f"{task_name}", style="yellow bold")
task_content.append(f" (ID: {task_id})", style="yellow dim")
else:
task_content.append(f"📋 Task: {task_id}", style="yellow bold")
task_content.append(f"📋 Task: {task_id}", style="yellow bold")
task_content.append("\nStatus: ", style="white")
task_content.append("Executing Task...", style="yellow dim")
@@ -259,7 +251,6 @@ class ConsoleFormatter:
task_id: str,
agent_role: str,
status: str = "completed",
task_name: Optional[str] = None,
) -> None:
"""Update task status in the tree."""
if not self.verbose or crew_tree is None:
@@ -279,13 +270,8 @@ class ConsoleFormatter:
if str(task_id) in str(branch.label):
# Build label without introducing stray blank lines
task_content = Text()
# First line: Task ID/name
if task_name:
task_content.append("📋 Task: ", style=f"{style} bold")
task_content.append(f"{task_name}", style=f"{style} bold")
task_content.append(f" (ID: {task_id})", style=f"{style} dim")
else:
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# First line: Task ID
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# Second line: Assigned to
task_content.append("\nAssigned to: ", style="white")
@@ -299,9 +285,8 @@ class ConsoleFormatter:
break
# Show status panel
display_name = task_name if task_name else str(task_id)
content = self.create_status_content(
f"Task {status.title()}", display_name, style, Agent=agent_role
f"Task {status.title()}", str(task_id), style, Agent=agent_role
)
self.print_panel(content, panel_title, style)

View File

@@ -21,7 +21,7 @@ from crewai.utilities import RPMController
from crewai.utilities.errors import AgentRepositoryError
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent
from crewai.process import Process
def test_agent_llm_creation_with_env_vars():
# Store original environment variables
@@ -1209,181 +1209,6 @@ Thought:<|eot_id|>
assert mock_format_prompt.return_value == expected_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context():
    """A task with allow_crewai_trigger_context=True gets the trigger payload in its prompt."""
    from crewai import Crew

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory"
    )
    task = Task(
        description="Analyze the data",
        expected_output="Analysis report",
        agent=agent,
        allow_crewai_trigger_context=True
    )
    crew = Crew(agents=[agent], tasks=[task])
    # `crewai_trigger_payload` is the reserved kickoff-input key for trigger context.
    crew.kickoff({"crewai_trigger_payload": "Important context data"})

    prompt = task.prompt()
    assert "Analyze the data" in prompt
    assert "Trigger Payload: Important context data" in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_without_allow_crewai_trigger_context():
    """A task with allow_crewai_trigger_context=False never sees the trigger payload."""
    from crewai import Crew

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory"
    )
    task = Task(
        description="Analyze the data",
        expected_output="Analysis report",
        agent=agent,
        allow_crewai_trigger_context=False
    )
    crew = Crew(agents=[agent], tasks=[task])
    crew.kickoff({"crewai_trigger_payload": "Important context data"})

    prompt = task.prompt()
    assert "Analyze the data" in prompt
    # Neither the header nor the payload text may leak into the prompt.
    assert "Trigger Payload:" not in prompt
    assert "Important context data" not in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_allow_crewai_trigger_context_no_payload():
    """Opting in adds nothing when kickoff inputs carry no `crewai_trigger_payload` key."""
    from crewai import Crew

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory"
    )
    task = Task(
        description="Analyze the data",
        expected_output="Analysis report",
        agent=agent,
        allow_crewai_trigger_context=True
    )
    crew = Crew(agents=[agent], tasks=[task])
    # No trigger key in the inputs, so no "Trigger Payload:" section is expected.
    crew.kickoff({"other_input": "other data"})

    prompt = task.prompt()
    assert "Analyze the data" in prompt
    assert "Trigger Payload:" not in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
    """In a hierarchical process the first task is NOT auto-injected with the trigger payload."""
    from crewai import Crew

    agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
    agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

    first_task = Task(
        description="Process initial data",
        expected_output="Initial analysis",
        agent=agent1,
    )
    crew = Crew(
        agents=[agent1, agent2],
        tasks=[first_task],
        process=Process.hierarchical,
        manager_llm="gpt-4o"
    )
    crew.kickoff({"crewai_trigger_payload": "Initial context data"})

    first_prompt = first_task.prompt()
    assert "Process initial data" in first_prompt
    assert "Trigger Payload: Initial context data" not in first_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_first_task_auto_inject_trigger():
    """In a sequential crew, only the first task is auto-injected with the trigger payload."""
    from crewai import Crew

    agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
    agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

    first_task = Task(
        description="Process initial data",
        expected_output="Initial analysis",
        agent=agent1,
    )
    second_task = Task(
        description="Process secondary data",
        expected_output="Secondary analysis",
        agent=agent2,
    )
    crew = Crew(
        agents=[agent1, agent2],
        tasks=[first_task, second_task]
    )
    crew.kickoff({"crewai_trigger_payload": "Initial context data"})

    # First task receives the payload by default.
    first_prompt = first_task.prompt()
    assert "Process initial data" in first_prompt
    assert "Trigger Payload: Initial context data" in first_prompt

    # Subsequent tasks do not.
    second_prompt = second_task.prompt()
    assert "Process secondary data" in second_prompt
    assert "Trigger Payload:" not in second_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
    """Explicit per-task flags override the default first-task injection behavior."""
    from crewai import Crew

    agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
    agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")

    # First task explicitly opts out; second task explicitly opts in.
    first_task = Task(
        description="Process initial data",
        expected_output="Initial analysis",
        agent=agent1,
        allow_crewai_trigger_context=False
    )
    second_task = Task(
        description="Process secondary data",
        expected_output="Secondary analysis",
        agent=agent2,
        allow_crewai_trigger_context=True
    )
    crew = Crew(
        agents=[agent1, agent2],
        tasks=[first_task, second_task]
    )
    crew.kickoff({"crewai_trigger_payload": "Context data"})

    first_prompt = first_task.prompt()
    assert "Trigger Payload: Context data" not in first_prompt

    second_prompt = second_task.prompt()
    assert "Trigger Payload: Context data" in second_prompt
@patch("crewai.agent.CrewTrainingHandler")
def test_agent_training_handler(crew_training_handler):
task_prompt = "What is 1 + 1?"

View File

@@ -1,126 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r1j0HBc7ZydXvx1HC0evhT6UUtrDKNLa1lXWqpJ8aTjy
34vsXOz0A/pi8M7OaGZ3nxMApiSrgImOB9Fbnd4Why/v79HeePeD37/Zfdx8evf5+rY4fNjdPbJV
ZNDuEUV4Yb0S1FuNQZGZYOGQB4yq+bYsr7J1nhcj0JNEHWmtDWlBaa+MStfZukizbZpfn9gdKYGe
VfA1AQB4Hr/Rp5H4k1WQrV4qPXrPW2TVuQmAOdKxwrj3ygduAlvNoCAT0IzW78DQHgQ30KonBA5t
tA3c+D06gG/mrTJcw834X0GHWhPsyWm5FHTYDJ7HUGbQegFwYyjwOJQxysMJOZ7Na2qto53/jcoa
ZZTvaofck4lGfSDLRvSYADyMQxoucjPrqLehDvQdx+fycjvpsXk3C/TqBAYKXC/q29NoL/VqiYEr
7RdjZoKLDuVMnXfCB6loASSL1H+6+Zv2lFyZ9n/kZ0AItAFlbR1KJS4Tz20O4+n+q+085dEw8+ie
lMA6KHRxExIbPujpoJg/+IB93SjTorNOTVfV2LrcZLzZYFm+Zskx+QUAAP//AwB1vYZ+YwMAAA==
headers:
CF-RAY:
- 96fc9f29dea3cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
path=/; expires=Sat, 16-Aug-25 00:25:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '735'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '753'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_212fde9d945a462ba0d89ea856131dce
status:
code: 200
message: OK
version: 1

View File

@@ -1,123 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
_cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4VkW3ajW1GgQA7JJUGLog0EmlxJrCkuQ1J2g8D/
XlByLLkPoBcB2tkZzuzuawLAlGQlMNHyIDqr04/rl6/3z+6hu8vq/cOX5936cRPE3i8Px88rtogM
2v1AEd5Y7wR1VmNQZEZYOOQBo2q+LYpVtszzYgA6kqgjrbEhXVPaKaPSZbZcp9k2zd+f2S0pgZ6V
8C0BAHgdvtGnkfiTlZAt3iodes8bZOWlCYA50rHCuPfKB24CW0ygIBPQDNZvwdARBDfQqAMChyba
Bm78ER3Ad/NJGa7hw/BfQotaExzJaTkXdFj3nsdQptd6BnBjKPA4lCHK0xk5Xcxraqyjnf+Nympl
lG8rh9yTiUZ9IMsG9JQAPA1D6q9yM+uos6EKtMfhubzYjnps2s0MXZ3BQIHrWX17Hu21XiUxcKX9
bMxMcNGinKjTTngvFc2AZJb6Tzd/0x6TK9P8j/wECIE2oKysQ6nEdeKpzWE83X+1XaY8GGYe3UEJ
rIJCFzchsea9Hg+K+RcfsKtqZRp01qnxqmpbFZuM1xssihuWnJJfAAAA//8DAFSowWRjAwAA
headers:
CF-RAY:
- 96fc9f301bf7cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:16 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '685'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '711'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3f0ec42447374a76a22a4cdb9f336279
status:
code: 200
message: OK
version: 1

View File

@@ -1,501 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Crew Manager. You
are a seasoned manager with a knack for getting the best out of your team.\nYou
are also known for your ability to delegate work to the right people, and to
ask the right questions to get the best out of your team.\nEven though you don''t
perform tasks by yourself, you have a lot of experience in the field, which
allows you to properly evaluate the work of your team members.\nYour personal
goal is: Manage the team to complete the task in the best way possible.\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments:
{''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'':
{''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'':
{''description'': ''The role/name of the coworker to delegate to'', ''type'':
''str''}}\nTool Description: Delegate a specific task to one of the following
coworkers: First Agent\nThe input to this tool should be the coworker, the task
you want them to do, and ALL necessary context to execute the task, they know
nothing about the task, so share absolutely everything you know, don''t reference
things but instead explain them.\nTool Name: Ask question to coworker\nTool
Arguments: {''question'': {''description'': ''The question to ask'', ''type'':
''str''}, ''context'': {''description'': ''The context for the question'', ''type'':
''str''}, ''coworker'': {''description'': ''The role/name of the coworker to
ask'', ''type'': ''str''}}\nTool Description: Ask a specific question to one
of the following coworkers: First Agent\nThe input to this tool should be the
coworker, the question you have for them, and ALL necessary context to ask the
question properly, they know nothing about the question, so share absolutely
everything you know, don''t reference things but instead explain them.\n\nIMPORTANT:
Use the following format in your response:\n\n```\nThought: you should always
think about what to do\nAction: the action to take, only one name of [Delegate
work to coworker, Ask question to coworker], just the name, exactly as it''s
written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"}, {"role": "user", "content":
"\nCurrent Task: Process initial data\n\nThis is the expected criteria for your
final answer: Initial analysis\nyou MUST return the actual complete content
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '2921'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFTJbtswEL37KwY824aT2E7qWxcUSE9Fa6CHujDG5EiahhoK5MiOG+Tf
C0re0qZAL4LAN2+WN8vTAMCwMwswtkK1deNH7+dXd5+0lTl/mZYPzd1mOvHNuzhffvt6szTDzAib
n2T1yBrbUDeelIP0sI2EStnr1e1sNptNbmZvOqAOjnymlY2OpmF0PbmejiZ3o8n8QKwCW0pmAd8H
AABP3TenKI4ezQImw+NLTSlhSWZxMgIwMfj8YjAlToqiZngGbRAl6bJeVqEtK13APQiRAw3gyFOJ
SqAVgWJ6gFBAE4OllFjK7pmFldGDQ8XMyW8fOSaFtyWJ5ieS1EaCHUGFWwIErULMwQDFAVrbxhwE
Bf0+cRrDPezY+xxpy66LXsOOtQL0vgsglFPAuAdHiuxTDnNQPNtz6tOloiCrvCW/H69kJW9tbsgC
PhwL24X40HPzH8WjCdxL0+oCnlYmO1qZBazM577yFyWvzBBWvYyP2pstj2KxbIPfUuor+/WqYon0
JEwkS7wlN4ZlroDF+tZRAusJ5cjOrCFYVCpD5M4pKxQhnvQbAjsS5WKfQZQ9aCRxCUKEBlUpShp2
0qe2rvHgJPsuWBxLmXICBGVAD9xJe+hbTiRCK45inqRsmydiV6GecoPsI6eX+u7K/lQwS+Ky0gSa
CRYFNgQu4k6giKEG1vFRzkM3Oj0vpmllni+nN1LRJszLI633FwCKBMXcyG5vfhyQ59Om+FA2MWzS
H1RTsHCq1pEwBclbkTQ0pkOfBwA/uo1sXyyZaWKoG11reKAu3PzquvdnzjfgjF7dTA+oBkV/Bm6n
8+ErDteHCb9YamPRVuTO1PMFwNZxuAAGF2X/nc5rvvvSWcr/cX8GrKVGya2bSI7ty5LPZpHyjfyX
2UnmLmGTKG7Z0lqZYm6FowJb358vk/ZJqV4XLCXFJnJ/w4pmPZ1vimJCE3tnBs+D3wAAAP//AwBY
9uEVzAUAAA==
headers:
CF-RAY:
- 97144bd22eb41abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:52:42 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
path=/; expires=Mon, 18-Aug-25 21:22:42 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '3236'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '3253'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999308'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999308'
x-ratelimit-reset-project-tokens:
- 1ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_08aa9de2797d4fee93003bdc7fc19156
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are First Agent. First
backstory\nYour personal goal is: First goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Process initial data\n\nThis is the expected criteria for your final answer:
Your best answer to your coworker asking you this, accounting for the context
shared.\nyou MUST return the actual complete content as the final answer, not
a summary.\n\nThis is the context you''re working with:\nThe task involves analyzing
the initial data set we have received. This includes cleaning the data, categorizing
it for analysis, identifying any trends or patterns, and summarizing the findings.
The goal is to have a clear understanding of what the data indicates and any
initial insights that can be drawn from it.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1262'
content-type:
- application/json
cookie:
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
_cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFbfbxRHDH7PX+HuE6DLKSEkQN6AChUhoUpFULVBkTPj3XUzO96MZ+9y
RfzvlWfubi+USn3Jj/HaY3+f/Y2/HgE07JtLaFyP2Q1jOH5zcfriwx+v36RP3affN+9buetUT27o
4uMvp5+bhXnIzV/k8s5r6WQYA2WWWM0uEWayqKfPz8/Pz0/OLs6KYRBPwdy6MR8/k+OBIx8/PXn6
7Pjk+fHpi613L+xIm0v48wgA4Gv5aXlGT/fNJZwsdicDqWJHzeX+I4AmSbCTBlVZM8bcLGajk5gp
ltTfQZQ1OIzQ8YoAobO0AaOuKQFcxbccMcCr8v8lfBQYkzhShdwTcOTMGMBjRlDKQG1LLvOKwmYB
a4I1hwCthCBrUFpRwgC3tAHNNCpkAYo6JbJPXSCMC3CYqZPEf9MCMGLY1D886DQMaOcgU4KWo+fY
KRjsiXqKWm5dXsWreLqEJ09+tpzeWFCO3ZMnl3AVAeAY3nLSPCeXaMW0LtVYFVZEKwkGVuXYLYCj
k2gYUswLkASUkkSSSYFiTky63EX+vA3ZY/SBdiFghWEihZsNEOeeEvAwTtksuacBblDJg0TQKSWZ
SlkVUEmQaJCVHSRykrzCuqdEEMkowGTllqtfec/WehgOgTfiPSZvoO1wdRhghYnxJtAhA/sq3QYe
0bJbLqrFLscQIIhDuwEiDqSAiUBHCoF8wU5xIFjj5nEh4OlMwI7O4nxAwwe6P2BhZn3PBHDMAokC
rTBmUOoGitn6DnN1QvalF0qbKM9EfOxZgeNKwooUuiTTuAd1FLYoe9SdDIP96jGhy5RYMztdgE6u
B1TwNEiXcOzLaeaBYKTE4rV0A8ZNaeiRUitpwOhsKjw7zJIUHr3/9Z0+tjINsFbcVFpCYoHpzGD6
mCj60uG/Ys6UIrzzFDO37L7H7DPnvuBTZoWq1wydLxXOoG5zAgRPGTkUhwqVEc/1mg1ky0BLsLGm
oMtDJEuwLZxQC9CMuSCFAbJIqN4r1gnDlutyxxSdrMj6ONTDnkcLmHuOe6aX+8kJIreAucKsZNO1
T3kBTtIuDjihtmXH1hJVH4wJ5S4W4GIGmXJgStuGGXADie4mNhqmVOcwrkgzdyViIeSZEfLbVmis
zrdbmXmgH99NmSkQB9POKlEbkPahRq17dv0ORhcmT3AjuYc7Q8uQXFnTKHd9rkDeTRjzzjJQTuwe
UrK7SXuZggeKDkedAuY6P9aRW1a3LDP5RYEoSrbR3zNdweNhDNt+U0s/96S0L2D5ncBhUAEvbrKJ
LFEDDyXd2b1OWxXuriPNNgGoRVx3BCRSwuR64PaBpF3F1xvYvadVJ5XqmzHDHiWDxLCBHsvDZTOR
YIqeUtG9MmctrI39A00po2lyPOVax5hkxZ4AXRFQw2bPRWmd8jhO9omRGuk+11SWlYoHD9A8YFWA
KdmZWm9IYA+tCXzt4HarHriXbqD7MUiqZkngybGyxOMBb62a+tpaT2gdKNNeKVmNSWwZMQW8iu9a
2Mi0xSVu4G6yNi/UWP7k9wS4gGkvNKaG9vmIKVcSWXev/QLGQKgEgTIMBLdR1j8drhSJ2knR1po4
hXBgwGgdV663ZebL1vJtv74E6cYkN/qda9NyZO2vrWkk2qqiWcamWL8dAXwpa9L0YPNpxiTDmK+z
3FK57unZ8xqvmbez2Xr2cmfNkjHMhouzZ4sfBLyuMqoHm1bj0PXkZ9d5LcPJsxwYjg7K/nc6P4pd
S+fY/Z/ws8E5GjP56zGRZ/ew5PmzRNYw//XZHuaScGN9zI6uM1MyKjy1OIW6Uza60UzDdcuxozQm
rotlO16fX5xge0Hn5y+bo29H/wAAAP//AwCE+a2iZgsAAA==
headers:
CF-RAY:
- 97144be7eaa81abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:52:47 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '4424'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '4473'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999717'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999717'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5bf23819c1214732aa87a90207bc0d31
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Crew Manager. You
are a seasoned manager with a knack for getting the best out of your team.\nYou
are also known for your ability to delegate work to the right people, and to
ask the right questions to get the best out of your team.\nEven though you don''t
perform tasks by yourself, you have a lot of experience in the field, which
allows you to properly evaluate the work of your team members.\nYour personal
goal is: Manage the team to complete the task in the best way possible.\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: Delegate work to coworker\nTool Arguments:
{''task'': {''description'': ''The task to delegate'', ''type'': ''str''}, ''context'':
{''description'': ''The context for the task'', ''type'': ''str''}, ''coworker'':
{''description'': ''The role/name of the coworker to delegate to'', ''type'':
''str''}}\nTool Description: Delegate a specific task to one of the following
coworkers: First Agent\nThe input to this tool should be the coworker, the task
you want them to do, and ALL necessary context to execute the task, they know
nothing about the task, so share absolutely everything you know, don''t reference
things but instead explain them.\nTool Name: Ask question to coworker\nTool
Arguments: {''question'': {''description'': ''The question to ask'', ''type'':
''str''}, ''context'': {''description'': ''The context for the question'', ''type'':
''str''}, ''coworker'': {''description'': ''The role/name of the coworker to
ask'', ''type'': ''str''}}\nTool Description: Ask a specific question to one
of the following coworkers: First Agent\nThe input to this tool should be the
coworker, the question you have for them, and ALL necessary context to ask the
question properly, they know nothing about the question, so share absolutely
everything you know, don''t reference things but instead explain them.\n\nIMPORTANT:
Use the following format in your response:\n\n```\nThought: you should always
think about what to do\nAction: the action to take, only one name of [Delegate
work to coworker, Ask question to coworker], just the name, exactly as it''s
written.\nAction Input: the input to the action, just a simple JSON object,
enclosed in curly braces, using \" to wrap keys and values.\nObservation: the
result of the action\n```\n\nOnce all necessary information is gathered, return
the following format:\n\n```\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n```"}, {"role": "user", "content":
"\nCurrent Task: Process initial data\n\nThis is the expected criteria for your
final answer: Initial analysis\nyou MUST return the actual complete content
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought:"}, {"role": "assistant", "content": "Thought: I need to delegate
the task of processing the initial data to the First Agent to ensure we have
a thorough and accurate analysis. I will provide them with all the necessary
details to complete this task effectively.\n\nAction: Delegate work to coworker\nAction
Input: {\"task\": \"Process initial data\", \"context\": \"The task involves
analyzing the initial data set we have received. This includes cleaning the
data, categorizing it for analysis, identifying any trends or patterns, and
summarizing the findings. The goal is to have a clear understanding of what
the data indicates and any initial insights that can be drawn from it.\", \"coworker\":
\"First Agent\"}\nObservation: To process the initial data set effectively,
we will follow several key steps to ensure we clean, categorize, analyze, and
summarize our findings comprehensively.\n\n1. **Data Cleaning**: \n - First,
we will review the dataset for missing, inconsistent, or erroneous entries.
\n - We will handle missing values by either imputing them based on surrounding
data or removing records where necessary.\n - Additionally, we will standardize
categorical variables to ensure consistency (e.g., ensuring all location names
are spelled the same way).\n\n2. **Data Categorization**: \n - Next, we will
categorize the data into relevant segments that will aid our analysis. \n -
This involves grouping data points based on common characteristics, such as
demographics, time periods, or any key performance indicators (KPIs) we are
focusing on.\n\n3. **Trend and Pattern Identification**: \n - With the cleaned
and categorized data, we will perform a detailed analysis to identify trends
and patterns.\n - This will involve using statistical tools and visualizations
to uncover relationships within the data. We will look at time series analysis,
correlation coefficients, and any significant outliers that may require further
investigation.\n\n4. **Summarizing Findings**: \n - Finally, we will compile
a summary of our findings which will include both qualitative insights and quantitative
metrics.\n - This summary should encapsulate the key trends identified, any
notable patterns, and implications of these findings.\n - We will also document
any limitations of the data and suggest areas for further research if necessary.\n\nBy
completing these steps, we will not only have a clear understanding of what
the data indicates but also provide actionable insights that can guide our next
steps. This comprehensive analysis will serve as a solid foundation for any
additional exploration or decision-making initiatives related to our project.
\n\nIf you have any questions or need further clarification on any part of this
process, please let me know!"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '5714'
content-type:
- application/json
cookie:
- __cf_bm=dCMuu_IT8i5kyJB9_ugQhudGYphCvJlfXMZwJgOuB8Y-1755550362-1.0.1.1-VyrRrYT2JzvUYUjT9T5uCe31rJR0Q_FicsTyAJZYdj0j8anm6ZdVD7QhtUW0OjVK_8F82E4cVt8Uf5shMfmUm3Gf.EMuBA1AgSAUrzsHEy4;
_cfuvid=YeODa6MF5ug3OZUV6ob1dSrBKCM8BXbKkS77TIihYoE-1755550362828-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFbbbhw3DH33VxDzlATrxTq+pX5LUwQJChRBayRA68ChJc4Ma404ETW7
3gT594LS3pymQF/2IooUeQ55pK9HAA375goa12N2wxiOX12cvPj9t/dv4sOf79z1m8vFKvbdl+fy
YfXlvTQz85C7v8nlrdfcyTAGyiyxml0izGRRTy7Pz8/PF6cXl8UwiKdgbt2Yj8/k+Pni+dnx4sXx
4mLj2As70uYK/joCAPhaPi3F6OmhuYLFbLsykCp21FztNgE0SYKtNKjKmjHmZrY3OomZYsn606dP
N/G6l6nr8xW8hSgruLeP3BO0HDEARl1Ruomvy7+X5d8VXAuMSRyplq0cOTMG8JgRlDJQ25LLvKSw
nsGKYMUhQCshyAqUlpQwwD2tQTONClmAok6JbKsLhHEGDjN1kvgLzQAjhnX94UGnYUBbB5mSJek5
dgqGfaKeopZT5zfxJp7M4dmzXyynVxaUY/fs2RXcRAA4htecNO+TS7RkqoVbFVZEKwkGVuXYzYCj
k2hoUswzkASUkkSSSYFiTkw630b+sAnZY/SBtiFgiWEihbs1EOeeEvAwTtksuacB7lDJg0TQKSWZ
SlkVUEmQaJClLSRykrzCqqdEEMkowGTllqNfes/WfxgOgbcW8Ji8gbbF1WGAJSbGu0CHDOyqdGt4
QvNuPqsWOxxDgCAO7QSIOJACJgIdKQTyBTvFgWCF66eFgOd7ArZ0FucDGn6jhwMW9qzvmACOWSBR
oCXGDErdQDFb32GuTsi+9EJpE+U9Edc9K3BcSliSQpdkGnegjsIWZYe6k2Gwrx4TukyJNbPTGejk
ekAFT4N0Cce+rGYeCEZKLF5LN2Bcl4YeKbWSBozOpsKzwyxJ4cmv797qUyvTAGvFTaUlJBaYTg2m
60TRlw5/hzlTivDWU8zcsvsesw+c+4JPmRWqXnvofKlwD+omJ0DwlJFDcahQGfFcj1lDtgy0BBtr
Cjo/RLIE28AJtQDNmAtSGCCLhOq9ZJ0wbLguZ0zRyZKsj0Nd7Hm0gLnnuGN6vpucIHIPmCvMSjZd
u5Rn4CRt44ATalt2bC2x0QfuYgEtZpApB6a0aZYB15Do88RGwZTqDMYlaeauRCtknBkZf2xExmp8
vZGYR9rx3YSZ+nAgwI08rUHaR/pUBCRMZajvJPfw2RAy9JbWKMpdnyt4nyeMeWsZKCd2j2nYnqC9
TMEDRYejTgFznRnrwg2TG2aZ/Kw0aJRs475jtwLGwxg2PaaWdu5JaZf4/DtRw6ACXtxkU1iiBh5K
unv3OmFVrLuONFvXoxZB3QKfSAmT64HbRzJ2E39ew/YirdqoVO+JPdxRMkgMa+hxaaDbHCSYoqdU
tK7MVgsrY/1AR8o4mgRPudYxJlmyJ0BXRNOw2XFRWsZhhG6yLUZmpIdcU5lXKh5dOvuhqqJLydbU
ekICe2hN1GvXthvFwJ1cAz2MQVI1SwJPjpUlHg94b9XUG9Z6QusQmd5KyWpMYq8QU71ynx/e9Yna
SdGeGnEK4cCA0ZqhkGavjI8by7fduyJINya50+9cm5Yja39rfEq0N4RmGZti/XYE8LG8X6ZHT5Jm
TDKM+TbLPZXjTk5OX9SAzf7JtDefXv60sWbJGA78zk8uZz8IeVt1TQ8eQY1D15Pf++5fTDh5lgPD
0UHh/87nR7Fr8Ry7/xN+b3COxkz+dkzk2T2ueb8tkbH5X9t2QJeEG2sydnSbmZKR4anFKdTnXqNr
zTTcthw7SmPi+uZrx9uzi7u2XdDCvWiOvh39AwAA//8DAIF0yI38CgAA
headers:
CF-RAY:
- 97144c04e89a1abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:52:50 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2974'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2999'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29998628'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29998627'
x-ratelimit-reset-project-tokens:
- 2ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 2ms
x-request-id:
- req_c0cd67fc9b9342a7bd649b1458724745
status:
code: 200
message: OK
version: 1

View File

@@ -1,296 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are First Agent. First
backstory\nYour personal goal is: First goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Process initial data\n\nThis is the expected criteria for your final answer:
Initial analysis\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '831'
content-type:
- application/json
cookie:
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFVNbxw3DL37VxBzyWV3YTtZ2/EtLerCQNH2kCAFmmDBlTgzjDXURKR2
vQny3wtpNp7Nx6EXr0eUqPceH6nPZwAN++YWGtejuWEMy1+vLm6Gi93rF9f4x+/h7V26/zi8fPv3
xT/34c1vzaKciNsP5OzrqZWLwxjIOMoUdonQqGS9uF6v1+vz5zc3NTBET6Ec60ZbvojLgYWXl+eX
L5bn18uLm+PpPrIjbW7h3zMAgM/1b8Epnh6bWzhffF0ZSBU7am6fNgE0KYay0qAqq6FYs5iDLoqR
VOj3IHEPDgU63hEgdAU2oOieEsA7uWPBAK/q9y287glY2BgDoGA4KCvEFqwn8GgILLsYdqSgtKOE
AVxiY4cB1GjUFdxxUlvAnmDIasCexLg91Awac3L0TcIFkGhOLB1Yj1bWD4CJIFFg3AYCFF8+aIdi
YLGenErDuznXmGJZWsFf4ugEroKLIZAz8hXUSKmNaQCEsdwwsGA6AD1i+a8Ut1zhenIP0MYE6FxO
6A4VxdEBJKS6gBDjQ4Fdt8kBBlYt3zsMueBK4FldohHFMenqnbyTP+lx0sahURcTfzrFKhZhIBSW
rs0BlLqBxHQBOI7hUHJvUdmBGhrrpPpA1kevBbXmYcCa8oEO0BJaTqVQ2fWAWjMvYCDP5bfwKUZd
weuetci3Y08KLMpdbzqhqdhYLfE2V3GqDCRWKm8kniq304JWnq+857IfQzgsYMeaMfCnqu8MqGe1
2CUcdAHb+AhjiIVsTKAOzShNK9UNx2YrNLdUY1k8peL86o4pdc+jVohjPS8Ke7aeZQZXDK50RATI
XqGnMALLk1OrFROJL1iyBaakk15jLF1VWyMRVtYuiqMklfRdTtZTGmKiWmNUJdW5vsUobApZccuB
7VBuRe8TTcapHTKS45YdfMykk1xo0KP47xuFDTBwd+R42gPPFLqIQVfwy9R2JH6qEOsPzV2R7jkE
6LHOBxcIE8QdpR3T/rSyzxS0CNNZP6m8J3wovUC6gC6zL9hyseIek1coQgDL0tNofRkchVF3NEFp
Gv8hq1WLgxB58lWiNhffTpIde5ejrOBNMB7QqDiqUmljFo+TzeZhpWST5mrY0WnGumXqmjFFV4FX
Hp4cK0dZDlg7etKojpfV6VhN1GbFMtolh3ASQJFoE7Ey0N8fI1+eRniI3ZjiVr872rQsrP2muClK
GddqcWxq9MsZwPv6VORvpn8zpjiMtrH4QPW6i/V6ytfML9QcvXx+fYxaNAxz4PnLy8VPEm48GXLQ
k9emceh68vPR+WnC7DmeBM5OaP8I52e5J+os3f9JPweco9HIb8ZEnt23lOdtiT7Uyf/zbU8yV8CN
Fsc72hhTKqXw1GIO07va6EGNhk3L0lEaE0+Paztu1lfn2F7Rev2yOfty9h8AAAD//wMAaw+BEmoI
AAA=
headers:
CF-RAY:
- 97144c8758cd1abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:53:12 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM;
path=/; expires=Mon, 18-Aug-25 21:23:12 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '4008'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '4027'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999825'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999825'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f287350aa2ac4662b9a5e01e85cc221f
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Second Agent. Second
backstory\nYour personal goal is: Second goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Process secondary data\n\nTrigger Payload: Context data\n\nThis is the expected
criteria for your final answer: Secondary analysis\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nThis is the context
you''re working with:\nThe initial analysis of the data involves several critical
steps. First, we must identify the sources of the data, ensuring that they are
reliable and relevant to the objectives of the project. Once the data is collected,
we perform a preliminary examination to check for accuracy and completeness,
looking for any missing values or discrepancies.\n\nNext, we categorize the
data into meaningful segments, applying basic statistical methods to summarize
key features such as mean, median, and mode. This provides insights into the
distribution and central tendencies of the data.\n\nAdditionally, visualizations
such as histograms, box plots, or scatter plots are created to better understand
relationships and patterns within the data. These visual aids help in identifying
trends, outliers, and potential areas of concern.\n\nFurthermore, we assess
the data for its usability in addressing the specific questions at hand, ensuring
that it aligns with the project''s goals. By the end of this initial analysis,
we will have a clear overview of the data''s strengths and weaknesses, guiding
us towards more in-depth investigations or adjustments needed for future data
collection. Ultimately, this foundational analysis sets the stage for future
analytical processes and decision-making initiatives.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '2214'
content-type:
- application/json
cookie:
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFZNbxw5Dr37VxB9yaVt2JlpO/EtM0Awe5hd7GIWGGAzMGiJVcVYJSmk
1O3OIP99QKn6w94c5lLoFiWKfHx84p8XACv2q3tYuQmLm3O4/Pn25t2XX/9T7z7+evP77vdNdP++
peH6brz95W1ere1EevxMrhxOXbk050CFU+xmJ4SFzOvN3Waz2Vz/8P5tM8zJU7BjYy6XP6bLmSNf
vr1+++Pl9d3lzbvl9JTYka7u4X8XAAB/tq/FGT09r+7hen1YmUkVR1rdHzcBrCQFW1mhKmvBWFbr
k9GlWCi20P8BMe3AYYSRtwQIo4UNGHVHAvApfuSIAT60//fw20TAkQtjAIwY9soKaYAyEXgsCBy3
KWxJQWlLggGccGGHAbRQ1iv4yKJlDTuCuWoB9hQLD/vmQVMVRy8croGiVuE4Qpmw2PoeUAiEAuNj
IMDo7Q9tMRYoqZ3speHtyVeWZEtX8NvEehalk1o48td+AYET8vzIgcvejhK6aQlrDahKqoedM5Up
+RTSyKRQlTwMSToKLoVgAaS4bvG5FAeW+VUWgccIOy5T8yekhOImQJ716lP8FP8VHZ0hqwe35Bt+
mWRIMgNCNjBmjih7oGe0X3a3oeEmck8tMHSuCrr9ElAjK0VSXTCZCaN9C7saUtWwh5DSk8Xcjsc9
zNzT32KopGvwNQd2WAgoFmFbMgRYnVDG6AyYlq9LNXjQJ9pZmjUUvYKfA2E8gNkybMCwghYcqeUr
1RnT7P4jDTgWGsXqwxFSlYWH1DH7Jz13dllYYxL+eg5hLKnlyXEcagClcaZY1KLeojQ+LREbxTJJ
4UidVnbTsURfKqkhfMBucdRRxxDSTlvQdjPmDpKZ0gCPqOwsxcLaG6MTSe0SrfOMLeYn2sNAWKqQ
XsFPe3AYXA1YTvTDuIaZPONCMtOVlvqIHFuNWn9wVB6noj37hgVrEX6sPVpjg5UPAxSKnnrZXrTg
bmJrAxJrGVRAGFKNvudkec5JaOHU88t6fPCebRuGsO91abIIW9aKgb82HwpajfgKE2tJo+Csa3hM
z5BDKtrTU4elkPQlA4tCdezNWW+f0H1NnLUdyG1/1NZiHI/5WM1IDyEAslfIAfeAsOWCAUw7jVwH
bWqQC0Vv/K4lMMkSU06mo00MhbCh5lJ0JFYSo4EdrS1ao61koXKiY0ONY6lsUhX2DbCPVcpEYraG
Vxed46E3JjW4CBRHQO/lTJQ0k+OB3Ymh1lUTRv9a+ZowHJvbpMgYfJKjRTDfKIwJQ0f0TFYNf+tI
ajEKQeoAi3HNoJ+u4EOXjJPzuRmFvlQWWnARokbVctanx3dAIPM2lRed3psGc5aEbmqY/dSfD7IQ
h64hrx+phuWOQ4AJ2ztnbBWaKKq9e2lLsmXanfP+jSmRUBzL1NPfET6ZYNKh7Wv0JPa6egOyeR8r
+yW1HYrXQ5EvPeUyGfxWl3GhvUHvP1ctXYYikV+ekaFa679+TYCGIYmp539D4RkLWVe1hE8def40
K5XOna6pZ57PsMySXEurZenJsXKKlzM+db01JFvV10BzTjuShdaNLFauwB4GwZl2SZ6sqo+Vg4ea
TWDUoPcUtvalTNKVyApKzzkkOWrjsUPPhxWhoSrawBRrCGcGjDF1yW1j0h+L5dtxMAppzJIe9dXR
1cCRdXqwjk3RhiAtKa+a9dsFwB9tAKsvZqpVljTn8lDSE7Xrfni/6f5Wp7nvzPrudrGWVDCcDHfv
btbfcfjgqSAHPZvhVg7dRP509DTwYfWczgwXZ2n/fzjf891T5zj+Hfcng3OUC/mHbEOSe5nyaZvQ
5zakfH/bEeYW8MoeFXb0UJjESuFpwBr6tLrSvRaaHwaOo2kn95F1yA+b22scbmmzeb+6+HbxFwAA
AP//AwAAHGphwAsAAA==
headers:
CF-RAY:
- 97144ca1b97b1abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:53:21 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '8604'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '8628'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999482'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999485'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_545a8ffcdf954433b9059a5b35dddf20
status:
code: 200
message: OK
version: 1

File diff suppressed because one or more lines are too long

View File

@@ -1,845 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1452'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFPRjtMwEHzPV6z83J5ySVvu8nYCIZ1ACKSCEOSUOs4mMefYlr2Boqr/
jpy0TQ7uxIsV7exMZmftQwTAZMUyYKLlJDqrlq837zbXn/flx6/fVPIp/fI+3ZbaxVv568ObR7YI
DFP+QEFn1pUwnVVI0ugRFg45YVC9frVeb5I0WacD0JkKVaA1lpYrs0ziZLWMb5bx5kRsjRToWQbf
IwCAw3AGi7rCPcsgXpwrHXrPG2TZpQmAOaNChXHvpSeuiS0mUBhNqAfXu90u19vW9E1LGdyDRqyA
DAiuFFCLIAkdJ6kbIGPGUi2dJyDZ4VWu70SYNpv6itB3rsO9tj1lcMiZDF8F4Z5ylkHO3g4qJ5rR
OTvOLTqse89DQrpXagZwrQ0NjCGchxNyvMShTGOdKf1fVFZLLX1bOOTe6DC6J2PZgB4jgIch9v5J
ksw601kqyDzi8Lvk9nbUY9OiJzRdn0AyxNWsnq4Wz+gVFRKXys8WxwQXLVYTddoy7ytpZkA0m/pf
N89pj5OPG/qv/AQIgZawKqzDSoqnE09tDsM7eKntkvJgmHl0P6XAgiS6sIkKa96r8Yoy/9sTdkUt
dYPOOjne09oWq01Z1zHG4oZFx+gPAAAA//8DAHvlcCKwAwAA
headers:
CF-RAY:
- 971b3f72effa6897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:35 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
path=/; expires=Tue, 19-Aug-25 17:37:35 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2564'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2751'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999674'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999674'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d654df1116aa42ca8ee7d10b4b424303
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1673'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOidFlg8v8G1LB6zosbvNhaNItK3NFjWJ7loE+e+D7CR2uw67
CAIf3xP5SB0TAGG0yECoWrJqXTPfpffp2tWqu93u7m+Xu+3Dl6/m96+X+pB+VmIWGXT4gYovrBtF
rWuQDdkBVh4lY1T98HGzSZer5SbtgZY0NpFWOZ6vab5cLNfzxXa+SM/EmozCIDL4ngAAHPszlmg1
PosMFrNLpMUQZIUiuyYBCE9NjAgZggksLYvZCCqyjLaver/f5/ZbTV1VcwZ3UMsnhHMXqIFrhNL4
wGAYvYyNgbQaLEaQwHlS52tMDajIaiCLN7n9pGJ6dmHaqmCi5hKHO+s6zuCYCxNvBeMz5yKDXDwM
KtcXc3GaVu+x7IKM5tmuaSaAtJa4Z/S+PZ6R09Wphirn6RDeUEVprAl14VEGstGVwOREj54SgMd+
It0rk4Xz1DoumH5i/9xqvRn0xLgDE3R7BplYNtP4avaOXqGRpWnCZKZCSVWjHqnjAshOG5oAyaTr
v6t5T3vofBjRf+VHQCl0jLpwHrVRrzse0zzGL/KvtKvLfcEioH8yCgs26OMkNJaya4btFeElMLZF
aWyF3nkzrHDpinV6KMsFLtRWJKfkDwAAAP//AwAMYddzywMAAA==
headers:
CF-RAY:
- 971b3f84cf146897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:38 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1900'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2551'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999629'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f4181fe581264993ac5c6deba4f1c287
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1922'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFNNb9swDL37VxA6J0XmfCz1bRswNBh2GXrYsBSOItG2ElsUJLrIUOS/
D7KT2N067CIIfHxP5CP1kgAIo0UGQlWSVePq6afVl9Xq9HAI3z6uj8eHz6os9OGrvt98T9sfYhIZ
tD+g4ivrTlHjamRDtoeVR8kYVd+9Xy5X6TxdrjugIY11pJWOpwuaprN0MZ2tp7PVhViRURhEBj8T
AICX7owlWo0nkcFsco00GIIsUWS3JADhqY4RIUMwgaVlMRlARZbRdlXvdrutfayoLSvOYAOVfEa4
dIEauEIIqMhqMIxexs5AWg0WI0rgPKnLNeZyZbwGsni3tR9UzM6uRFvmTFRf47CxruUMXrbCxFvO
eOKtyGArHjuR23tbcR4X77Fog4ze2bauR4C0lrhjdLY9XZDzzaiaSudpH/6gisJYE6rcowxkoymB
yYkOPScAT91A2lceC+epcZwzHbF7bn6/6PXEsAIjdH0BmVjWQ3wxTydv6OUaWZo6jEYqlFQV6oE6
zF+22tAISEZd/13NW9p95/2E/is/AEqhY9S586iNet3xkOYx/pB/pd1c7goWAf2zUZizQR8nobGQ
bd0vrwi/AmOTF8aW6J03/QYXLl+s9kUxw5lai+Sc/AYAAP//AwDpY7tZygMAAA==
headers:
CF-RAY:
- 971b3f958e746897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:39 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '890'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '906'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999577'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999577'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_b3632e88268747218e4cc4cc08d87bca
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}], "model": "gpt-4o",
"stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3018'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFPBbtswDL37Kwidk8JzErfxbdgwtNh1GwrMhaPItK3MFgWJLloE+fdB
dhK7W4ddBIGP74l8pI4RgNClyECoRrLqbLv8lH5Nb8k9Hu7T5PP37f361R1Wj1us6IeVYhEYtD+g
4gvrRlFnW2RNZoSVQ8kYVD/cbjZpsko22wHoqMQ20GrLyzUtkzhZL+O7ZZyeiQ1phV5k8DMCADgO
ZyjRlPgiMogXl0iH3ssaRXZNAhCO2hAR0nvtWRoWiwlUZBjNUPVut8vNt4b6uuEMHqCRzwjnLrAE
bhC40a4EzehkaAykKcFgAAmsI3W+htSKescNkMGb3HxUIT27ME1dMFF7icODsT1ncMyFDreC8YVz
kUEuvowq1xdzcZpX77DqvQzmmb5tZ4A0hnhgDL49nZHT1amWauto7/+gikob7ZvCofRkgiueyYoB
PUUAT8NE+jcmC+uos1ww/cLhuTRJRz0x7cCEru7OIBPLdsZK14t39IoSWerWz2YqlFQNlhN1WgDZ
l5pmQDTr+u9q3tMeOx9H9F/5CVAKLWNZWIelVm87ntIchi/yr7Sry0PBwqN71goL1ujCJEqsZN+O
2yv8q2fsikqbGp11elzhyhbrdF9VMcbqTkSn6DcAAAD//wMACjMtRssDAAA=
headers:
CF-RAY:
- 971b3f9bbef86897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:40 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1182'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1208'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999320'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999320'
x-ratelimit-reset-project-tokens:
- 1ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_7fc641fabc634f29ae085ef176071402
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
"content": "```\nThought: I have completed the third iteration and need to proceed
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
iteration\"}\nObservation: Iteration 0: Fourth iteration"}], "model": "gpt-4o",
"stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3267'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOieFmyZO5ts2bECxDwztgB3mwlFk2tZmi4JEFx2C/PdBchKn
W4ddBIGP74l8pPYJgNCVyEGoVrLqbTd/m33INvJ+0NnnT/ff3tTZR72483eSv7xLScwCg3Y/UPGJ
daWotx2yJjPCyqFkDKrX69UqW9wssjQCPVXYBVpjeb6k+SJdLOfpZp5mR2JLWqEXOXxPAAD28Qwl
mgqfRA5RJkZ69F42KPJzEoBw1IWIkN5rz9KwmE2gIsNoYtXb7bYwX1sampZzuIVWPiIcu8AKuEWo
aXDcgmZ0MnQG0lRgMKAE1pE6XmOurrkFMnhVmNcqZOcnomlKJupOcbg1duAc9oXQ4VYyPnEhcijE
+yhyfq8Qh8viHdaDl8E7M3TdBSCNIY6MaNvDETmcjeqosY52/g+qqLXRvi0dSk8mmOKZrIjoIQF4
iAMZnnksrKPecsn0E+Nz2Xo16olpBSb05tURZGLZTfH19XL2gl5ZIUvd+YuRCiVVi9VEneYvh0rT
BZBcdP13NS9pj52PE/qv/AQohZaxKq3DSqvnHU9pDsMP+Vfa2eVYsPDoHrXCkjW6MIkKazl04/IK
/8sz9mWtTYPOOj1ucG3LZbar6xRTtRHJIfkNAAD//wMANy12ZMoDAAA=
headers:
CF-RAY:
- 971b3fa3ea2f6897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:41 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '780'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '878'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999268'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999268'
x-ratelimit-reset-project-tokens:
- 1ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_603a6c645bac468888838d21c64db11f
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
"content": "```\nThought: I have completed the third iteration and need to proceed
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
iteration\"}\nObservation: Iteration 0: Fourth iteration"}, {"role": "assistant",
"content": "```\nThought: I have completed the fourth iteration and need to
proceed to the fifth one.\nAction: iterating_tool\nAction Input: {\"input_text\":
\"Fifth iteration\"}\nObservation: Iteration 0: Fifth iteration"}], "model":
"gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3514'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xTy26jMBTd8xVXXpOK0IRE7KqRInVazaZZTROBY1/AU2NT26QzqvLvI0MSSNuJ
ZsPC58E99/EeABDBSQqEVdSxupGTb8lDcne7pt8Xb/r14XX5tJiufiT1z8f944smoVfo3S9k7qS6
YbpuJDqhVQ8zg9Shd50u5vMkvo2TuANqzVF6Wdm4yUxP4iieTaLlJEqOwkoLhpak8BwAALx3X1+i
4vibpBCFp5caraUlkvRMAiBGS/9CqLXCOqocCQeQaeVQdVXneb5R60q3ZeVSuIeK7hGOKZADlRLm
IBwa6kPZm41aCUUl3Cn7hiaF5w25P6EQpbASxrpBsCEhfGA8IdOKX6WsK2GuM1a6Na66ThHFJWO7
UXmej/tgsGgt9WNQrZQjgCqlXZ/YT2B7RA7nnktdNkbv7AcpKYQStsoMUquV7691uiEdeggAtt1s
24txkcbounGZ0y/Y/W4Rz3s/MmzTgM6TI+i0o3KkWk7DL/wyjo4KaUfbQRhlFfJBOqwSbbnQIyAY
pf5czVfefXKhyv+xHwDGsHHIs8YgF+wy8UAz6I/tX7Rzl7uCiUWzFwwzJ9D4SXAsaCv7OyD2j3VY
Z4VQJZrGiP4YiiabJbuiiDBiSxIcgr8AAAD//wMAnPTcwxUEAAA=
headers:
CF-RAY:
- 971b3faa6ba46897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:43 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1392'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1841'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999217'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_19dc255cec9d4763be7d5f597c80e936
status:
code: 200
message: OK
version: 1

View File

@@ -1,228 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nTrigger
Payload: Important context data\n\nThis is the expected criteria for your final
answer: Analysis report\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '865'
content-type:
- application/json
cookie:
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'upstream connect error or disconnect/reset before headers. reset reason:
connection termination'
headers:
CF-RAY:
- 97144cd97d521abc-GRU
Connection:
- keep-alive
Content-Length:
- '95'
Content-Type:
- text/plain
Date:
- Mon, 18 Aug 2025 20:53:22 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
X-Content-Type-Options:
- nosniff
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
status:
code: 503
message: Service Unavailable
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nTrigger
Payload: Important context data\n\nThis is the expected criteria for your final
answer: Analysis report\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '865'
content-type:
- application/json
cookie:
- _cfuvid=FFe5KuJ6P4BUXOoz57aqNdKwRoz64NOw_EhuSGirJWc-1755550392539-0.0.1.1-604800000;
__cf_bm=VDTNVbhdzLyVi3fpAyOvoFppI0NEm6YkT9eWIm1wnrs-1755550392-1.0.1.1-vfYBbcAz.yp6ATfVycTWX6tFDJ.1yb_ghwed7t5GOMhNlsFeYYNGz4uupfWMnhc4QLK4UNXIeZGeGKJ.me4S240xKk6FUEu3F5tEAvhPnCM
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '1'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xXTW8cNxK961cUBtBFmBEkRfKHbrLiAI6xcBLvYRe7gVFDVneXxSZbLPaMx0H+
e1Bkf42kLPZiWM1hseq9V6/IP04AVmxXt7AyDSbTdm5z/+ry7cW/Ht99vfmIPz/uPt73b0MMP3/6
d/fxx3erte4I269k0rjr3IS2c5Q4+LJsImEijXr5+ubm5ubi+uIqL7TBktNtdZc212HTsufN1cXV
9ebi9ebyzbC7CWxIVrfwnxMAgD/yv5qnt/RtdQsX6/FLSyJY0+p2+hHAKganX1YowpLQp9V6XjTB
J/I59Q/gwx4Meqh5R4BQa9qAXvYUAf7rf2KPDu7y37f64ezszqM7CAv8Rl2I6exMP+vCB59isL1R
EMrXfzYEXR+7IAShgtSwQMy7gAVSgC6GHVs9WPGL1JCXnMh4Ri/sa0gNQc76WwKLCcd9FtjnxRS5
rilChwcX0J7DuwPQN1Rsh+0dxcSefIIdRsatIwH0FtiST1wd8u8ieSvrozyR25ypJcc7irBD1+tu
YC9cN0kgNZgyhOyrEFuwZFg4+E2LDxq1i8GQCMl5gelHLeDTjuKOaT/jpHUJJa1TORMFTFMNvUBL
KbIRMME5MoksBM0FQToyXLHR8jjYNZBXIJV2XwPqchKQ3jSAAoJadcV1H0nWYHpJoaUI5GusqSWf
1hmT0FFEZREdUFWxYfLmcA4f6bBAj71xvaVbreryHM7OPufwWt7t2Rn8I/jUuMPxoSBU6zlkYXtQ
ZFQuYDBRHSJniK401P2Y2vsptRxzwCGS0+ZSXqYi2CeKmMUn6yE5BWFPW+FEsGPhJGuQYBgdtGQZ
QQOXDVp3RWS3aB5AedRcftBcPi3QeD+hoelMnRCqI8xGuuYkcEcRa4JI0gUvBIlbWkPVu4qd0ywg
YqIhj0gS+mgI0LlgctSldsZji3YW0P9CUTNHb6isAcAGVFyFhNw67C0r4AIIXRBO2m9Z+UVU2iwO
JUEV+giPPcZEUdaw59QA+vwjdE6Li4SlsS9vTrUTI+3I93Sej2y4bkjSpitJDZ2gfAtgpLHxmKxK
85dBCncZgPGvd+vc9pG3fcoNGuDVxWm2kpDQlbLOhzo/E0qGP0s00wp7igRhKxR3ZMcaQLj22jXo
04CMdPxAYPuop/x6vQZM5dQisiY4tpgF24YcOfPxd1JdgH//vMcmeUgT9oDgQxoMZQCU/TPR9p32
y9XNafZJjCWt7GuRdtkjigMU7BeHqawg+GPZdw5T1jg0KIBOAkQW8nrIDxen61Ekiob0Ru2r6h20
GB8ofzTYdsi1n8Cf6jzuoSwJdIo5Jpaq9OeQVAVvbk4HUkxo2+Chi8iidhHi5A6PPTpOhyyMqduV
UTaUafgfXbpg4s7PU+VJv84elznRwQDc6owpGLJ/qVP3DZsGGtzN1GVW3mpRkSp16nH6UFXpXzvy
JPl49jvyKcQDtOgHrkYs714yi4LJwu0ee1bHLAlYGhOoYmjh+g00oY95cF1dl/+vc0c6nVmaFflG
fULPVaNaINt3efA9HVtq/j+pLnw9OE+xFhPaLfsMZSlszKR01hFrC11KX9ck4/iclSVJ4a21MnWJ
CbjJGL1qZ0/OFeHk6Y+xpgTYWyVRRbGBpR4WXJao6qt7PU0xrVCSDlNvoQ3lyKyGNIpsnP2H526U
TW5ByzOVS05msqaF28jkxNlVM+i9V6HrVMOOEzr+nnt3sh2NMOMz2FVH+DB0/8jVfdDJI4t7GMvi
TuUtRTFB57HC50l1o+oKPrHv87VqR5JG8c/sUFWFOJJmo/LyErmKpUbmVqtSnak0WmSfsFzJFLij
HtRbqsVos2ZHDb6I6zl8njHIiTgaOmYYcMPFbM/OAflsrprNVi+T2n/PAJ7R56HR6hj2ed7Zp06g
kao+9ZEGsH8jNS/ytsycsTfelwYbxEn2Jfc8orBoYyAyJxG6xC1/JztOVk17OHkD9wNb0AbPKeRA
5pkNpzBdcVX6WCKPyl7UlmN+yLznMsk0PrhQH/KGLaWUbee5bekRI7WF12dmmZHKIvyba/5w2R4a
TaAw/eSeHaDu9bFQ0J/pHC/cOidHaU/X/vGGw22HJqkMxw4ywEo46rJkFQw3m26+Rp0v302Rql5Q
326+d26xgF5neGZfX2y/Dyt/Tm80F+ouhq082bqq2LM0X2K2B32PSQrdKq/+eQLwe34L9kfPu5Wa
QZe+pPBA+bjLV1cl3mp+gs6rN9NqvjHNC6+vrtcvBPxiKSE7WTwnVwZNQ3beOr891XHDYuFkUfbz
dF6KXUpnX/8/4ecFY6hLZL90kSyb45Lnn0X6ml9LL//sBP4CAAD//4IGM9jBStAsEV+SmVoEioqU
1LTE0hxIx1mpuLK4JDU3Pi0zLz21qKAoE9J7TiuINzVMSbIwSUxLTFLiquUCAAAA//8DANr6751L
EAAA
headers:
CF-RAY:
- 97144ce12be51abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:53:29 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '6350'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '6385'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999820'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999820'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_633dd1e17cb44249af3d9408f3d3c21b
status:
code: 200
message: OK
version: 1

View File

@@ -1,156 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nThis
is the expected criteria for your final answer: Analysis report\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '822'
content-type:
- application/json
cookie:
- _cfuvid=wu1mwFBixM_Cn8wLLh.nRacWi8OMVBrEyBNuF_Htz6I-1743463498282-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//fFfbjhtHDn33VxAC5mXQEmY8lh3Mm9e3DHa9NpzZC3YdBFQ31V2Z6mKn
WCVZCfLvC7JKLY2d7IsAdXfxcnh4yPrtCcDCdYtbWLQDpnac/PLV8+vv/n1P7+7yx+f/eeOHXQ7/
/P6uf0fxYfqwaPQEb36mNh1PrVoeJ0/JcSiv20iYSK1ev1iv1+urmxc39mLkjrwe66e0fMbL0QW3
fHr19Nny6sXy+rt6emDXkixu4b9PAAB+s1+NM3T0ZXELV83xyUgi2NPidv4IYBHZ65MFijhJGNKi
Ob1sOSQKFvodBN5DiwF6tyNA6DVswCB7igCfw1sX0MNL+38Ln8PncHn5MqA/iBP4RBPHdHlZHl+v
4C6kyF1uFYbLSz1/PziBaN/BFEkoJAEERSvSQEHM7dEgbyENBB0m/cR7ahN1wDuK9tyjJPglY0wU
V3A/EPSMvpxycjLjBBJDR9GMWzS48QQuiOuHJA24jkJy2wOkSKGTBjB04MKW4wgdtU4ch+WIDy70
MEVuSYQEthxhm1OOBJIiJuodyapk/3QFrzXsDzuKO0f7Y/o1G6FUknZqaMw+uckTTBhxpERRwIXW
504dCnp15vocSRposyQeKUJHI/cRp8G10mhYijS0GgdHRzWLno4foYfOSYpukxWCFfyVDrDD6BSM
Ctev1FXPdKuJLOHy8gfz/7b4v7y8hXtO6GtYkXYUMjWAO4rYE6SIQQrIsEOvr3JwSUDYd6ti8dUx
hddnKajllz010FPoKDbguUU1U/KYcmwHFAVkQwPuHMdq7WPN/NWcuZr6SFHLh6HVmkcWgc5ttxQp
PAbpBPTIksyXJ2XWxFP2GI/ISnX37hzQ12eAqteC1fStb8WZs+LVOw5HltyUIrx1QQOQQpIT5vfG
RrX7OQAsq3UXVEyEOtgc4Hp9YUTCSJ2yXPtiirQzZ7U3Gti7NABC4GTEl8k9EHQ5atZ6YmDvOjyA
EAqHVfGmZP3TqgJ6YYgsZFFcXTSAqUBRAhlcP1A8sRX3GCmQiCGcMPakX44YHyhpHC2OE7q+YvP/
aHIKb4puxHg4edmgELQcVOZMPxTZnesyegHsqYOn6+XNswawbTkHc6xt/OzqwnTDmF2pZr2snt7S
iJ5mJwKc04StRf/o+eYAT68uwAXTKPQeZCIrbbX0IXgXCGTgabLWzrGnrtG+T65VtvkD4Mihhxw3
GE62G8tEGyL0gCCD2yZIvMfYKbF7p4FT6LGnkUKaMfyqOQ7nfTFD+UaVNXJwrQCNpDEBStFYwq5U
p56vVLq5uqi1AxkwqphW9EwXar7f80hFhTB2FOZWAsE9oEqr4eFCVZPNAdYXpmXjVDNVLtNei3Oi
ykltLRx1rYIv1NfcrVmMy1rOo9NvCS8grg9u61oMyR9gwyzKykeSa2VdX8yQPhKAuzpDZizf488c
j9WjkKx2JBO1Dr0/GE4Dwd85psFkRuH5F0mCV4ySGiXHXltIo9QeknQUWVVGiqaJNcdPOaIHVDlo
YD847W1r8cbOoKcOckg4TdTBxDrmHfrGRkcwFBUQDso6WF/M0jJX5CvaBaLO+mXuX84pErZDFbRn
K/hELY8jhc4CPdO093P9fij1O6iyvQmD6eSRw6cq03bLMUn1pU8OnEP/1eQrhJwoWrGdDrC60JSx
EXlkC2QF/0hOPwBhrQWM1DmEyWPShijj/NRBj4pmiqILRKXPWQDzFgE9poFUhreRx3lx+aYT33Kb
TdM/EeoaIZWQf9gpwDqydDaopmoTxqPK0479riwkZGOtJVM8jTWrZKzgVQ0bNjl0vi4vpRc4gq2k
1k5nnVLBUieRtDeSkiFRJEl/3AFvvkwYpM6/17Qjz5MqVO3RVhEOsNWsC9kgzqyFiQKlSmm4Cy45
THQaBZDQea6jTfcB/yhbK/CR1bqv+awvooVPlVibWHigy2ZLp+HCqv5Zx6Qtau85uMQKteZxp9v7
aCTSkW17gC1uVY7qEqh+HogmnY/tg6E/YOhNMOeRVLeVEm7VzLJnruBvVAesbV9J+ZyYvRnGnHjE
RGcMm/u/NKEbyR++XlBrJ66t/K3PMi/fH8pQaow+p+247L4qpmdiCH3kvTbWDK9Gb0oDkX7JznrB
jdZEiXT463daxr8cADsu+q2e4lEQVFhn5S5RcOwxuF9LTnrvOFbtOEDPVqnmJE8zuILJybYsJmVP
1FVaUVNgbOEfWffzs8xmNbUMK1zPV/BysmH95ahYRot0XI47La3KkfM+z9JZBkWx9KeredWh8/X8
cJ7Yqvh7T2ngjj33B8hShfb87qMYWRVq6Sz08xuVUNxpsHql2nKuEozeOGXmsq7WegPsjtVpc7SV
2GPopMWpqlB29kW93hSACyvbwdGOSvVy0vndwUZ7W/uh3ILdjmR1fsWMtM2Ces0N2fuzFxh0JzXj
ern9sb75fb7Oeu6nyBv56uhi64KT4ado0qVXV0k8Lezt708AfrRrc350E16UteKnxA9k7q7X62Jv
UW7r/wMAAP//jFi9DoIhDNx5DGaHbxDi9zSEtEVr/CHANzj47gYwFiOD85XLHSXAtcoQ1Jr9G23/
GgEOy7qbEDqkelvlIXlr8HAilKUS0/2GfB8ANdj+lTPj7tb5dvyHXgAAioXQxUTI8G1ZyhKdW9ie
l322uQnW9dgxkCtMqbYCKfjt0mcMOj9yoasLXF/umLgPGkJ0xi4+WDJm1eqpXgAAAP//AwCGkEKG
dhEAAA==
headers:
CF-RAY:
- 97144c27cad01abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:53:07 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=gumItH7ZRtD4GgE2NL8KJd5b0g0ukzMySphsV0ru1LE-1755550387-1.0.1.1-iwCn2q9kDpJVTaZu1Swtv1kYCiM39NBeviV1R9awG4XHHMKnojkbu6T7jh_Z3UxfNbluVCsI6RMKj.2rEPp1IcH63gHUQdJfHF71CdCZ3Uc;
path=/; expires=Mon, 18-Aug-25 21:23:07 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=d7iU8FXLKWOoICtn52jYIApBpBp20kALP6yQjOvXHvQ-1755550387858-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '14516'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '14596'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3c1af5f5590a4b76b33f3fbf7d3a3288
status:
code: 200
message: OK
version: 1

View File

@@ -1,154 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Analyze the data\n\nThis
is the expected criteria for your final answer: Analysis report\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '822'
content-type:
- application/json
cookie:
- _cfuvid=aoRHJvKio8gVXmGaYpzTzdGuWwkBsDAyAKAVwm6QUbE-1743465392324-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFddcxu3Dn3Pr8BoJi8eSWM5luP6zbFbN7d1m0n81nQyEAntIuaSe0lQ
itLpf++A+6G1rztzX+zVcgEeAAeH4F+vAGZsZ1cwMzWKaVq3uLlY/XD5n7uffzvLFw+r28tV+nT3
7vdV/hDuf8bZXC3C5isZGayWJjStI+Hgu2UTCYXU6+rter1en56vTstCEyw5NataWZyHRcOeF2en
Z+eL07eL1WVvXQc2lGZX8McrAIC/yl/F6S19m11B8VXeNJQSVjS7Gj8CmMXg9M0MU+Ik6GU2Py6a
4IV8gf4efNiDQQ8V7wgQKoUN6NOeIsBn/xN7dHBdfl/BZ//Zn5xce3SHxAk+UhuinJx0r1dLeO8l
BpuNpuHk5EodPNScIJYPAblJIAFQHXwnkJqgjWHHlixYFIREAuwhREtRv6RvEtEINISefbXNDtgn
rmpJIDVKgc5+G2IDSSIKVWzAkuHEwadlh+xsCbfq/ZaSidw+QUfdxiZ4zVSCsIUmO+HWEewwMm4c
pTmwNy5b9hVssoAPAo4bFrKKMqGjBFuuctRvTU4SGopgqQlVxLZmk+ZQ0fADXY8ZFcoc0FsQbigJ
Nm2BIBF9wpLHtIQf0dRAXuJBkyM9Zs1VpDZSIi8JELLn/2aa2s4BnQt7hb0NETTMpo1Uk0+l3EMh
wxbaHE2NqURINe44RAg7UqPUkuEtk4WWIgfbZ/XNEu5J6mCDC9VhktDRLSe10EDJQi6+k6BwkpKD
plgnSNnUgAnsUJ4dHb/TfIYYyZVcjb67pEWqIiUtNZS20h2UX8lQFGQPg12quU2wIdkT+WNde17s
OGV0/L3bQkJwCTASoEsBsrDj712h2bnc0Qwe6QASydtUkLQoQnFk3PkSfmKvfEl9Yj77BZycfCpM
eSh2/QKA5uwJhSDVYa+J58rzlg16gdzuMdpuy64wMk11V5k57FlqaAkfe4/BmByj5sXm8q8Oji0e
IBGmQq774KVeBL9o9AGqGPZSg8aYAHcUsSILq/Xr3rfuWnNVU5J+i0hG+9UqN2/JULOhuBwCvhl6
4XbSC2Pk17CJhI827L1y8MXGgUg7Qtf3u27f4NcQWQ4T2lKCPUVdsgQbbRPLO7ZZ7UoAZ+vFm/M5
oDEhexka4vz0dem2IOie9dy1tayP6Nzh2NJJg8xxg14Jgkl1QyJvcq8EF1OHJT3zgsVg2RNVpIKv
oI20pUjeUMHRdqqp8JTOG3YshzGHd1PhGMR3zOFHqkaYgzRAylVFSSZJ+y1EqfdaNQUOIcuxNYPU
FLWZNPS+zthXd8IHjcwTRneAs/VrOBD2EqFPSyhKb0J2FjYEKNO8CMaK9LnB+EglFwabFrnyCRxm
b+qOQAWvIhyjf6CmDREdvO+F/8ge/0TBJvVTNS1qU4d9n4PSExORq0OOCdBx5buA9Zi02mIbVSpK
qftkPsif0RSPErI6gw/3pfPfwIf7+ZBw9Rxa4Ub5pIo+lLcJhVcQ4pHl5CusqCGvZx4LoyrfoCDr
JdxMdO9Z2adLJtB2y4bLKdCTjUaihcRFUF9WwhEZOtDU7ViYOkXryr8LLjc0cmKyl6b8dHm57guv
muCGg5mAtWiCSnA9Uceq03YbYsEJNvKuFEk36qO+WMJHMqFpyNsO7hP5fBhodD86vBloNNLiHSbl
tJ+qyDg1zEGQXYhPqDhMDqVcpjuoQnfQFt1QCdFGyO3z9m/wAAcmZyHlTRm0GB1EkjycBQr7d2UE
fyf4JME8wq+0I3fEe8c76jZjS166k7bLvpI2/YtylyKx1zlT431O7Tlg28bQRu7YoBs73RjQfs1J
mkKXVA/9WpRTAjREAm3QAVFDsdQUMuRYUXpZkX781qJPx5kK4EaHKUsFtcYVe4nSk14FbphiRk2a
A0vJ5YZgQ56UYtpBAdjvtK2qEgQ37DC+XDgtjLcUe1UraltKpPOZcJOduujUrCfbW20xnevS04Hw
eKh2s2mCTlV0ZHhh/NyiUb1W991Up8NsP4EuGnwsULwtfCfYZsmRjiLT49VOKXCEfQ45QRM8S4iD
MVpspev3sJ3GvRnZjoKLsoc/gtyzc5pUE3PJKHtokL3ORkWDQ9OSdAKBdodesBp78XIJ121LKig0
sHUxvPoG11dwS9pMZKEba8os9VAGq2ffvruCO+VLR9qbGlUC3g/DlCL5hQ7jxPTM+OZqOmfCu3Fi
UGn9NJknx3tJ0Yv+OPqXeXerZzlCDJucBLYh93pTnB5vEq1D74cahJYi9mLJjZKDui5ScewpHWKF
vp8kl9NLV6RtTqgXP5+dmyyg96GrbLnu/dmv/D1e8Fyo2hg26ZnpbMueU/0lFjnQy1yS0M7K6t+v
AP4sF8n85G44U71v5YuERyrbrdbrzt/seH89rl6s3vSrZaA5Lry9uJy/4PCLLYRIk7vozKAe7UfT
48UVs+UwWXg1Cft/4bzkuwudffX/uD8uGEOtkP3S/gMAAP//KkpNyUxG9TJCWVEqqH+PSxk8mMEO
VipOLSrLTE6NL8lMLQJFRUpqWmJpDqTXrVRcWVySmhuflpmXnlpUUJQJ6XqnFcSbmhkkppmlmppa
KnHVcgEAAAD//wMABbo03YgQAAA=
headers:
CF-RAY:
- 97144d0daeb11abc-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 18 Aug 2025 20:53:43 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=UW4fV15_S2h9VQ58d_nhU200TOxc3Tjdd_QFUBY6B80-1755550423-1.0.1.1-.oSX43E.zjFk61gbEHMacZh5c8ndmynl75bstCvKcohtwVY6oLpdBWnO2lTUFXpzvGaGsbuYt55OUo_Hmi228z97Nm4cDdOT84lhfStAcms;
path=/; expires=Mon, 18-Aug-25 21:23:43 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=dg9d3YnyfwVQNRGWo64PZ6mtqIOlYEozligD5ggvZFc-1755550423708-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '13654'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '13673'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999827'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_169cd22058fb418f90f12e041c0880a9
status:
code: 200
message: OK
version: 1

View File

@@ -497,131 +497,6 @@ def test_unstructured_flow_event_emission():
assert isinstance(received_events[6].timestamp, datetime)
def test_flow_trigger_payload_injection():
captured_payload = []
class TriggerFlow(Flow):
@start()
def start_method(self, crewai_trigger_payload=None):
captured_payload.append(crewai_trigger_payload)
return "started"
@listen(start_method)
def second_method(self):
captured_payload.append("no_parameter")
return "finished"
flow = TriggerFlow()
test_payload = "This is important trigger data"
flow.kickoff(inputs={"crewai_trigger_payload": test_payload})
assert captured_payload == [test_payload, "no_parameter"]
def test_flow_trigger_payload_injection_multiple_starts():
captured_payloads = []
class MultiStartFlow(Flow):
@start()
def start_method_1(self, crewai_trigger_payload=None):
captured_payloads.append(("start_1", crewai_trigger_payload))
return "start_1_done"
@start()
def start_method_2(self, crewai_trigger_payload=None):
captured_payloads.append(("start_2", crewai_trigger_payload))
return "start_2_done"
flow = MultiStartFlow()
test_payload = "Multiple start trigger data"
flow.kickoff(inputs={"crewai_trigger_payload": test_payload})
assert captured_payloads == [("start_1", test_payload), ("start_2", test_payload)]
def test_flow_without_trigger_payload():
captured_payload = []
class NormalFlow(Flow):
@start()
def start_method(self, crewai_trigger_payload=None):
captured_payload.append(crewai_trigger_payload)
return "no_trigger"
flow = NormalFlow()
flow.kickoff(inputs={"other_data": "some value"})
assert captured_payload[0] is None
def test_flow_trigger_payload_with_structured_state():
class TriggerState(BaseModel):
id: str = "test"
message: str = ""
class StructuredFlow(Flow[TriggerState]):
@start()
def start_method(self, crewai_trigger_payload=None):
return crewai_trigger_payload
flow = StructuredFlow()
test_payload = "Structured state trigger data"
result = flow.kickoff(inputs={"crewai_trigger_payload": test_payload})
assert result == test_payload
def test_flow_start_method_without_trigger_parameter():
execution_order = []
class FlowWithoutParameter(Flow):
@start()
def start_without_param(self):
execution_order.append("start_executed")
return "started"
@listen(start_without_param)
def second_method(self):
execution_order.append("second_executed")
return "finished"
flow = FlowWithoutParameter()
result = flow.kickoff(inputs={"crewai_trigger_payload": "some data"})
assert execution_order == ["start_executed", "second_executed"]
assert result == "finished"
def test_async_flow_with_trigger_payload():
captured_payload = []
class AsyncTriggerFlow(Flow):
@start()
async def async_start_method(self, crewai_trigger_payload=None):
captured_payload.append(crewai_trigger_payload)
await asyncio.sleep(0.01)
return "async_started"
@listen(async_start_method)
async def async_second_method(self, result):
captured_payload.append(result)
await asyncio.sleep(0.01)
return "async_finished"
flow = AsyncTriggerFlow()
test_payload = "Async trigger data"
result = asyncio.run(flow.kickoff_async(inputs={"crewai_trigger_payload": test_payload}))
assert captured_payload == [test_payload, "async_started"]
assert result == "async_finished"
def test_structured_flow_event_emission():
"""Test that the correct events are emitted during structured flow
execution with all fields validated."""

View File

@@ -1,12 +1,9 @@
import asyncio
from typing import Callable
import inspect
import unittest
from typing import Any, Callable, Dict, List
from unittest.mock import patch
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tools import BaseTool, tool
@@ -114,26 +111,25 @@ def test_result_as_answer_in_tool_decorator():
def my_tool_with_result_as_answer(question: str) -> str:
"""This tool will return its result as the final answer."""
return question
assert my_tool_with_result_as_answer.result_as_answer is True
converted_tool = my_tool_with_result_as_answer.to_structured_tool()
assert converted_tool.result_as_answer is True
@tool("Tool with default result_as_answer")
def my_tool_with_default(question: str) -> str:
"""This tool uses the default result_as_answer value."""
return question
assert my_tool_with_default.result_as_answer is False
converted_tool = my_tool_with_default.to_structured_tool()
assert converted_tool.result_as_answer is False
class SyncTool(BaseTool):
"""Test implementation with a synchronous _run method"""
name: str = "sync_tool"
description: str = "A synchronous tool for testing"
@@ -144,7 +140,6 @@ class SyncTool(BaseTool):
class AsyncTool(BaseTool):
"""Test implementation with an asynchronous _run method"""
name: str = "async_tool"
description: str = "An asynchronous tool for testing"
@@ -179,7 +174,7 @@ def test_run_calls_asyncio_run_for_async_tools():
"""Test that asyncio.run is called when using async tools."""
async_tool = AsyncTool()
with patch("asyncio.run") as mock_run:
with patch('asyncio.run') as mock_run:
mock_run.return_value = "Processed test asynchronously"
async_result = async_tool.run(input_text="test")
@@ -191,43 +186,9 @@ def test_run_does_not_call_asyncio_run_for_sync_tools():
"""Test that asyncio.run is NOT called when using sync tools."""
sync_tool = SyncTool()
with patch("asyncio.run") as mock_run:
with patch('asyncio.run') as mock_run:
sync_result = sync_tool.run(input_text="test")
mock_run.assert_not_called()
assert sync_result == "Processed test synchronously"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_max_usage_count_is_respected():
class IteratingTool(BaseTool):
name: str = "iterating_tool"
description: str = "A tool that iterates a given number of times"
def _run(self, input_text: str):
return f"Iteration {input_text}"
tool = IteratingTool(max_usage_count=5)
agent = Agent(
role="Iterating Agent",
goal="Call the iterating tool 5 times",
backstory="You are an agent that iterates a given number of times",
tools=[tool],
)
task = Task(
description="Call the iterating tool 5 times",
expected_output="A list of the iterations",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
crew.kickoff()
assert tool.max_usage_count == 5
assert tool.current_usage_count == 5

View File

@@ -2,7 +2,6 @@ import os
import pytest
from unittest.mock import patch, MagicMock
from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, start
from crewai.utilities.events.listeners.tracing.trace_listener import (
@@ -322,74 +321,6 @@ class TestTraceListenerSetup:
FlowExample()
assert mock_listener_setup.call_count >= 1
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_listener_ephemeral_batch(self):
"""Test that trace listener properly handles ephemeral batches"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch(
"crewai.utilities.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
return_value=False,
),
):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello to the world",
expected_output="hello world",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], tracing=True)
with patch.object(TraceBatchManager, "initialize_batch") as mock_initialize:
crew.kickoff()
assert mock_initialize.call_count >= 1
assert mock_initialize.call_args_list[0][1]["use_ephemeral"] is True
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_listener_with_authenticated_user(self):
"""Test that trace listener properly handles authenticated batches"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch(
"crewai.utilities.events.listeners.tracing.trace_batch_manager.PlusAPI"
) as mock_plus_api_class,
):
mock_plus_api_instance = MagicMock()
mock_plus_api_class.return_value = mock_plus_api_instance
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello to the world",
expected_output="hello world",
agent=agent,
)
with (
patch.object(TraceBatchManager, "initialize_batch") as mock_initialize,
patch.object(
TraceBatchManager, "finalize_batch"
) as mock_finalize_backend_batch,
):
crew = Crew(agents=[agent], tasks=[task], tracing=True)
crew.kickoff()
mock_plus_api_class.assert_called_with(api_key="mock_token_12345")
assert mock_initialize.call_count >= 1
mock_finalize_backend_batch.assert_called_with()
assert mock_finalize_backend_batch.call_count >= 1
# Helper method to ensure cleanup
def teardown_method(self):
"""Cleanup after each test method"""

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,196 @@
from unittest.mock import Mock, patch
from crewai.utilities.events.third_party.agentops_listener import AgentOpsListener
from crewai.utilities.events.crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
)
from crewai.utilities.events.task_events import TaskEvaluationEvent
from crewai.utilities.events.tool_usage_events import (
ToolUsageStartedEvent,
ToolUsageErrorEvent,
)
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
class TestAgentOpsListener:
def test_agentops_listener_initialization_with_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
listener = AgentOpsListener()
assert listener.agentops is not None
def test_agentops_listener_initialization_without_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
assert listener.agentops is None
def test_setup_listeners_with_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
listener = AgentOpsListener()
mock_event_bus = Mock(spec=CrewAIEventsBus)
listener.setup_listeners(mock_event_bus)
assert mock_event_bus.register_handler.call_count == 5
mock_event_bus.register_handler.assert_any_call(
CrewKickoffStartedEvent, listener._handle_crew_kickoff_started
)
mock_event_bus.register_handler.assert_any_call(
CrewKickoffCompletedEvent, listener._handle_crew_kickoff_completed
)
mock_event_bus.register_handler.assert_any_call(
ToolUsageStartedEvent, listener._handle_tool_usage_started
)
mock_event_bus.register_handler.assert_any_call(
ToolUsageErrorEvent, listener._handle_tool_usage_error
)
mock_event_bus.register_handler.assert_any_call(
TaskEvaluationEvent, listener._handle_task_evaluation
)
def test_setup_listeners_without_agentops_installed(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
mock_event_bus = Mock(spec=CrewAIEventsBus)
listener.setup_listeners(mock_event_bus)
mock_event_bus.register_handler.assert_not_called()
def test_handle_crew_kickoff_started_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
mock_agentops.start_session.assert_called_once()
call_args = mock_agentops.start_session.call_args
assert call_args[1]["tags"] == ["crewai", "crew_kickoff"]
def test_handle_crew_kickoff_started_without_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
def test_handle_crew_kickoff_completed_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
mock_agentops.end_session.assert_called_once_with("Success")
def test_handle_crew_kickoff_completed_without_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops", side_effect=ImportError):
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
def test_handle_tool_usage_started_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = ToolUsageStartedEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_started(event)
mock_agentops.record.assert_called_once()
call_args = mock_agentops.record.call_args[0][0]
assert hasattr(call_args, "action_type")
def test_handle_tool_usage_error_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = ToolUsageErrorEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
error="Test error",
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_error(event)
mock_agentops.record.assert_called_once()
def test_handle_task_evaluation_with_agentops(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
listener = AgentOpsListener()
event = TaskEvaluationEvent(
task_id="test-task",
score=0.85,
feedback="Good performance"
)
listener._handle_task_evaluation(event)
mock_agentops.record.assert_called_once()
def test_handle_crew_kickoff_started_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.start_session.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = CrewKickoffStartedEvent(crew_id="test-crew")
listener._handle_crew_kickoff_started(event)
def test_handle_crew_kickoff_completed_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.end_session.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = CrewKickoffCompletedEvent(crew_id="test-crew", crew_output=Mock())
listener._handle_crew_kickoff_completed(event)
def test_handle_tool_usage_started_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = ToolUsageStartedEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_started(event)
def test_handle_tool_usage_error_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = ToolUsageErrorEvent(
tool_name="test_tool",
arguments={"arg1": "value1"},
error="Test error",
agent_id="test-agent",
task_id="test-task"
)
listener._handle_tool_usage_error(event)
def test_handle_task_evaluation_with_exception(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops") as mock_agentops:
mock_agentops.record.side_effect = Exception("Test exception")
listener = AgentOpsListener()
event = TaskEvaluationEvent(
task_id="test-task",
score=0.85,
feedback="Good performance"
)
listener._handle_task_evaluation(event)
def test_agentops_listener_instance_creation(self):
with patch("crewai.utilities.events.third_party.agentops_listener.agentops"):
from crewai.utilities.events.third_party.agentops_listener import agentops_listener
assert agentops_listener is not None
assert isinstance(agentops_listener, AgentOpsListener)

10
uv.lock generated
View File

@@ -1,5 +1,5 @@
version = 1
revision = 3
revision = 2
requires-python = ">=3.10, <3.14"
resolution-markers = [
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
@@ -778,7 +778,7 @@ requires-dist = [
{ name = "blinker", specifier = ">=1.9.0" },
{ name = "chromadb", specifier = ">=0.5.23" },
{ name = "click", specifier = ">=8.1.7" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.62.1" },
{ name = "crewai-tools", marker = "extra == 'tools'", specifier = "~=0.62.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = ">=2.12.0" },
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = "==0.25.2" },
@@ -830,7 +830,7 @@ dev = [
[[package]]
name = "crewai-tools"
version = "0.62.1"
version = "0.62.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "chromadb" },
@@ -848,9 +848,9 @@ dependencies = [
{ name = "stagehand" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e3/4b/23cd61a07105b54be8d1185e3b861b1bb2ac6050264acc060d91b22da171/crewai_tools-0.62.1.tar.gz", hash = "sha256:1819d09189e815a28f6744b6cffde703b9e9e438ef5f066c5b4dcdd75cc3c6ad", size = 1059792, upload-time = "2025-08-19T01:08:02.567Z" }
sdist = { url = "https://files.pythonhosted.org/packages/e9/24/0287edbaa33c52b5541a564d92d3324d4839a76a4b023540c0fd5c7ee330/crewai_tools-0.62.0.tar.gz", hash = "sha256:71a24c173677f108516e1cde286e476e9aeb60da78d911bec0f15caa3c6af15a", size = 1059534, upload-time = "2025-08-13T21:13:49.879Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/87/a6e8d5dff4718795d806cd54bdb1cb94c637d93951d00e87c0b55d431870/crewai_tools-0.62.1-py3-none-any.whl", hash = "sha256:d8333315c8bf35bdb939b22e5b555acf9357ae676b992832f96f63de21670871", size = 677041, upload-time = "2025-08-19T01:08:00.891Z" },
{ url = "https://files.pythonhosted.org/packages/a9/16/cf26f4eaf1edc2f62d0c9cb1ec97f573f8d7401663da047c52b3ce4c4628/crewai_tools-0.62.0-py3-none-any.whl", hash = "sha256:b5e7035563cb00601431286b1c56933966acea1f220052cd33e1d4ee35590017", size = 677008, upload-time = "2025-08-13T21:13:46.903Z" },
]
[[package]]