mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-04-26 12:52:39 +00:00

Compare commits: lorenze/im ... joaomdmour (16 Commits)

| SHA1 |
|---|
| 83afb17cf9 |
| 01df1ef3cf |
| 358fd92e6b |
| c4d4ea6c71 |
| 24c68d4053 |
| 320326e3e5 |
| b371f97a2f |
| 017189db78 |
| 02d911494f |
| 8102d0a6ca |
| ee374d01de |
| 9914e51199 |
| 2dbb83ae31 |
| 7377e1aa26 |
| 51754899a2 |
| 71b4f8402a |
@@ -21,7 +21,6 @@ OPENROUTER_API_KEY=fake-openrouter-key
AWS_ACCESS_KEY_ID=fake-aws-access-key
AWS_SECRET_ACCESS_KEY=fake-aws-secret-key
AWS_DEFAULT_REGION=us-east-1
AWS_REGION_NAME=us-east-1

# -----------------------------------------------------------------------------
# Azure OpenAI Configuration
5 .github/workflows/publish.yml vendored
@@ -1,8 +1,6 @@
name: Publish to PyPI

on:
  repository_dispatch:
    types: [deployment-tests-passed]
  workflow_dispatch:
    inputs:
      release_tag:
@@ -20,11 +18,8 @@ jobs:
      - name: Determine release tag
        id: release
        run: |
          # Priority: workflow_dispatch input > repository_dispatch payload > default branch
          if [ -n "${{ inputs.release_tag }}" ]; then
            echo "tag=${{ inputs.release_tag }}" >> $GITHUB_OUTPUT
          elif [ -n "${{ github.event.client_payload.release_tag }}" ]; then
            echo "tag=${{ github.event.client_payload.release_tag }}" >> $GITHUB_OUTPUT
          else
            echo "tag=" >> $GITHUB_OUTPUT
          fi
18 .github/workflows/trigger-deployment-tests.yml vendored
@@ -1,18 +0,0 @@
name: Trigger Deployment Tests

on:
  release:
    types: [published]

jobs:
  trigger:
    name: Trigger deployment tests
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deployment tests
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.CREWAI_DEPLOYMENTS_PAT }}
          repository: ${{ secrets.CREWAI_DEPLOYMENTS_REPOSITORY }}
          event-type: crewai-release
          client-payload: '{"release_tag": "${{ github.event.release.tag_name }}", "release_name": "${{ github.event.release.name }}"}'
2355 docs/docs.json
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
---
title: "Agents: Examples"
description: "Runnable examples for robust agent configuration and execution."
icon: "rocket-launch"
mode: "wide"
---

## Example links

- [/en/guides/agents/crafting-effective-agents](/en/guides/agents/crafting-effective-agents)
- [/en/learn/customizing-agents](/en/learn/customizing-agents)
- [/en/learn/coding-agents](/en/learn/coding-agents)
@@ -1,32 +0,0 @@
---
title: "Agents: Concepts"
description: "Agent role contracts, task boundaries, and decision criteria for robust agent behavior."
icon: "user"
mode: "wide"
---

## When to use

- You need specialized behavior with an explicit role and goal.
- You need tool-enabled execution under constraints.

## When not to use

- Static transformations are enough without model reasoning.
- The task can be solved by deterministic code only.

## Core decisions

| Decision | Choose this when |
|---|---|
| Single agent | Narrow scope, low coordination needs |
| Multi-agent crew | Distinct expertise and review loops needed |
| Tool-enabled agent | Model needs external actions or data |

## Canonical links

- Reference: [/en/ai/agents/reference](/en/ai/agents/reference)
- Patterns: [/en/ai/agents/patterns](/en/ai/agents/patterns)
- Troubleshooting: [/en/ai/agents/troubleshooting](/en/ai/agents/troubleshooting)
- Examples: [/en/ai/agents/examples](/en/ai/agents/examples)
- Existing docs: [/en/concepts/agents](/en/concepts/agents)
@@ -1,17 +0,0 @@
---
title: "Agents: Patterns"
description: "Practical agent patterns for role design, tool boundaries, and reliable outputs."
icon: "diagram-project"
mode: "wide"
---

## Patterns

1. Role + reviewer pair
   - One agent drafts, one agent validates.

2. Tool-bounded agent
   - Restrict tool list to minimal action set.

3. Structured output agent
   - Force JSON or schema output for automation pipelines.
@@ -1,22 +0,0 @@
---
title: "Agents: Reference"
description: "Reference for agent fields, prompt contracts, tool usage, and output constraints."
icon: "book"
mode: "wide"
---

## Agent contract

- `role`: stable operating identity
- `goal`: measurable completion objective
- `backstory`: bounded style and context
- `tools`: allowed action surface

## Output contract

- Prefer structured outputs for machine workflows.
- Define failure behavior for missing tool data.

## Canonical source

Primary API details live in [/en/concepts/agents](/en/concepts/agents).
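The agent contract above maps directly to constructor fields. A minimal sketch, assuming the standard `crewai.Agent` API; all field values here are illustrative:

```python
from crewai import Agent

# Illustrative values; only the four contract fields come from the page above.
analyst = Agent(
    role="Market Research Analyst",  # stable operating identity
    goal="Deliver a sourced summary of the top three competitors",  # measurable objective
    backstory="A terse analyst who reports only verifiable facts.",  # bounded style and context
    tools=[],  # allowed action surface; keep this list minimal
)
```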
@@ -1,12 +0,0 @@
---
title: "Agents: Troubleshooting"
description: "Diagnose and fix common agent reliability and instruction-following failures."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

- Hallucinated tool results: require tool-call evidence in output.
- Prompt drift: tighten role and success criteria.
- Verbose but low-signal output: enforce concise schema output.
@@ -1,12 +0,0 @@
---
title: "Crews: Examples"
description: "Runnable crew examples for sequential and hierarchical execution."
icon: "rocket-launch"
mode: "wide"
---

## Example links

- [/en/guides/crews/first-crew](/en/guides/crews/first-crew)
- [/en/learn/sequential-process](/en/learn/sequential-process)
- [/en/learn/hierarchical-process](/en/learn/hierarchical-process)
@@ -1,26 +0,0 @@
---
title: "Crews: Concepts"
description: "When to use crews, process selection, delegation boundaries, and collaboration strategy."
icon: "users"
mode: "wide"
---

## When to use

- You need multiple agents with specialized roles.
- You need staged execution and reviewer loops.

## Process decision table

| Process | Best for |
|---|---|
| Sequential | Linear pipelines and deterministic ordering |
| Hierarchical | Manager-controlled planning and delegation |

## Canonical links

- Reference: [/en/ai/crews/reference](/en/ai/crews/reference)
- Patterns: [/en/ai/crews/patterns](/en/ai/crews/patterns)
- Troubleshooting: [/en/ai/crews/troubleshooting](/en/ai/crews/troubleshooting)
- Examples: [/en/ai/crews/examples](/en/ai/crews/examples)
- Existing docs: [/en/concepts/crews](/en/concepts/crews)
@@ -1,12 +0,0 @@
---
title: "Crews: Patterns"
description: "Production crew patterns for decomposition, review loops, and hybrid orchestration with Flows."
icon: "diagram-project"
mode: "wide"
---

## Patterns

1. Researcher + writer + reviewer
2. Manager-directed hierarchical crew
3. Flow-orchestrated multi-crew pipeline
@@ -1,21 +0,0 @@
---
title: "Crews: Reference"
description: "Reference for crew composition, process semantics, task context passing, and execution modes."
icon: "book"
mode: "wide"
---

## Crew contract

- `agents`: available executors
- `tasks`: work units with expected output
- `process`: ordering and delegation semantics

## Runtime

- `kickoff()` for synchronous runs
- `kickoff_async()` for async execution

## Canonical source

Primary API details live in [/en/concepts/crews](/en/concepts/crews).
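A minimal sketch of this contract, assuming the standard `crewai` constructors; the roles, descriptions, and strings are illustrative:

```python
from crewai import Agent, Crew, Process, Task

researcher = Agent(role="Researcher", goal="Collect facts", backstory="Thorough and concise.")
writer = Agent(role="Writer", goal="Draft a summary", backstory="Plain, direct prose.")

research = Task(description="Research topic X", expected_output="Bullet list of facts", agent=researcher)
summary = Task(
    description="Summarize the research",
    expected_output="Three-paragraph brief",
    agent=writer,
    context=[research],  # pass the research output into this task
)

crew = Crew(
    agents=[researcher, writer],  # available executors
    tasks=[research, summary],    # work units with expected output
    process=Process.sequential,   # ordering and delegation semantics
)
result = crew.kickoff()           # use kickoff_async() for async execution
```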
@@ -1,12 +0,0 @@
---
title: "Crews: Troubleshooting"
description: "Common multi-agent coordination failures and practical fixes."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

- Agents overlap on responsibilities: tighten role boundaries.
- Output inconsistency: standardize expected outputs per task.
- Slow runs: reduce unnecessary handoffs and model size.
@@ -1,17 +0,0 @@
---
title: "Flows: Examples"
description: "Runnable end-to-end examples for production flow orchestration."
icon: "rocket-launch"
mode: "wide"
---

## Canonical examples

<CardGroup cols={2}>
  <Card title="Flowstate Chat History" icon="comments" href="/en/learn/flowstate-chat-history">
    Persistent chat history with summary compaction and memory scope.
  </Card>
  <Card title="Flows Concepts Example" icon="arrow-progress" href="/en/concepts/flows">
    Full API and feature-oriented flow examples, including routers and persistence.
  </Card>
</CardGroup>
@@ -1,39 +0,0 @@
---
title: "Flows: Concepts"
description: "When to use Flows, when not to use them, and key design constraints for production orchestration."
icon: "arrow-progress"
mode: "wide"
---

## When to use

- You need deterministic orchestration, branching, and resumable execution.
- You need explicit state transitions across steps.
- You need persistence, routing, and event-driven control.

## When not to use

- A single prompt/response interaction is enough.
- You only need one agent call without orchestration logic.

## Core decisions

| Decision | Choose this when |
|---|---|
| Unstructured state | Fast prototyping, highly dynamic fields |
| Structured state | Stable contracts, team development, type safety |
| `@persist()` | Long-running workflows and recovery requirements |
| Router labels | Deterministic branch handling |

## Canonical links

- Reference: [/en/ai/flows/reference](/en/ai/flows/reference)
- Patterns: [/en/ai/flows/patterns](/en/ai/flows/patterns)
- Troubleshooting: [/en/ai/flows/troubleshooting](/en/ai/flows/troubleshooting)
- Examples: [/en/ai/flows/examples](/en/ai/flows/examples)

## Existing docs

- [/en/concepts/flows](/en/concepts/flows)
- [/en/guides/flows/mastering-flow-state](/en/guides/flows/mastering-flow-state)
- [/en/learn/flowstate-chat-history](/en/learn/flowstate-chat-history)
@@ -1,29 +0,0 @@
---
title: "Flows: Patterns"
description: "Production flow patterns: triage routing, flowstate chat history, and human-in-the-loop checkpoints."
icon: "diagram-project"
mode: "wide"
---

## Recommended patterns

1. Triage router flow
   - Inputs: normalized request payload
   - Output: deterministic route label + action
   - Reference: [/en/concepts/flows](/en/concepts/flows)

2. Flowstate chat history
   - Inputs: `session_id`, `last_user_message`
   - Output: assistant reply + compact context state
   - Reference: [/en/learn/flowstate-chat-history](/en/learn/flowstate-chat-history)

3. Human feedback gates
   - Inputs: generated artifact + reviewer feedback
   - Output: approved/rejected/revision path
   - Reference: [/en/learn/human-feedback-in-flows](/en/learn/human-feedback-in-flows)

## Pattern requirements

- declare explicit input schema
- define expected output shape
- list failure modes and retries
@@ -1,34 +0,0 @@
---
title: "Flows: Reference"
description: "API-oriented reference for Flow decorators, lifecycle semantics, state, routing, and persistence."
icon: "book"
mode: "wide"
---

## Decorators

- `@start()` entrypoint, optional conditional trigger
- `@listen(...)` downstream method subscription
- `@router(...)` label-based deterministic routing
- `@persist()` automatic state persistence checkpoints

## Runtime contracts

- `kickoff(inputs=...)` initializes or updates run inputs.
- The final output is the value from the last completed method.
- `self.state` always has an auto-generated `id`.

## State contracts

- Use typed state for durable workflows.
- Keep control fields explicit (`route`, `status`, `retry_count`).
- Avoid storing unbounded raw transcripts in state.

## Resume and recovery

- Use persistence for recoverable runs.
- Keep idempotent step logic for safe retries.

## Canonical source

Primary API details live in [/en/concepts/flows](/en/concepts/flows).
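A compact sketch tying these decorators and runtime contracts together. The imports follow the flow example shown later in this diff; the state fields and the length threshold are illustrative:

```python
from crewai.flow.flow import Flow, listen, router, start
from crewai.flow.persistence import persist
from pydantic import BaseModel


class ReviewState(BaseModel):
    text: str = ""
    route: str = ""  # explicit control field


@persist()  # automatic state persistence checkpoints
class ReviewFlow(Flow[ReviewState]):
    @start()
    def ingest(self):
        # kickoff(inputs={"text": ...}) populates the typed state
        return self.state.text

    @router(ingest)
    def classify(self):
        # The returned label must match a listener label exactly.
        self.state.route = "long" if len(self.state.text) > 500 else "short"
        return self.state.route

    @listen("long")
    def handle_long(self):
        return "summarize first"

    @listen("short")
    def handle_short(self):
        return "process directly"
```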
@@ -1,28 +0,0 @@
---
title: "Flows: Troubleshooting"
description: "Common flow failures, causes, and fixes for state, routing, persistence, and resumption."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

### Branch did not trigger

- Cause: router label mismatch.
- Fix: align returned label with `@listen("label")` exactly.

### State fields missing

- Cause: untyped dynamic writes or missing inputs.
- Fix: switch to typed state and validate required fields at `@start()`.

### Context window blow-up

- Cause: raw message accumulation.
- Fix: use sliding window + summary compaction pattern.

### Resume behavior inconsistent

- Cause: non-idempotent side effects in retried steps.
- Fix: make side-effecting calls idempotent and record execution markers in state.
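For the context-window fix, a plain-Python sketch of the sliding window plus summary compaction; `summarize` is a stand-in for whatever model call your flow already makes:

```python
def compact_history(messages: list[dict], summarize, keep_last: int = 10) -> list[dict]:
    """Keep the most recent turns verbatim; fold older turns into one summary message."""
    if len(messages) <= keep_last:
        return messages
    older, recent = messages[:-keep_last], messages[-keep_last:]
    summary = summarize(older)  # e.g., one LLM call that returns a short digest string
    return [{"role": "system", "content": f"Conversation so far: {summary}"}] + recent
```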
@@ -1,12 +0,0 @@
---
title: "LLMs: Examples"
description: "Concrete examples for model setup, routing, and output-control patterns."
icon: "rocket-launch"
mode: "wide"
---

## Example links

- [/en/concepts/llms](/en/concepts/llms)
- [/en/learn/llm-connections](/en/learn/llm-connections)
- [/en/learn/custom-llm](/en/learn/custom-llm)
@@ -1,27 +0,0 @@
---
title: "LLMs: Concepts"
description: "Model selection strategy, cost-quality tradeoffs, and reliability posture for CrewAI systems."
icon: "microchip-ai"
mode: "wide"
---

## When to use advanced LLM configuration

- You need predictable quality, latency, and cost control.
- You need model routing by task type.

## Core decisions

| Decision | Choose this when |
|---|---|
| Single model | Small systems with uniform task profile |
| Routed models | Mixed workloads with different quality/cost needs |
| Structured output | Automation pipelines and strict parsing needs |

## Canonical links

- Reference: [/en/ai/llms/reference](/en/ai/llms/reference)
- Patterns: [/en/ai/llms/patterns](/en/ai/llms/patterns)
- Troubleshooting: [/en/ai/llms/troubleshooting](/en/ai/llms/troubleshooting)
- Examples: [/en/ai/llms/examples](/en/ai/llms/examples)
- Existing docs: [/en/concepts/llms](/en/concepts/llms)
@@ -1,17 +0,0 @@
---
title: "LLMs: Patterns"
description: "Model routing, reliability defaults, and structured outputs for production AI workflows."
icon: "diagram-project"
mode: "wide"
---

## Patterns

1. Role-based model routing
2. Reliability defaults (`timeout`, `max_retries`, low temperature)
3. JSON-first outputs for machine consumption
4. Responses API for multi-turn reasoning flows

## Reference

- [/en/concepts/llms#production-llm-patterns](/en/concepts/llms#production-llm-patterns)
@@ -1,25 +0,0 @@
---
title: "LLMs: Reference"
description: "Provider-agnostic LLM configuration reference for CrewAI projects."
icon: "book"
mode: "wide"
---

## Common parameters

- `model`
- `temperature`
- `max_tokens`
- `timeout`
- `max_retries`
- `response_format`

## Contract guidance

- Set low temperature for extraction/classification.
- Use structured outputs for downstream automation.
- Set explicit timeout and retry policy for production.

## Canonical source

Primary API details live in [/en/concepts/llms](/en/concepts/llms).
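A sketch wiring the parameters above into `crewai.LLM`. The exact keyword set can vary by release, so treat this as illustrative and verify against your installed version:

```python
from crewai import LLM

# Reliability-first defaults for extraction/classification workloads.
extraction_llm = LLM(
    model="gpt-4o-mini",
    temperature=0,        # low temperature for deterministic extraction
    max_tokens=1024,
    timeout=60,           # explicit production timeout
    max_retries=2,        # bounded retry policy
    response_format={"type": "json_object"},  # structured output for automation
)
```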
@@ -1,12 +0,0 @@
---
title: "LLMs: Troubleshooting"
description: "Fix common model behavior failures: drift, latency spikes, malformed output, and cost overruns."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

- Malformed JSON: enforce `response_format` and validate at boundary.
- Latency spikes: route heavy tasks to smaller models when acceptable.
- Cost growth: add budget-aware model routing and truncation rules.
@@ -1,11 +0,0 @@
---
title: "Memory: Examples"
description: "Runnable examples for scoped storage and semantic retrieval in CrewAI."
icon: "rocket-launch"
mode: "wide"
---

## Example links

- [/en/concepts/memory](/en/concepts/memory)
- [/en/learn/flowstate-chat-history](/en/learn/flowstate-chat-history)
@@ -1,24 +0,0 @@
---
title: "Memory: Concepts"
description: "Designing recall systems with scope boundaries and state-vs-memory separation."
icon: "database"
mode: "wide"
---

## When to use memory

- You need semantic recall across runs.
- You need long-term context outside immediate flow state.

## When to use state instead

- Data is only needed for current control flow.
- Data must remain deterministic and explicit per step.

## Canonical links

- Reference: [/en/ai/memory/reference](/en/ai/memory/reference)
- Patterns: [/en/ai/memory/patterns](/en/ai/memory/patterns)
- Troubleshooting: [/en/ai/memory/troubleshooting](/en/ai/memory/troubleshooting)
- Examples: [/en/ai/memory/examples](/en/ai/memory/examples)
- Existing docs: [/en/concepts/memory](/en/concepts/memory)
@@ -1,17 +0,0 @@
---
title: "Memory: Patterns"
description: "Practical memory patterns for session recall, scoped retrieval, and hybrid flow-state designs."
icon: "diagram-project"
mode: "wide"
---

## Patterns

1. Session-scoped recall (`/chat/{session_id}`)
2. Project-scoped knowledge (`/project/{project_id}`)
3. Hybrid pattern: flow state for control, memory for long-tail context

## Reference

- [/en/learn/flowstate-chat-history](/en/learn/flowstate-chat-history)
- [/en/guides/flows/mastering-flow-state](/en/guides/flows/mastering-flow-state)
@@ -1,23 +0,0 @@
---
title: "Memory: Reference"
description: "Reference for remember/recall contracts, scopes, and retrieval tuning."
icon: "book"
mode: "wide"
---

## API surface

- `remember(content, scope=...)`
- `recall(query, limit=...)`
- `extract_memories(text)`
- `scope(path)` and `subscope(name)`

## Scope rules

- use `/{entity_type}/{identifier}` paths
- keep hierarchy shallow
- isolate sessions by stable identifiers

## Canonical source

Primary API details live in [/en/concepts/memory](/en/concepts/memory).
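A sketch of the `remember`/`recall` surface as used by the `SupportTriageFlow` example elsewhere in this diff; the session identifier is a hypothetical placeholder:

```python
from crewai.flow.flow import Flow, start


class SessionFlow(Flow):
    @start()
    def note_preferences(self):
        session_id = "sess-42"  # hypothetical; use your own stable session key
        # Scope paths follow /{entity_type}/{identifier} and stay shallow.
        self.remember("Customer prefers email follow-ups", scope=f"/session/{session_id}")
        return self.recall("follow-up preferences", limit=3)
```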
@@ -1,12 +0,0 @@
---
title: "Memory: Troubleshooting"
description: "Diagnose poor recall quality, scope leakage, and stale memory retrieval."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

- Irrelevant recall: tighten scopes and query wording.
- Missing recall: check scope path and recency weighting.
- Scope leakage: avoid shared broad scopes for unrelated workflows.
@@ -1,54 +0,0 @@
---
title: "AI-First Documentation"
description: "Canonical, agent-optimized documentation map for Flows, Agents, Crews, LLMs, Memory, and Tools."
icon: "sitemap"
mode: "wide"
---

## Purpose

This section is the canonical map for AI agents and developers.

Use it when you need:
- one source of truth per domain
- predictable page structure
- runnable patterns with explicit inputs and outputs

## Domain Packs

<CardGroup cols={3}>
  <Card title="Flows" icon="arrow-progress" href="/en/ai/flows/index">
    State, routing, persistence, resume, and orchestration lifecycle.
  </Card>
  <Card title="Agents" icon="user" href="/en/ai/agents/index">
    Agent contracts, tool boundaries, prompt roles, and output discipline.
  </Card>
  <Card title="Crews" icon="users" href="/en/ai/crews/index">
    Multi-agent execution, process choice, delegation, and coordination.
  </Card>
  <Card title="LLMs" icon="microchip-ai" href="/en/ai/llms/index">
    Model configuration contracts, routing, reliability defaults, and providers.
  </Card>
  <Card title="Memory" icon="database" href="/en/ai/memory/index">
    Retrieval semantics, scope design, and state-vs-memory architecture.
  </Card>
  <Card title="Tools" icon="wrench" href="/en/ai/tools/index">
    Tool safety, schema contracts, retries, and integration patterns.
  </Card>
</CardGroup>

## Writing Contract

Every domain follows the same structure:
1. Concepts (`index`)
2. Reference (`reference`)
3. Patterns (`patterns`)
4. Troubleshooting (`troubleshooting`)
5. Examples (`examples`)

## Deprecation Policy

When a page is replaced:
- keep a redirect for the old URL
- keep one canonical destination
- avoid duplicated conceptual prose
@@ -1,12 +0,0 @@
---
title: "Tools: Examples"
description: "Practical examples for tool-driven agents and crews."
icon: "rocket-launch"
mode: "wide"
---

## Example links

- [/en/tools/overview](/en/tools/overview)
- [/en/learn/create-custom-tools](/en/learn/create-custom-tools)
- [/en/learn/tool-hooks](/en/learn/tool-hooks)
@@ -1,25 +0,0 @@
---
title: "Tools: Concepts"
description: "Tool selection strategy, safety boundaries, and reliability rules for agentic execution."
icon: "wrench"
mode: "wide"
---

## When to use tools

- Agents need external data or side effects.
- Deterministic systems must be integrated into agent workflows.

## Tool safety rules

- define clear input schemas
- validate outputs before downstream use
- isolate privileged tools behind policy checks

## Canonical links

- Reference: [/en/ai/tools/reference](/en/ai/tools/reference)
- Patterns: [/en/ai/tools/patterns](/en/ai/tools/patterns)
- Troubleshooting: [/en/ai/tools/troubleshooting](/en/ai/tools/troubleshooting)
- Examples: [/en/ai/tools/examples](/en/ai/tools/examples)
- Existing docs: [/en/concepts/tools](/en/concepts/tools)
@@ -1,12 +0,0 @@
---
title: "Tools: Patterns"
description: "Tool execution patterns for retrieval, action safety, and response grounding."
icon: "diagram-project"
mode: "wide"
---

## Patterns

1. Read-first then write pattern
2. Validation gate before side effects
3. Fallback tool chains for degraded mode
@@ -1,22 +0,0 @@
---
title: "Tools: Reference"
description: "Reference for tool invocation contracts, argument schemas, and runtime safeguards."
icon: "book"
mode: "wide"
---

## Tool contract

- deterministic input schema
- stable output schema
- explicit error behavior

## Runtime safeguards

- timeout and retry policy
- idempotency for side effects
- validation before commit

## Canonical source

Primary API details live in [/en/concepts/tools](/en/concepts/tools).
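A sketch of this contract using `crewai.tools.BaseTool` with a Pydantic `args_schema`; the order-lookup domain and its logic are placeholders:

```python
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class OrderLookupInput(BaseModel):
    """Deterministic input schema: the model must supply a well-formed order id."""
    order_id: str = Field(..., pattern=r"^ORD-\d+$")


class OrderLookupTool(BaseTool):
    name: str = "order_lookup"
    description: str = "Look up an order's status by its ORD-XXXX identifier."
    args_schema: type[BaseModel] = OrderLookupInput

    def _run(self, order_id: str) -> str:
        # Placeholder for the real backend call; raise explicitly on failure
        # so callers see a stable, documented error behavior.
        if not order_id.startswith("ORD-"):
            raise ValueError(f"Unknown order id format: {order_id}")
        return f"Order {order_id}: shipped"
```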
@@ -1,12 +0,0 @@
---
title: "Tools: Troubleshooting"
description: "Common tool-call failures and fixes for schema mismatch, retries, and side effects."
icon: "circle-exclamation"
mode: "wide"
---

## Common issues

- Schema mismatch: align tool args with declared model output schema.
- Repeated side effects: add idempotency keys.
- Tool timeouts: define retries with bounded backoff.
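For the repeated-side-effects fix, a sketch of an idempotency-key guard; the in-memory key set is a stand-in for a shared cache or database table:

```python
import hashlib

_seen_keys: set[str] = set()  # stand-in; use a shared cache or table in production


def create_ticket_once(payload: str) -> str:
    """Derive a stable idempotency key from the payload and skip duplicate writes."""
    key = hashlib.sha256(payload.encode()).hexdigest()
    if key in _seen_keys:
        return f"duplicate suppressed (key={key[:8]})"
    _seen_keys.add(key)
    # ... perform the real side effect exactly once here ...
    return f"ticket created (key={key[:8]})"
```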
@@ -23,17 +23,6 @@ In the CrewAI framework, an `Agent` is an autonomous unit that can:
  at creating content.
</Tip>

## When to Use Agents

- You need role-specific reasoning and decision-making.
- You need tool-enabled execution with delegated responsibilities.
- You need reusable behavioral units across tasks and crews.

## When Not to Use Agents

- Deterministic business logic in plain code is sufficient.
- A static transformation without reasoning is sufficient.

<Note type="info" title="Enterprise Enhancement: Visual Agent Builder">
CrewAI AMP includes a Visual Agent Builder that simplifies agent creation and configuration without writing code. Design your agents visually and test them in real-time.
@@ -9,17 +9,6 @@ mode: "wide"

A crew in crewAI represents a collaborative group of agents working together to achieve a set of tasks. Each crew defines the strategy for task execution, agent collaboration, and the overall workflow.

## When to Use Crews

- You need multiple specialized agents collaborating on a shared outcome.
- You need process-level orchestration (`sequential` or `hierarchical`).
- You need task-level handoffs and context propagation.

## When Not to Use Crews

- A single agent can complete the work end-to-end.
- You do not need multi-step task decomposition.

## Crew Attributes

| Attribute | Parameters | Description |
@@ -428,17 +417,3 @@ crewai replay -t <task_id>
```

These commands let you replay from your latest kickoff tasks, still retaining context from previously executed tasks.

## Common Failure Modes

### Agents overlap responsibilities
- Cause: role/goal definitions are too broad.
- Fix: tighten role boundaries and task ownership.

### Hierarchical runs stall or degrade
- Cause: weak manager configuration or unclear delegation criteria.
- Fix: define a stronger manager objective and explicit completion criteria.

### Crew outputs are inconsistent
- Cause: expected outputs are underspecified across tasks.
- Fix: enforce structured outputs and stronger task contracts.
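For the inconsistent-outputs fix, a sketch using the `output_pydantic` option on `Task` so every run must satisfy one schema; the model and strings are illustrative:

```python
from crewai import Agent, Task
from pydantic import BaseModel


class CompetitorBrief(BaseModel):
    name: str
    strengths: list[str]
    risk_level: str


analyst = Agent(role="Analyst", goal="Profile one competitor", backstory="Fact-driven.")

profile_task = Task(
    description="Profile the named competitor.",
    expected_output="A CompetitorBrief object.",
    agent=analyst,
    output_pydantic=CompetitorBrief,  # enforce one schema across runs
)
```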
@@ -19,121 +19,82 @@ Flows allow you to create structured, event-driven workflows. They provide a sea

4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.

## When to Use Flows

- You need deterministic orchestration and branching logic.
- You need explicit state transitions across multiple steps.
- You need resumable workflows with persistence.
- You need to combine crews, direct model calls, and Python logic in one runtime.

## When Not to Use Flows

- A single prompt/response call is sufficient.
- A single crew kickoff with no orchestration logic is sufficient.
- You do not need stateful multi-step execution.

## Getting Started

The example below shows a realistic Flow for support-ticket triage. It demonstrates features teams use in production: typed state, routing, memory access, and persistence.
Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.
```python Code
-from crewai.flow.flow import Flow, listen, router, start
-from crewai.flow.persistence import persist
-from pydantic import BaseModel, Field
+from crewai.flow.flow import Flow, listen, start
+from dotenv import load_dotenv
+from litellm import completion


-class SupportTriageState(BaseModel):
-    ticket_id: str = ""
-    customer_tier: str = "standard"  # standard | enterprise
-    issue: str = ""
-    urgency: str = "normal"
-    route: str = ""
-    draft_reply: str = ""
-    internal_notes: list[str] = Field(default_factory=list)
+class ExampleFlow(Flow):
+    model = "gpt-4o-mini"


-@persist()
-class SupportTriageFlow(Flow[SupportTriageState]):
     @start()
-    def ingest_ticket(self):
-        # kickoff(inputs={...}) is merged into typed state fields
-        print(f"Flow State ID: {self.state.id}")
+    def generate_city(self):
+        print("Starting flow")
+        # Each flow state automatically gets a unique ID
+        print(f"Flow State ID: {self.state['id']}")

-        self.remember(
-            f"Ticket {self.state.ticket_id}: {self.state.issue}",
-            scope=f"/support/{self.state.ticket_id}",
+        response = completion(
+            model=self.model,
+            messages=[
+                {
+                    "role": "user",
+                    "content": "Return the name of a random city in the world.",
+                },
+            ],
         )

-        issue = self.state.issue.lower()
-        if "security" in issue or "breach" in issue:
-            self.state.urgency = "critical"
-        elif self.state.customer_tier == "enterprise":
-            self.state.urgency = "high"
-        else:
-            self.state.urgency = "normal"
+        random_city = response["choices"][0]["message"]["content"]
+        # Store the city in our state
+        self.state["city"] = random_city
+        print(f"Random City: {random_city}")

-        return self.state.issue
+        return random_city

-    @router(ingest_ticket)
-    def route_ticket(self):
-        issue = self.state.issue.lower()
-        if "security" in issue or "breach" in issue:
-            self.state.route = "security"
-            return "security_review"
-        if self.state.customer_tier == "enterprise" or self.state.urgency == "high":
-            self.state.route = "priority"
-            return "priority_queue"
-        self.state.route = "standard"
-        return "standard_queue"
-
-    @listen("security_review")
-    def handle_security(self):
-        self.state.internal_notes.append("Escalated to Security Incident Response")
-        self.state.draft_reply = (
-            "We have escalated your case to our security team and will update you shortly."
+    @listen(generate_city)
+    def generate_fun_fact(self, random_city):
+        response = completion(
+            model=self.model,
+            messages=[
+                {
+                    "role": "user",
+                    "content": f"Tell me a fun fact about {random_city}",
+                },
+            ],
         )
-        return self.state.draft_reply
-
-    @listen("priority_queue")
-    def handle_priority(self):
-        history = self.recall("SLA commitments for enterprise support", limit=2)
-        self.state.internal_notes.append(
-            f"Loaded {len(history)} memory hits for priority handling"
-        )
-        self.state.draft_reply = (
-            "Your ticket has been prioritized and assigned to a senior support engineer."
-        )
-        return self.state.draft_reply
-
-    @listen("standard_queue")
-    def handle_standard(self):
-        self.state.internal_notes.append("Routed to standard support queue")
-        self.state.draft_reply = "Thanks for reporting this. Our team will follow up soon."
-        return self.state.draft_reply
+        fun_fact = response["choices"][0]["message"]["content"]
+        # Store the fun fact in our state
+        self.state["fun_fact"] = fun_fact
+        return fun_fact


-flow = SupportTriageFlow()
-flow.plot("support_triage_flow")
-result = flow.kickoff(
-    inputs={
-        "ticket_id": "TCK-1024",
-        "customer_tier": "enterprise",
-        "issue": "Cannot access SSO after enabling new policy",
-    }
-)
-print("Final reply:", result)
-print("Route:", flow.state.route)
-print("Notes:", flow.state.internal_notes)
+flow = ExampleFlow()
+flow.plot()
+result = flow.kickoff()
+
+print(f"Generated fun fact: {result}")
```


In this example, one flow demonstrates several core features together:
1. `@start()` initializes and normalizes state for downstream steps.
2. `@router()` performs deterministic branching into labeled routes.
3. Route listeners implement lane-specific behavior (`security`, `priority`, `standard`).
4. `@persist()` keeps the flow state recoverable between runs.
5. Built-in memory methods (`remember`, `recall`) add durable context beyond a single method call.
In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.

This pattern mirrors typical production workflows where request classification, policy-aware routing, and auditable state all happen in one orchestrated flow.
Each Flow instance automatically receives a unique identifier (UUID) in its state, which helps track and manage flow executions. The state can also store additional data (like the generated city and fun fact) that persists throughout the flow's execution.

When you run the Flow, it will:
1. Generate a unique ID for the flow state
2. Generate a random city and store it in the state
3. Generate a fun fact about that city and store it in the state
4. Print the results to the console

The state's unique ID and stored data can be useful for tracking flow executions and maintaining context between tasks.

**Note:** Ensure you have set up your `.env` file to store your `OPENAI_API_KEY`. This key is necessary for authenticating requests to the OpenAI API.

### @start()
@@ -156,15 +117,15 @@ The `@listen()` decorator can be used in several ways:
1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.

```python Code
-@listen("upstream_method")
-def downstream_method(self, upstream_result):
+@listen("generate_city")
+def generate_fun_fact(self, random_city):
     # Implementation
```

2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.
```python Code
-@listen(upstream_method)
-def downstream_method(self, upstream_result):
+@listen(generate_city)
+def generate_fun_fact(self, random_city):
     # Implementation
```
@@ -780,17 +741,201 @@ This example demonstrates several key features of using Agents in flows:

3. **Tool Integration**: Agents can use tools (like `WebsiteSearchTool`) to enhance their capabilities.

## Multi-Crew Flows and Plotting
## Adding Crews to Flows

Detailed build walkthroughs and project scaffolding are documented in guide pages to keep this concepts page focused.
Creating a flow with multiple crews in CrewAI is straightforward.

- Build your first flow: [/en/guides/flows/first-flow](/en/guides/flows/first-flow)
- Master state and persistence: [/en/guides/flows/mastering-flow-state](/en/guides/flows/mastering-flow-state)
- Real-world chat-state pattern: [/en/learn/flowstate-chat-history](/en/learn/flowstate-chat-history)
You can generate a new CrewAI project that includes all the scaffolding needed to create a flow with multiple crews by running the following command:

For visualization:
- Use `flow.plot("my_flow_plot")` in code, or
- Use `crewai flow plot` in CLI projects.

```bash
crewai create flow name_of_flow
```

This command will generate a new CrewAI project with the necessary folder structure. The generated project includes a prebuilt crew called `poem_crew` that is already working. You can use this crew as a template by copying, pasting, and editing it to create other crews.

### Folder Structure

After running the `crewai create flow name_of_flow` command, you will see a folder structure similar to the following:

| Directory/File | Description |
| :--------------------- | :----------------------------------------------------------------- |
| `name_of_flow/` | Root directory for the flow. |
| ├── `crews/` | Contains directories for specific crews. |
| │ └── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |
| │ ├── `config/` | Configuration files directory for the "poem_crew". |
| │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew". |
| │ │ └── `tasks.yaml` | YAML file defining the tasks for "poem_crew". |
| │ ├── `poem_crew.py` | Script for "poem_crew" functionality. |
| ├── `tools/` | Directory for additional tools used in the flow. |
| │ └── `custom_tool.py` | Custom tool implementation. |
| ├── `main.py` | Main script for running the flow. |
| ├── `README.md` | Project description and instructions. |
| ├── `pyproject.toml` | Configuration file for project dependencies and settings. |
| └── `.gitignore` | Specifies files and directories to ignore in version control. |

### Building Your Crews

In the `crews` folder, you can define multiple crews. Each crew will have its own folder containing configuration files and the crew definition file. For example, the `poem_crew` folder contains:

- `config/agents.yaml`: Defines the agents for the crew.
- `config/tasks.yaml`: Defines the tasks for the crew.
- `poem_crew.py`: Contains the crew definition, including agents, tasks, and the crew itself.

You can copy, paste, and edit the `poem_crew` to create other crews.

### Connecting Crews in `main.py`

The `main.py` file is where you create your flow and connect the crews together. You can define your flow by using the `Flow` class and the decorators `@start` and `@listen` to specify the flow of execution.

Here's an example of how you can connect the `poem_crew` in the `main.py` file:

```python Code
#!/usr/bin/env python
from random import randint

from pydantic import BaseModel
from crewai.flow.flow import Flow, listen, start
from .crews.poem_crew.poem_crew import PoemCrew


class PoemState(BaseModel):
    sentence_count: int = 1
    poem: str = ""


class PoemFlow(Flow[PoemState]):

    @start()
    def generate_sentence_count(self):
        print("Generating sentence count")
        self.state.sentence_count = randint(1, 5)

    @listen(generate_sentence_count)
    def generate_poem(self):
        print("Generating poem")
        result = PoemCrew().crew().kickoff(inputs={"sentence_count": self.state.sentence_count})

        print("Poem generated", result.raw)
        self.state.poem = result.raw

    @listen(generate_poem)
    def save_poem(self):
        print("Saving poem")
        with open("poem.txt", "w") as f:
            f.write(self.state.poem)


def kickoff():
    poem_flow = PoemFlow()
    poem_flow.kickoff()


def plot():
    poem_flow = PoemFlow()
    poem_flow.plot("PoemFlowPlot")


if __name__ == "__main__":
    kickoff()
    plot()
```

In this example, the `PoemFlow` class defines a flow that generates a sentence count, uses the `PoemCrew` to generate a poem, and then saves the poem to a file. The flow is kicked off by calling the `kickoff()` method. The PoemFlowPlot will be generated by the `plot()` method.



### Running the Flow

(Optional) Before running the flow, you can install the dependencies by running:

```bash
crewai install
```

Once all of the dependencies are installed, you need to activate the virtual environment by running:

```bash
source .venv/bin/activate
```

After activating the virtual environment, you can run the flow by executing one of the following commands:

```bash
crewai flow kickoff
```

or

```bash
uv run kickoff
```

The flow will execute, and you should see the output in the console.

## Plot Flows

Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.

### What are Plots?

Plots in CrewAI are graphical representations of your AI workflows. They display the various tasks, their connections, and the flow of data between them. This visualization helps in understanding the sequence of operations, identifying bottlenecks, and ensuring that the workflow logic aligns with your expectations.

### How to Generate a Plot

CrewAI provides two convenient methods to generate plots of your flows:

#### Option 1: Using the `plot()` Method

If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.

```python Code
# Assuming you have a flow instance
flow.plot("my_flow_plot")
```

This will generate a file named `my_flow_plot.html` in your current directory. You can open this file in a web browser to view the interactive plot.

#### Option 2: Using the Command Line

If you are working within a structured CrewAI project, you can generate a plot using the command line. This is particularly useful for larger projects where you want to visualize the entire flow setup.

```bash
crewai flow plot
```

This command will generate an HTML file with the plot of your flow, similar to the `plot()` method. The file will be saved in your project directory, and you can open it in a web browser to explore the flow.

### Understanding the Plot

The generated plot will display nodes representing the tasks in your flow, with directed edges indicating the flow of execution. The plot is interactive, allowing you to zoom in and out, and hover over nodes to see additional details.

By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.

### Conclusion

Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.

## Next Steps

If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:

1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)

2. **Lead Score Flow**: This flow showcases adding human-in-the-loop feedback and handling different conditional branches using the router. It's an excellent example of how to incorporate dynamic decision-making and human oversight into your workflows. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/lead-score-flow)

3. **Write a Book Flow**: This example excels at chaining multiple crews together, where the output of one crew is used by another. Specifically, one crew outlines an entire book, and another crew generates chapters based on the outline. Eventually, everything is connected to produce a complete book. This flow is perfect for complex, multi-step processes that require coordination between different tasks. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/write_a_book_with_flows)

4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)

By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.

Also, check out our YouTube video on how to use flows in CrewAI below!

<iframe
  className="w-full aspect-video rounded-xl"
  src="https://www.youtube.com/embed/MTb5my6VOT8"
  title="CrewAI Flows overview"
  frameBorder="0"
  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
  referrerPolicy="strict-origin-when-cross-origin"
  allowFullScreen
></iframe>

## Running Flows
@@ -801,7 +946,7 @@ There are two ways to run a flow:

You can run a flow programmatically by creating an instance of your flow class and calling the `kickoff()` method:

```python
-flow = SupportTriageFlow()
+flow = ExampleFlow()
result = flow.kickoff()
```
@@ -920,21 +1065,3 @@ crewai flow kickoff
```

However, the `crewai run` command is now the preferred method as it works for both crews and flows.

## Common Failure Modes

### Router branch not firing
- Cause: returned label does not match a `@listen("label")` value.
- Fix: align router return strings with listener labels exactly.

### State fields missing at runtime
- Cause: untyped dynamic fields or missing kickoff inputs.
- Fix: use typed state and validate required fields in `@start()`.

### Prompt/token growth over time
- Cause: appending unbounded message history in state.
- Fix: apply sliding-window state and summary compaction patterns.

### Non-idempotent retries
- Cause: side effects executed on retried steps.
- Fix: add idempotency keys/markers to state and guard external writes.
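For the non-idempotent-retries fix, a sketch of an execution marker recorded in flow state before an external write; the state shape and the notification step are illustrative:

```python
from crewai.flow.flow import Flow, start
from pydantic import BaseModel, Field


class JobState(BaseModel):
    completed_steps: list[str] = Field(default_factory=list)


class JobFlow(Flow[JobState]):
    @start()
    def send_notification(self):
        # Guard the side effect with a marker so a retried step becomes a no-op.
        if "notify" in self.state.completed_steps:
            return "already sent"
        # ... external write goes here (e.g., call your notification API) ...
        self.state.completed_steps.append("notify")
        return "sent"
```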
File diff suppressed because it is too large
@@ -156,7 +156,6 @@ class ResearchFlow(Flow):
```

See the [Flows documentation](/concepts/flows) for more on memory in Flows.
For a production-style conversational pattern that combines Flow state and memory, see [Flowstate Chat History](/en/learn/flowstate-chat-history).


## Hierarchical Scopes
@@ -10,17 +10,6 @@ mode: "wide"
The planning feature in CrewAI allows you to add planning capability to your crew. When enabled, before each Crew iteration,
all Crew information is sent to an AgentPlanner that will plan the tasks step by step, and this plan will be added to each task description.

## When to Use Planning

- Tasks require multi-step decomposition before execution.
- You need more consistent execution quality on complex tasks.
- You want transparent planning traces in crew runs.

## When Not to Use Planning

- Tasks are simple and deterministic.
- Latency and token budget are strict and planning overhead is not justified.

### Using the Planning Feature

Getting started with the planning feature is very easy; the only step required is to add `planning=True` to your Crew:
@@ -42,7 +31,7 @@ my_crew = Crew(
From this point on, your crew will have planning enabled, and the tasks will be planned before each iteration.

<Warning>
Planning model defaults can vary by version and environment. To avoid implicit provider dependencies, set `planning_llm` explicitly in your crew configuration.
When planning is enabled, crewAI will use `gpt-4o-mini` as the default LLM for planning, which requires a valid OpenAI API key. Since your agents might be using different LLMs, this could cause confusion if you don't have an OpenAI API key configured or if you're experiencing unexpected behavior related to LLM API calls.
</Warning>

#### Planning LLM
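A minimal sketch of setting the planner model explicitly; the agent and task lists are elided placeholders:

```python
from crewai import Crew

my_crew = Crew(
    agents=[...],           # your agents, as defined earlier on this page
    tasks=[...],            # your tasks
    planning=True,
    planning_llm="gpt-4o",  # set explicitly instead of relying on the default planner model
)
```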
@@ -163,14 +152,4 @@
**Expected Output:**
A fully fledged report with the main topics, each with a full section of information. Formatted as markdown without '```'.
```
</CodeGroup>

## Common Failure Modes

### Planning adds cost/latency without quality gains
- Cause: planning enabled for simple tasks.
- Fix: disable `planning` for straightforward pipelines.

### Unexpected provider authentication errors
- Cause: implicit planner model/provider assumptions.
- Fix: set `planning_llm` explicitly and ensure matching credentials are configured.
</CodeGroup>
@@ -12,20 +12,11 @@ mode: "wide"
These processes ensure tasks are distributed and executed efficiently, in alignment with a predefined strategy.
</Tip>

## When to Use Each Process

- Use `sequential` when task order is fixed and outputs feed directly into the next task.
- Use `hierarchical` when you need a manager to delegate and validate work dynamically.

## When Not to Use Hierarchical

- You do not need dynamic delegation.
- You cannot provide a reliable `manager_llm` or `manager_agent`.

## Process Implementations

- **Sequential**: Executes tasks sequentially, ensuring tasks are completed in an orderly progression.
- **Hierarchical**: Organizes tasks in a managerial hierarchy, where tasks are delegated and executed based on a structured chain of command. A manager language model (`manager_llm`) or a custom manager agent (`manager_agent`) must be specified in the crew to enable the hierarchical process, facilitating the creation and management of tasks by the manager.
- **Consensual Process (Planned)**: Aiming for collaborative decision-making among agents on task execution, this process type introduces a democratic approach to task management within CrewAI. It is planned for future development and is not currently implemented in the codebase.
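A minimal sketch of enabling the hierarchical process with an explicit manager model; the agent and task lists are elided placeholders:

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],                  # worker agents; the manager coordinates them
    tasks=[...],
    process=Process.hierarchical,
    manager_llm="gpt-4o",          # required; alternatively pass a custom manager_agent
)
```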

## The Role of Processes in Teamwork

Processes enable individual agents to operate as a cohesive unit, streamlining their efforts to achieve common objectives with efficiency and coherence.
@@ -68,17 +59,9 @@ Emulating a corporate hierarchy, CrewAI allows specifying a custom manager agent

## Process Class: Detailed Overview

The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential`, `hierarchical`).
The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential`, `hierarchical`). The consensual process is planned for future inclusion, emphasizing our commitment to continuous development and innovation.

## Conclusion

The structured collaboration facilitated by processes within CrewAI is crucial for enabling systematic teamwork among agents.
## Common Failure Modes

### Hierarchical process fails at startup
- Cause: missing `manager_llm` or `manager_agent`.
- Fix: provide one of them explicitly in crew configuration.

### Sequential process produces weak outputs
- Cause: task boundaries/context are underspecified.
- Fix: improve task descriptions, expected outputs, and task context chaining.
This documentation has been updated to reflect the latest features, enhancements, and the planned integration of the Consensual Process, ensuring users have access to the most current and comprehensive information.
@@ -9,20 +9,9 @@ mode: "wide"

Testing is a crucial part of the development process, and it is essential to ensure that your crew performs as expected. With CrewAI, you can easily test your crew and evaluate its performance using the built-in testing capabilities.

## When to Use Testing

- Before promoting a crew to production.
- After changing prompts, tools, or model configurations.
- When benchmarking quality/cost/latency tradeoffs.

## When Not to Rely on Testing Alone

- For safety-critical deployments without human review gates.
- When test datasets are too small or unrepresentative.

### Using the Testing Feature

Use the CLI command `crewai test` to run repeated crew executions and compare outputs across iterations. The parameters are `n_iterations` and `model`, which are optional and default to `2` and `gpt-4o-mini`.
We added the CLI command `crewai test` to make it easy to test your crew. This command will run your crew for a specified number of iterations and provide detailed performance metrics. The parameters are `n_iterations` and `model`, which are optional and default to 2 and `gpt-4o-mini` respectively. For now, the only provider available is OpenAI.

```bash
crewai test
```

@@ -58,13 +47,3 @@ A table of scores at the end will show the performance of the crew in terms of t

| Execution Time (s) | 126 | 145 | **135** | | |

The example above shows the test results for two runs of the crew with two tasks, with the average total score for each task and the crew as a whole.

## Common Failure Modes

### Scores fluctuate too much between runs
- Cause: high sampling randomness or unstable prompts.
- Fix: lower temperature and tighten output constraints (see the sketch below).
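
For example, sampling randomness can be pinned at the agent's LLM (a minimal sketch; the model choice is illustrative):

```python
from crewai import Agent, LLM

# temperature=0 keeps repeated test iterations comparable.
stable_llm = LLM(model="gpt-4o-mini", temperature=0)

analyst = Agent(
    role="Analyst",
    goal="Produce consistent, well-structured findings",
    backstory="Illustrative backstory.",
    llm=stable_llm,
)
```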

### Good test scores but poor production quality
- Cause: test prompts do not match real workload.
- Fix: build a representative test set from real production inputs.
@@ -10,17 +10,6 @@ mode: "wide"

CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers.
This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.

## When to Use Tools

- Agents need external data or side effects.
- You need deterministic actions wrapped in reusable interfaces.
- You need to connect APIs, files, databases, or browser actions into agent workflows.

## When Not to Use Tools

- The task can be solved entirely from prompt context.
- The external side effect cannot be made safe or idempotent.

## What is a Tool?

A tool in CrewAI is a skill or function that agents can utilize to perform various actions.

@@ -296,17 +285,3 @@ writer1 = Agent(

Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling,
caching mechanisms, and the flexibility of tool arguments to optimize your agents' performance and capabilities.

## Common Failure Modes

### Tool schema mismatch
- Cause: model-generated arguments do not match tool signature.
- Fix: tighten tool descriptions and validate input schemas (see the sketch below).

### Repeated side effects
- Cause: retries trigger duplicate writes/actions.
- Fix: add idempotency keys and deduplication checks in tool logic, as in the sketch below.
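
A minimal custom-tool sketch illustrating both fixes: a Pydantic `args_schema` rejects malformed model-generated arguments, and an idempotency check suppresses duplicate side effects. The ticket "API" and the in-memory store are illustrative stand-ins, not CrewAI APIs:

```python
from crewai.tools import BaseTool
from pydantic import BaseModel, Field

_SEEN_TICKETS: dict[str, str] = {}  # illustrative in-memory dedupe store


class CreateTicketInput(BaseModel):
    idempotency_key: str = Field(description="Stable key so retries do not duplicate tickets")
    title: str = Field(description="Short ticket title")


class CreateTicketTool(BaseTool):
    name: str = "create_ticket"
    description: str = "Create a support ticket. Reuse the same idempotency_key on retries."
    args_schema: type[BaseModel] = CreateTicketInput

    def _run(self, idempotency_key: str, title: str) -> str:
        # Deduplicate retried calls before performing the side effect.
        if idempotency_key in _SEEN_TICKETS:
            return f"Duplicate suppressed; {_SEEN_TICKETS[idempotency_key]} already exists."
        ticket_id = f"TCK-{len(_SEEN_TICKETS) + 1}"  # placeholder for a real API call
        _SEEN_TICKETS[idempotency_key] = ticket_id
        return f"Created {ticket_id}: {title}"
```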

### Tool timeouts under load
- Cause: unbounded retries or slow external services.
- Fix: set explicit timeout/retry policy and graceful fallbacks.
@@ -177,6 +177,11 @@ You need to push your crew to a GitHub repository. If you haven't created a crew

![](/images/enterprise/create-deployment.png)
</Frame>

<Info>
Using private Python packages? You'll need to add your registry credentials here too.
See [Private Package Registries](/en/enterprise/guides/private-package-registry) for the required variables.
</Info>

</Step>

<Step title="Deploy Your Crew">

@@ -256,6 +256,12 @@ Before deployment, ensure you have:

1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** if using external tools (Serper, etc.)

<Info>
If your project depends on packages from a **private PyPI registry**, you'll also need to configure
registry authentication credentials as environment variables. See the
[Private Package Registries](/en/enterprise/guides/private-package-registry) guide for details.
</Info>

<Tip>
Test your project locally with the same environment variables before deploying
to catch configuration issues early.
263 docs/en/enterprise/guides/private-package-registry.mdx Normal file
@@ -0,0 +1,263 @@

---
title: "Private Package Registries"
description: "Install private Python packages from authenticated PyPI registries in CrewAI AMP"
icon: "lock"
mode: "wide"
---

<Note>
This guide covers how to configure your CrewAI project to install Python packages
from private PyPI registries (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.)
when deploying to CrewAI AMP.
</Note>

## When You Need This

If your project depends on internal or proprietary Python packages hosted on a private registry
rather than the public PyPI, you'll need to:

1. Tell UV **where** to find the package (an index URL)
2. Tell UV **which** packages come from that index (a source mapping)
3. Provide **credentials** so UV can authenticate during install

CrewAI AMP uses [UV](https://docs.astral.sh/uv/) for dependency resolution and installation.
UV supports authenticated private registries through `pyproject.toml` configuration combined
with environment variables for credentials.

## Step 1: Configure pyproject.toml

Three pieces work together in your `pyproject.toml`:

### 1a. Declare the dependency

Add the private package to your `[project.dependencies]` like any other dependency:

```toml
[project]
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]
```

### 1b. Define the index

Register your private registry as a named index under `[[tool.uv.index]]`:

```toml
[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true
```

<Info>
The `name` field is important — UV uses it to construct the environment variable names
for authentication (see [Step 2](#step-2-set-authentication-credentials) below).

Setting `explicit = true` means UV won't search this index for every package — only the
ones you explicitly map to it in `[tool.uv.sources]`. This avoids unnecessary queries
against your private registry and protects against dependency confusion attacks.
</Info>

### 1c. Map the package to the index

Tell UV which packages should be resolved from your private index using `[tool.uv.sources]`:

```toml
[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

### Complete example

```toml
[project]
name = "my-crew-project"
version = "0.1.0"
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]

[tool.crewai]
type = "crew"

[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true

[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

After updating `pyproject.toml`, regenerate your lock file:

```bash
uv lock
```

<Warning>
Always commit the updated `uv.lock` along with your `pyproject.toml` changes.
The lock file is required for deployment — see [Prepare for Deployment](/en/enterprise/guides/prepare-for-deployment).
</Warning>

## Step 2: Set Authentication Credentials

UV authenticates against private indexes using environment variables that follow a naming convention
based on the index name you defined in `pyproject.toml`:

```
UV_INDEX_{UPPER_NAME}_USERNAME
UV_INDEX_{UPPER_NAME}_PASSWORD
```

Where `{UPPER_NAME}` is your index name converted to **uppercase** with **hyphens replaced by underscores**.

For example, an index named `my-private-registry` uses:

| Variable | Value |
|----------|-------|
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Your registry username or token name |
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Your registry password or token/PAT |
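
To illustrate the convention, a small helper (not part of UV or CrewAI) that derives both variable names from an index name:

```python
def uv_credential_vars(index_name: str) -> tuple[str, str]:
    """Derive UV's credential variable names from a [[tool.uv.index]] name."""
    upper = index_name.upper().replace("-", "_")
    return f"UV_INDEX_{upper}_USERNAME", f"UV_INDEX_{upper}_PASSWORD"


print(uv_credential_vars("my-private-registry"))
# ('UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME', 'UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD')
```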

<Warning>
These environment variables **must** be added via the CrewAI AMP **Environment Variables** settings —
either globally or at the deployment level. They cannot be set in `.env` files or hardcoded in your project.

See [Setting Environment Variables in AMP](#setting-environment-variables-in-amp) below.
</Warning>

## Registry Provider Reference

The table below shows the index URL format and credential values for common registry providers.
Replace placeholder values with your actual organization and feed details.

| Provider | Index URL | Username | Password |
|----------|-----------|----------|----------|
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Any non-empty string (e.g. `token`) | Personal Access Token (PAT) with Packaging Read scope |
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub username | Personal Access Token (classic) with `read:packages` scope |
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project or Personal Access Token with `read_api` scope |
| **AWS CodeArtifact** | Use the URL from `aws codeartifact get-repository-endpoint` | `aws` | Token from `aws codeartifact get-authorization-token` |
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64-encoded service account key |
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Username or email | API key or identity token |
| **Self-hosted (devpi, Nexus, etc.)** | Your registry's simple API URL | Registry username | Registry password |

<Tip>
For **AWS CodeArtifact**, the authorization token expires periodically.
You'll need to refresh the `UV_INDEX_*_PASSWORD` value when it expires.
Consider automating this in your CI/CD pipeline.
</Tip>
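
As one hedged automation sketch (the domain, account ID, and region are placeholders; this assumes boto3 with valid AWS credentials is available in the pipeline):

```python
import boto3

# Fetch a fresh CodeArtifact token; tokens expire periodically.
client = boto3.client("codeartifact", region_name="us-east-1")
token = client.get_authorization_token(
    domain="my-domain",          # placeholder domain
    domainOwner="111122223333",  # placeholder AWS account ID
)["authorizationToken"]

# The token becomes the UV password for the private index.
print(f"UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD={token}")
```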

## Setting Environment Variables in AMP

Private registry credentials must be configured as environment variables in CrewAI AMP.
You have two options:

<Tabs>
<Tab title="Web Interface">
1. Log in to [CrewAI AMP](https://app.crewai.com)
2. Navigate to your automation
3. Open the **Environment Variables** tab
4. Add each variable (`UV_INDEX_*_USERNAME` and `UV_INDEX_*_PASSWORD`) with its value

See the [Deploy to AMP — Set Environment Variables](/en/enterprise/guides/deploy-to-amp#set-environment-variables) step for details.
</Tab>
<Tab title="CLI Deployment">
Add the variables to your local `.env` file before running `crewai deploy create`.
The CLI will securely transfer them to the platform:

```bash
# .env
OPENAI_API_KEY=sk-...
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
```

```bash
crewai deploy create
```
</Tab>
</Tabs>

<Warning>
**Never** commit credentials to your repository. Use AMP environment variables for all secrets.
The `.env` file should be listed in `.gitignore`.
</Warning>

To update credentials on an existing deployment, see [Update Your Crew — Environment Variables](/en/enterprise/guides/update-crew).

## How It All Fits Together

When CrewAI AMP builds your automation, the resolution flow works like this:

<Steps>
<Step title="Build starts">
AMP pulls your repository and reads `pyproject.toml` and `uv.lock`.
</Step>
<Step title="UV resolves dependencies">
UV reads `[tool.uv.sources]` to determine which index each package should come from.
</Step>
<Step title="UV authenticates">
For each private index, UV looks up `UV_INDEX_{NAME}_USERNAME` and `UV_INDEX_{NAME}_PASSWORD`
from the environment variables you configured in AMP.
</Step>
<Step title="Packages install">
UV downloads and installs all packages — both public (from PyPI) and private (from your registry).
</Step>
<Step title="Automation runs">
Your crew or flow starts with all dependencies available.
</Step>
</Steps>

## Troubleshooting

### Authentication Errors During Build

**Symptom**: Build fails with `401 Unauthorized` or `403 Forbidden` when resolving a private package.

**Check**:
- The `UV_INDEX_*` environment variable names match your index name exactly (uppercased, hyphens → underscores)
- Credentials are set in AMP environment variables, not just in a local `.env`
- Your token/PAT has the required read permissions for the package feed
- The token hasn't expired (especially relevant for AWS CodeArtifact)

### Package Not Found

**Symptom**: `No matching distribution found for my-private-package`.

**Check**:
- The index URL in `pyproject.toml` ends with `/simple/`
- The `[tool.uv.sources]` entry maps the correct package name to the correct index name
- The package is actually published to your private registry
- Run `uv lock` locally with the same credentials to verify resolution works

### Lock File Conflicts

**Symptom**: `uv lock` fails or produces unexpected results after adding a private index.

**Solution**: Set the credentials locally and regenerate:

```bash
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
uv lock
```

Then commit the updated `uv.lock`.

## Related Guides

<CardGroup cols={3}>
<Card title="Prepare for Deployment" icon="clipboard-check" href="/en/enterprise/guides/prepare-for-deployment">
Verify project structure and dependencies before deploying.
</Card>
<Card title="Deploy to AMP" icon="rocket" href="/en/enterprise/guides/deploy-to-amp">
Deploy your crew or flow and configure environment variables.
</Card>
<Card title="Update Your Crew" icon="arrows-rotate" href="/en/enterprise/guides/update-crew">
Update environment variables and push changes to a running deployment.
</Card>
</CardGroup>
@@ -8,10 +8,6 @@ mode: "wide"

## Quickstarts & Demos

<CardGroup cols={3}>
<Card title="Flowstate Chat History" icon="comments" href="/en/learn/flowstate-chat-history">
Manage chat sessions with sliding-window history, summary compaction, and persisted Flow state.
</Card>

<Card title="Collaboration" icon="people-arrows" href="https://github.com/crewAIInc/crewAI-quickstarts/blob/main/Collaboration/crewai_collaboration.ipynb">
Coordinate multiple agents on shared tasks. Includes notebook with end-to-end collaboration pattern.
</Card>

@@ -34,10 +34,6 @@ mode: "wide"

## Flows

<CardGroup cols={3}>
<Card title="Flowstate Chat History" icon="comments" href="/en/learn/flowstate-chat-history">
Stateful chat pattern with compacted context and persisted session state.
</Card>

<Card title="Content Creator Flow" icon="pen" href="https://github.com/crewAIInc/crewAI-examples/tree/main/flows/content_creator_flow">
Multi-crew content generation with routing.
</Card>
@@ -47,23 +47,6 @@ CrewAI offers two ways to manage state in your flows:

Let's examine each approach in detail.

### Flow State vs Memory: When to use each

Both features keep context, but they solve different problems.

| Dimension | Flow State (`self.state`) | Memory (`self.remember` / `self.recall`) |
|---|---|---|
| Primary purpose | Track execution and deterministic workflow data | Store and retrieve semantic knowledge across interactions |
| Data shape | Explicit fields (dict/Pydantic model) | Text records with inferred scopes and ranked recall |
| Typical lifetime | Current flow run (or persisted checkpoints) | Long-term knowledge over many runs |
| Access pattern | Direct reads/writes (`self.state.field`) | Query-based retrieval (`self.recall("...")`) |
| Best for | Routing flags, counters, intermediate outputs, chat window | Durable facts, prior outcomes, reusable context |
| Chat use | Recent turns + running summary + control flags | Long-tail memory outside context window |

Practical rule (see the sketch below):
- Use **state** for what your control flow depends on right now.
- Use **memory** for what you may want to retrieve later by meaning.
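
A compact sketch of that split, assuming the unstructured dict state and the `remember`/`recall` helpers referenced above (the scope path and strings are illustrative):

```python
from crewai.flow.flow import Flow, start


class TriageFlow(Flow):
    @start()
    def triage(self):
        # State: deterministic data the current run's control flow depends on.
        self.state["needs_escalation"] = True

        # Memory: durable knowledge, retrieved later by meaning.
        self.remember(
            "Customer ACME prefers email follow-ups over calls.",
            scope="/support/acme",
        )
        return self.recall("ACME contact preferences")
```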

## Unstructured State Management

Unstructured state uses a dictionary-like approach, offering flexibility and simplicity for straightforward applications.
@@ -27,11 +27,8 @@ mode: "wide"

</div>

<div style={{ display: 'flex', flexWrap: 'wrap', gap: 12, justifyContent: 'center' }}>
<a className="button button-primary" href="/en/installation">Install</a>
<a className="button" href="/en/quickstart">Quickstart</a>
<a className="button" href="/en/guides/crews/first-crew">First Crew</a>
<a className="button" href="/en/guides/flows/first-flow">First Flow</a>
<a className="button" href="/en/concepts/llms">LLM Setup</a>
<a className="button button-primary" href="/en/quickstart">Get started</a>
<a className="button" href="/en/changelog">View changelog</a>
<a className="button" href="/en/api-reference/introduction">API Reference</a>
</div>

@@ -39,49 +36,17 @@ mode: "wide"

<div style={{ marginTop: 32 }} />

## Start in 3 steps
## Get started

<CardGroup cols={3}>
<Card title="1) Install" href="/en/installation" icon="wrench">
<Card title="Introduction" href="/en/introduction" icon="sparkles">
Overview of CrewAI concepts, architecture, and what you can build with agents, crews, and flows.
</Card>
<Card title="Installation" href="/en/installation" icon="wrench">
Install via `uv`, configure API keys, and set up the CLI for local development.
</Card>
<Card title="2) Run Quickstart" href="/en/quickstart" icon="rocket">
Launch your first working crew with a minimal project and iterate from there.
</Card>
<Card title="3) Pick a path" href="/en/ai/overview" icon="sitemap">
Continue with canonical domain packs for Flows, Agents, Crews, LLMs, Memory, and Tools.
</Card>
</CardGroup>

## Most-used pages

<CardGroup cols={3}>
<Card title="First Crew" href="/en/guides/crews/first-crew" icon="users">
Build a production-style crew with role/task configuration and execution flow.
</Card>
<Card title="First Flow" href="/en/guides/flows/first-flow" icon="arrow-progress">
Build event-driven orchestration with state, listeners, and routing.
</Card>
<Card title="Flowstate Chat History" href="/en/learn/flowstate-chat-history" icon="comments">
Stateful chat history pattern with persistence and summary compaction.
</Card>
<Card title="Agents" href="/en/concepts/agents" icon="user">
Agent role design, tool boundaries, and output contracts.
</Card>
<Card title="Crews" href="/en/concepts/crews" icon="users-gear">
Multi-agent collaboration patterns and process semantics.
</Card>
<Card title="Flows" href="/en/concepts/flows" icon="code-branch">
Deterministic orchestration, state lifecycle, persistence, and resume.
</Card>
<Card title="LLMs" href="/en/concepts/llms" icon="microchip-ai">
Model setup, provider config, routing patterns, and reliability defaults.
</Card>
<Card title="Memory" href="/en/concepts/memory" icon="database">
Semantic recall, scope strategy, and state-vs-memory architecture.
</Card>
<Card title="Tools" href="/en/tools/overview" icon="wrench">
Tool categories, integration surfaces, and practical usage patterns.
<Card title="Quickstart" href="/en/quickstart" icon="rocket">
Spin up your first crew in minutes. Learn the core runtime, project layout, and dev loop.
</Card>
</CardGroup>

@@ -125,11 +90,7 @@ mode: "wide"

</CardGroup>

<Callout title="Explore real-world patterns" icon="github">
Browse the <a href="/en/examples/cookbooks">examples and cookbooks</a> for end-to-end reference implementations across agents, flows, and enterprise automations. For a practical conversational pattern, start with <a href="/en/learn/flowstate-chat-history">Flowstate Chat History</a>.
</Callout>

<Callout title="AI-First Docs" icon="sitemap">
Use the <a href="/en/ai/overview">AI-First Documentation map</a> for canonical domain packs across Flows, Agents, Crews, LLMs, Memory, and Tools.
Browse the <a href="/en/examples/cookbooks">examples and cookbooks</a> for end-to-end reference implementations across agents, flows, and enterprise automations.
</Callout>

## Stay connected
@@ -16,52 +16,6 @@ It empowers developers to build production-ready multi-agent systems by combinin

With over 100,000 developers certified through our community courses, CrewAI is the standard for enterprise-ready AI automation.

## Start Here

<CardGroup cols={3}>
<Card title="Install" href="/en/installation" icon="wrench">
Set up CrewAI, configure API keys, and prepare your local environment.
</Card>
<Card title="Quickstart" href="/en/quickstart" icon="rocket">
Run your first working crew with a minimal setup.
</Card>
<Card title="First Crew" href="/en/guides/crews/first-crew" icon="users-gear">
Build a production-style crew with roles, tasks, and execution flow.
</Card>
<Card title="First Flow" href="/en/guides/flows/first-flow" icon="arrow-progress">
Build event-driven orchestration with state, listeners, and routers.
</Card>
<Card title="LLM Setup" href="/en/concepts/llms" icon="microchip-ai">
Configure providers, models, and reliability defaults.
</Card>
<Card title="API Reference" href="/en/api-reference/introduction" icon="book">
Use kickoff, resume, and status endpoints for production integrations.
</Card>
</CardGroup>

## Most-used Docs

<CardGroup cols={3}>
<Card title="Agents" href="/en/concepts/agents" icon="user">
Role design, tool boundaries, and output contracts.
</Card>
<Card title="Crews" href="/en/concepts/crews" icon="users">
Multi-agent coordination and process choices.
</Card>
<Card title="Flows" href="/en/concepts/flows" icon="code-branch">
Deterministic orchestration, state, persistence, and resume.
</Card>
<Card title="Memory" href="/en/concepts/memory" icon="database">
Scope strategy and semantic recall across runs.
</Card>
<Card title="Flowstate Chat History" href="/en/learn/flowstate-chat-history" icon="comments">
Stateful chat context with summary compaction and persistence.
</Card>
<Card title="AI-First Docs Map" href="/en/ai/overview" icon="sitemap">
Canonical domain packs for Flows, Agents, Crews, LLMs, Memory, and Tools.
</Card>
</CardGroup>

## The CrewAI Architecture

CrewAI's architecture is designed to balance autonomy with control.

@@ -176,7 +130,7 @@ For any production-ready application, **start with a Flow**.

<Card
  title="Quick Start"
  icon="bolt"
  href="/en/quickstart"
  href="en/quickstart"
>
  Follow our quickstart guide to create your first CrewAI agent and get hands-on experience.
</Card>
@@ -1,167 +0,0 @@

---
title: "Flowstate Chat History"
description: "Build a stateful chat workflow that keeps context compact, persistent, and production-friendly."
icon: "comments"
mode: "wide"
---

## Overview

This guide shows a practical pattern for managing LLM chat history with Flow state:

- Keep recent turns in a sliding window
- Summarize older turns into a compact running summary
- Persist state automatically with `@persist()`
- Keep optional long-term recall using Flow memory

## Why this pattern works

Naively appending every message to prompts causes token bloat and unstable behavior over long sessions. A better approach is:

1. Keep only the most recent turns in `state.messages`
2. Move older turns into `state.running_summary`
3. Build prompts from `running_summary + recent messages`

## Prerequisites

1. CrewAI installed and configured
2. API key configured for your model provider
3. Basic familiarity with Flow decorators (`@start`, `@listen`)

## Step 1: Define typed chat state

```python Code
from typing import Dict, List
from pydantic import BaseModel, Field


class ChatSessionState(BaseModel):
    session_id: str = "demo-session"
    running_summary: str = ""
    messages: List[Dict[str, str]] = Field(default_factory=list)
    max_recent_messages: int = 8
    last_user_message: str = ""
    assistant_reply: str = ""
    turn_count: int = 0
```

## Step 2: Build the Flow

```python Code
from crewai.flow.flow import Flow, start, listen
from crewai.flow.persistence import persist
from litellm import completion


@persist()
class ChatHistoryFlow(Flow[ChatSessionState]):
    model = "gpt-4o-mini"

    @start()
    def capture_user_message(self):
        self.state.last_user_message = self.state.last_user_message.strip()
        self.state.messages.append(
            {"role": "user", "content": self.state.last_user_message}
        )
        self.state.turn_count += 1
        return self.state.last_user_message

    @listen(capture_user_message)
    def compact_old_history(self, _):
        if len(self.state.messages) <= self.state.max_recent_messages:
            return "no_compaction"

        overflow = self.state.messages[:-self.state.max_recent_messages]
        self.state.messages = self.state.messages[-self.state.max_recent_messages:]
        overflow_text = "\n".join(
            f"{m['role']}: {m['content']}" for m in overflow
        )

        summary_prompt = [
            {
                "role": "system",
                "content": "Summarize old chat turns into short bullet points. Preserve facts, constraints, and decisions.",
            },
            {
                "role": "user",
                "content": (
                    f"Existing summary:\n{self.state.running_summary or '(empty)'}\n\n"
                    f"New old turns:\n{overflow_text}"
                ),
            },
        ]
        summary_response = completion(model=self.model, messages=summary_prompt)
        self.state.running_summary = summary_response["choices"][0]["message"]["content"]
        return "compacted"

    @listen(compact_old_history)
    def generate_reply(self, _):
        system_context = (
            "You are a helpful assistant.\n"
            f"Conversation summary so far:\n{self.state.running_summary or '(none)'}"
        )

        response = completion(
            model=self.model,
            messages=[{"role": "system", "content": system_context}, *self.state.messages],
        )
        answer = response["choices"][0]["message"]["content"]

        self.state.assistant_reply = answer
        self.state.messages.append({"role": "assistant", "content": answer})

        # Optional: store key turns in long-term memory for later recall
        self.remember(
            f"Session {self.state.session_id} turn {self.state.turn_count}: "
            f"user={self.state.last_user_message} assistant={answer}",
            scope=f"/chat/{self.state.session_id}",
        )
        return answer
```

## Step 3: Run it

```python Code
flow = ChatHistoryFlow()

first = flow.kickoff(
    inputs={
        "session_id": "customer-42",
        "last_user_message": "I need help choosing a pricing plan for a 10-person team.",
    }
)
print("Assistant:", first)

second = flow.kickoff(
    inputs={
        "last_user_message": "We also need SSO and audit logs. What do you recommend now?",
    }
)
print("Assistant:", second)
print("Turns:", flow.state.turn_count)
print("Recent messages:", len(flow.state.messages))
```

## Expected output (shape)

```text Output
Assistant: ...initial recommendation...
Assistant: ...updated recommendation with SSO and audit-log requirements...
Turns: 2
Recent messages: 4
```

## Troubleshooting

- If replies ignore earlier context:
  increase `max_recent_messages` and ensure `running_summary` is included in the system context.
- If prompts become too large:
  lower `max_recent_messages` and summarize more aggressively.
- If sessions collide:
  provide a stable `session_id` and isolate memory scope with `/chat/{session_id}`.

## Next steps

- Add tool calls for account lookup or product catalog retrieval
- Route to human review for high-risk decisions
- Add structured output to capture recommendations in machine-readable JSON
@@ -176,6 +176,11 @@ You need to push your crew to a GitHub repository. If you haven't created a crew

![](/images/enterprise/create-deployment.png)
</Frame>

<Info>
Using private Python packages? You'll need to add your registry credentials here too.
See [Private Package Registries](/ko/enterprise/guides/private-package-registry) for the required variables.
</Info>

</Step>

<Step title="Deploy Your Crew">

@@ -256,6 +256,12 @@ Both Crews and Flows have entry points in `src/project_name/main.py`:

1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** if using external tools (Serper, etc.)

<Info>
If your project depends on packages from a **private PyPI registry**, you'll also need to configure
registry authentication credentials as environment variables. See the
[Private Package Registries](/ko/enterprise/guides/private-package-registry) guide for details.
</Info>

<Tip>
Test your project locally with the same environment variables before deploying
to catch configuration issues early.

261 docs/ko/enterprise/guides/private-package-registry.mdx Normal file
@@ -0,0 +1,261 @@
---
title: "Private Package Registries"
description: "Install private Python packages from authenticated PyPI registries in CrewAI AMP"
icon: "lock"
mode: "wide"
---

<Note>
This guide covers how to configure your CrewAI project to install Python packages
from private PyPI registries (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.)
when deploying to CrewAI AMP.
</Note>

## When You Need This

If your project depends on internal or proprietary Python packages hosted on a private registry
rather than the public PyPI, you'll need to:

1. Tell UV **where** to find the package (an index URL)
2. Tell UV **which** packages come from that index (a source mapping)
3. Provide **credentials** so UV can authenticate during install

CrewAI AMP uses [UV](https://docs.astral.sh/uv/) for dependency resolution and installation.
UV supports authenticated private registries through `pyproject.toml` configuration combined with environment variables for credentials.

## Step 1: Configure pyproject.toml

Three pieces work together in your `pyproject.toml`:

### 1a. Declare the dependency

Add the private package to your `[project.dependencies]` like any other dependency:

```toml
[project]
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]
```

### 1b. Define the index

Register your private registry as a named index under `[[tool.uv.index]]`:

```toml
[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true
```

<Info>
The `name` field is important — UV uses it to construct the environment variable names
for authentication (see [Step 2](#2단계-인증-자격-증명-설정) below).

Setting `explicit = true` means UV won't search this index for every package — only the
ones you explicitly map to it in `[tool.uv.sources]`. This avoids unnecessary queries
against your private registry and protects against dependency confusion attacks.
</Info>

### 1c. Map the package to the index

Tell UV which packages should be resolved from your private index using `[tool.uv.sources]`:

```toml
[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

### Complete example

```toml
[project]
name = "my-crew-project"
version = "0.1.0"
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]

[tool.crewai]
type = "crew"

[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true

[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

After updating `pyproject.toml`, regenerate your lock file:

```bash
uv lock
```

<Warning>
Always commit the updated `uv.lock` along with your `pyproject.toml` changes.
The lock file is required for deployment — see [Prepare for Deployment](/ko/enterprise/guides/prepare-for-deployment).
</Warning>

## Step 2: Set Authentication Credentials

UV authenticates against private indexes using environment variables that follow a naming convention
based on the index name you defined in `pyproject.toml`:

```
UV_INDEX_{UPPER_NAME}_USERNAME
UV_INDEX_{UPPER_NAME}_PASSWORD
```

Where `{UPPER_NAME}` is your index name converted to **uppercase** with **hyphens replaced by underscores**.

For example, an index named `my-private-registry` uses:

| Variable | Value |
|----------|-------|
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Your registry username or token name |
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Your registry password or token/PAT |

<Warning>
These environment variables **must** be added via the CrewAI AMP **Environment Variables** settings —
either globally or at the deployment level. They cannot be set in `.env` files or hardcoded in your project.

See [Setting Environment Variables in AMP](#amp에서-환경-변수-설정) below.
</Warning>

## Registry Provider Reference

The table below shows the index URL format and credential values for common registry providers.
Replace placeholder values with your actual organization and feed details.

| Provider | Index URL | Username | Password |
|----------|-----------|----------|----------|
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Any non-empty string (e.g. `token`) | Personal Access Token (PAT) with Packaging Read scope |
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub username | Personal Access Token (classic) with `read:packages` scope |
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project or Personal Access Token with `read_api` scope |
| **AWS CodeArtifact** | Use the URL from `aws codeartifact get-repository-endpoint` | `aws` | Token from `aws codeartifact get-authorization-token` |
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64-encoded service account key |
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Username or email | API key or identity token |
| **Self-hosted (devpi, Nexus, etc.)** | Your registry's simple API URL | Registry username | Registry password |

<Tip>
For **AWS CodeArtifact**, the authorization token expires periodically.
You'll need to refresh the `UV_INDEX_*_PASSWORD` value when it expires.
Consider automating this in your CI/CD pipeline.
</Tip>

## Setting Environment Variables in AMP

Private registry credentials must be configured as environment variables in CrewAI AMP.
You have two options:

<Tabs>
<Tab title="Web Interface">
1. Log in to [CrewAI AMP](https://app.crewai.com)
2. Navigate to your automation
3. Open the **Environment Variables** tab
4. Add each variable (`UV_INDEX_*_USERNAME` and `UV_INDEX_*_PASSWORD`) with its value

See the [Deploy to AMP — Set Environment Variables](/ko/enterprise/guides/deploy-to-amp#환경-변수-설정하기) step for details.
</Tab>
<Tab title="CLI Deployment">
Add the variables to your local `.env` file before running `crewai deploy create`.
The CLI will securely transfer them to the platform:

```bash
# .env
OPENAI_API_KEY=sk-...
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
```

```bash
crewai deploy create
```
</Tab>
</Tabs>

<Warning>
**Never** commit credentials to your repository. Use AMP environment variables for all secrets.
The `.env` file should be listed in `.gitignore`.
</Warning>

To update credentials on an existing deployment, see [Update Your Crew — Environment Variables](/ko/enterprise/guides/update-crew).

## How It All Fits Together

When CrewAI AMP builds your automation, the resolution flow works like this:

<Steps>
<Step title="Build starts">
AMP pulls your repository and reads `pyproject.toml` and `uv.lock`.
</Step>
<Step title="UV resolves dependencies">
UV reads `[tool.uv.sources]` to determine which index each package should come from.
</Step>
<Step title="UV authenticates">
For each private index, UV looks up `UV_INDEX_{NAME}_USERNAME` and `UV_INDEX_{NAME}_PASSWORD`
from the environment variables you configured in AMP.
</Step>
<Step title="Packages install">
UV downloads and installs all packages — both public (from PyPI) and private (from your registry).
</Step>
<Step title="Automation runs">
Your crew or flow starts with all dependencies available.
</Step>
</Steps>

## Troubleshooting

### Authentication Errors During Build

**Symptom**: Build fails with `401 Unauthorized` or `403 Forbidden` when resolving a private package.

**Check**:
- The `UV_INDEX_*` environment variable names match your index name exactly (uppercased, hyphens → underscores)
- Credentials are set in AMP environment variables, not just in a local `.env`
- Your token/PAT has the required read permissions for the package feed
- The token hasn't expired (especially relevant for AWS CodeArtifact)

### Package Not Found

**Symptom**: `No matching distribution found for my-private-package`.

**Check**:
- The index URL in `pyproject.toml` ends with `/simple/`
- The `[tool.uv.sources]` entry maps the correct package name to the correct index name
- The package is actually published to your private registry
- Run `uv lock` locally with the same credentials to verify resolution works

### Lock File Conflicts

**Symptom**: `uv lock` fails or produces unexpected results after adding a private index.

**Solution**: Set the credentials locally and regenerate:

```bash
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
uv lock
```

Then commit the updated `uv.lock`.

## Related Guides

<CardGroup cols={3}>
<Card title="Prepare for Deployment" icon="clipboard-check" href="/ko/enterprise/guides/prepare-for-deployment">
Verify project structure and dependencies before deploying.
</Card>
<Card title="Deploy to AMP" icon="rocket" href="/ko/enterprise/guides/deploy-to-amp">
Deploy your crew or flow and configure environment variables.
</Card>
<Card title="Update Your Crew" icon="arrows-rotate" href="/ko/enterprise/guides/update-crew">
Update environment variables and push changes to a running deployment.
</Card>
</CardGroup>
@@ -176,6 +176,11 @@ You need to push your crew to a GitHub repository. If you haven't created one ye

![](/images/enterprise/create-deployment.png)
</Frame>

<Info>
Using private Python packages? You'll need to add your registry credentials here too.
See [Private Package Registries](/pt-BR/enterprise/guides/private-package-registry) for the required variables.
</Info>

</Step>

<Step title="Deploy Your Crew">

@@ -256,6 +256,12 @@ Before deployment, ensure you have:

1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
2. **Tool API keys** if using external tools (Serper, etc.)

<Info>
If your project depends on packages from a **private PyPI registry**, you'll also need to configure
registry authentication credentials as environment variables. See the
[Private Package Registries](/pt-BR/enterprise/guides/private-package-registry) guide for details.
</Info>

<Tip>
Test your project locally with the same environment variables before deploying
to catch configuration issues early.

263 docs/pt-BR/enterprise/guides/private-package-registry.mdx Normal file
@@ -0,0 +1,263 @@
---
title: "Private Package Registries"
description: "Install private Python packages from authenticated PyPI registries in CrewAI AMP"
icon: "lock"
mode: "wide"
---

<Note>
This guide covers how to configure your CrewAI project to install Python packages
from private PyPI registries (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.)
when deploying to CrewAI AMP.
</Note>

## When You Need This

If your project depends on internal or proprietary Python packages hosted on a private registry
rather than the public PyPI, you'll need to:

1. Tell UV **where** to find the package (an index URL)
2. Tell UV **which** packages come from that index (a source mapping)
3. Provide **credentials** so UV can authenticate during install

CrewAI AMP uses [UV](https://docs.astral.sh/uv/) for dependency resolution and installation.
UV supports authenticated private registries through `pyproject.toml` configuration combined
with environment variables for credentials.

## Step 1: Configure pyproject.toml

Three pieces work together in your `pyproject.toml`:

### 1a. Declare the dependency

Add the private package to your `[project.dependencies]` like any other dependency:

```toml
[project]
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]
```

### 1b. Define the index

Register your private registry as a named index under `[[tool.uv.index]]`:

```toml
[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true
```

<Info>
The `name` field is important — UV uses it to construct the environment variable names
for authentication (see [Step 2](#passo-2-configurar-credenciais-de-autenticação) below).

Setting `explicit = true` means UV won't search this index for every package — only the
ones you explicitly map to it in `[tool.uv.sources]`. This avoids unnecessary queries
against your private registry and protects against dependency confusion attacks.
</Info>

### 1c. Map the package to the index

Tell UV which packages should be resolved from your private index using `[tool.uv.sources]`:

```toml
[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

### Complete example

```toml
[project]
name = "my-crew-project"
version = "0.1.0"
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.100.1,<1.0.0",
    "my-private-package>=1.2.0",
]

[tool.crewai]
type = "crew"

[[tool.uv.index]]
name = "my-private-registry"
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
explicit = true

[tool.uv.sources]
my-private-package = { index = "my-private-registry" }
```

After updating `pyproject.toml`, regenerate your lock file:

```bash
uv lock
```

<Warning>
Always commit the updated `uv.lock` along with your `pyproject.toml` changes.
The lock file is required for deployment — see [Prepare for Deployment](/pt-BR/enterprise/guides/prepare-for-deployment).
</Warning>

## Step 2: Set Authentication Credentials

UV authenticates against private indexes using environment variables that follow a naming convention
based on the index name you defined in `pyproject.toml`:

```
UV_INDEX_{UPPER_NAME}_USERNAME
UV_INDEX_{UPPER_NAME}_PASSWORD
```

Where `{UPPER_NAME}` is your index name converted to **uppercase** with **hyphens replaced by underscores**.

For example, an index named `my-private-registry` uses:

| Variable | Value |
|----------|-------|
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Your registry username or token name |
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Your registry password or token/PAT |

<Warning>
These environment variables **must** be added via the CrewAI AMP **Environment Variables** settings —
either globally or at the deployment level. They cannot be set in `.env` files or hardcoded in your project.

See [Setting Environment Variables in AMP](#configurar-variáveis-de-ambiente-no-amp) below.
</Warning>

## Registry Provider Reference

The table below shows the index URL format and credential values for common registry providers.
Replace placeholder values with your actual organization and feed details.

| Provider | Index URL | Username | Password |
|----------|-----------|----------|----------|
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Any non-empty string (e.g. `token`) | Personal Access Token (PAT) with Packaging Read scope |
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub username | Personal Access Token (classic) with `read:packages` scope |
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project or Personal Access Token with `read_api` scope |
| **AWS CodeArtifact** | Use the URL from `aws codeartifact get-repository-endpoint` | `aws` | Token from `aws codeartifact get-authorization-token` |
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64-encoded service account key |
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Username or email | API key or identity token |
| **Self-hosted (devpi, Nexus, etc.)** | Your registry's simple API URL | Registry username | Registry password |

<Tip>
For **AWS CodeArtifact**, the authorization token expires periodically.
You'll need to refresh the `UV_INDEX_*_PASSWORD` value when it expires.
Consider automating this in your CI/CD pipeline.
</Tip>

## Setting Environment Variables in AMP

Private registry credentials must be configured as environment variables in CrewAI AMP.
You have two options:

<Tabs>
<Tab title="Web Interface">
1. Log in to [CrewAI AMP](https://app.crewai.com)
2. Navigate to your automation
3. Open the **Environment Variables** tab
4. Add each variable (`UV_INDEX_*_USERNAME` and `UV_INDEX_*_PASSWORD`) with its value

See the [Deploy to AMP — Set Environment Variables](/pt-BR/enterprise/guides/deploy-to-amp#definir-as-variáveis-de-ambiente) step for details.
</Tab>
<Tab title="CLI Deployment">
Add the variables to your local `.env` file before running `crewai deploy create`.
The CLI will securely transfer them to the platform:

```bash
# .env
OPENAI_API_KEY=sk-...
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
```

```bash
crewai deploy create
```
</Tab>
</Tabs>

<Warning>
**Never** commit credentials to your repository. Use AMP environment variables for all secrets.
The `.env` file should be listed in `.gitignore`.
</Warning>

To update credentials on an existing deployment, see [Update Your Crew — Environment Variables](/pt-BR/enterprise/guides/update-crew).

## How It All Fits Together

When CrewAI AMP builds your automation, the resolution flow works like this:

<Steps>
<Step title="Build starts">
AMP pulls your repository and reads `pyproject.toml` and `uv.lock`.
</Step>
<Step title="UV resolves dependencies">
UV reads `[tool.uv.sources]` to determine which index each package should come from.
</Step>
<Step title="UV authenticates">
For each private index, UV looks up `UV_INDEX_{NAME}_USERNAME` and `UV_INDEX_{NAME}_PASSWORD`
from the environment variables you configured in AMP.
</Step>
<Step title="Packages install">
UV downloads and installs all packages — both public (from PyPI) and private (from your registry).
</Step>
<Step title="Automation runs">
Your crew or flow starts with all dependencies available.
</Step>
</Steps>

## Troubleshooting

### Authentication Errors During Build

**Symptom**: Build fails with `401 Unauthorized` or `403 Forbidden` when resolving a private package.

**Check**:
- The `UV_INDEX_*` environment variable names match your index name exactly (uppercased, hyphens → underscores)
- Credentials are set in AMP environment variables, not just in a local `.env`
- Your token/PAT has the required read permissions for the package feed
- The token hasn't expired (especially relevant for AWS CodeArtifact)

### Package Not Found

**Symptom**: `No matching distribution found for my-private-package`.

**Check**:
- The index URL in `pyproject.toml` ends with `/simple/`
- The `[tool.uv.sources]` entry maps the correct package name to the correct index name
- The package is actually published to your private registry
- Run `uv lock` locally with the same credentials to verify resolution works

### Lock File Conflicts

**Symptom**: `uv lock` fails or produces unexpected results after adding a private index.

**Solution**: Set the credentials locally and regenerate:

```bash
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
uv lock
```

Then commit the updated `uv.lock`.

## Related Guides

<CardGroup cols={3}>
<Card title="Prepare for Deployment" icon="clipboard-check" href="/pt-BR/enterprise/guides/prepare-for-deployment">
Verify project structure and dependencies before deploying.
</Card>
<Card title="Deploy to AMP" icon="rocket" href="/pt-BR/enterprise/guides/deploy-to-amp">
Deploy your crew or flow and configure environment variables.
</Card>
<Card title="Update Your Crew" icon="arrows-rotate" href="/pt-BR/enterprise/guides/update-crew">
Update environment variables and push changes to a running deployment.
</Card>
</CardGroup>
@@ -8,12 +8,10 @@ authors = [
]
requires-python = ">=3.10, <3.14"
dependencies = [
    "lancedb~=0.5.4",
    "pytube~=15.0.0",
    "requests~=2.32.5",
    "docker~=7.1.0",
    "crewai==1.9.3",
    "lancedb~=0.5.4",
    "tiktoken~=0.8.0",
    "beautifulsoup4~=4.13.4",
    "python-docx~=1.2.0",

@@ -38,10 +38,11 @@ dependencies = [
    "json5~=0.10.0",
    "portalocker~=2.7.0",
    "pydantic-settings~=2.10.1",
    "httpx~=0.28.1",
    "mcp~=1.26.0",
    "uv~=0.9.13",
    "aiosqlite~=0.21.0",
    "lancedb>=0.4.0",
    "lancedb>=0.29.2",
]

[project.urls]
@@ -10,7 +10,6 @@ from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.memory.unified_memory import Memory
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail

@@ -72,6 +71,25 @@ def _track_install_async() -> None:


_track_install_async()

_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
    "Memory": ("crewai.memory.unified_memory", "Memory"),
}


def __getattr__(name: str) -> Any:
    """Lazily import heavy modules (e.g. Memory → lancedb) on first access."""
    if name in _LAZY_IMPORTS:
        module_path, attr = _LAZY_IMPORTS[name]
        import importlib

        mod = importlib.import_module(module_path)
        val = getattr(mod, attr)
        globals()[name] = val
        return val
    raise AttributeError(f"module 'crewai' has no attribute {name!r}")


__all__ = [
    "LLM",
    "Agent",
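
For context, this addition is the PEP 562 module-level `__getattr__` pattern; a quick check of the intended effect might look like the following sketch (whether `lancedb` stays unloaded at import time depends on the rest of the import graph and the installed version):

```python
import sys

import crewai

print("lancedb loaded:", "lancedb" in sys.modules)  # expected False after this change

memory_cls = crewai.Memory  # first access triggers __getattr__ and the real import
print("lancedb loaded:", "lancedb" in sys.modules)  # True once Memory is resolved
```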
@@ -386,8 +386,8 @@ class Agent(BaseAgent):
        query = task.description
        matches = unified_memory.recall(query, limit=10)
        if matches:
            memory = "Relevant memories:\n" + "\n".join(
                f"- {m.record.content}" for m in matches
            memory = "Relevant memories:\n" + "\n\n".join(
                m.format() for m in matches
            )
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

@@ -622,10 +622,10 @@ class Agent(BaseAgent):
        )
        if unified_memory is not None:
            query = task.description
            matches = unified_memory.recall(query, limit=10)
            matches = unified_memory.recall(query, limit=5)
            if matches:
                memory = "Relevant memories:\n" + "\n".join(
                    f"- {m.record.content}" for m in matches
                    m.format() for m in matches
                )
                if memory.strip() != "":
                    task_prompt += self.i18n.slice("memory").format(memory=memory)

@@ -864,7 +864,11 @@ class Agent(BaseAgent):
            respect_context_window=self.respect_context_window,
            request_within_rpm_limit=rpm_limit_fn,
            callbacks=[TokenCalcHandler(self._token_process)],
            response_model=task.response_model if task else None,
            response_model=(
                task.response_model or task.output_pydantic or task.output_json
            )
            if task
            else None,
        )

    def _update_executor_parameters(

@@ -893,7 +897,11 @@ class Agent(BaseAgent):
        self.agent_executor.stop = stop_words
        self.agent_executor.tools_names = get_tool_names(tools)
        self.agent_executor.tools_description = render_text_description_and_args(tools)
        self.agent_executor.response_model = task.response_model if task else None
        self.agent_executor.response_model = (
            (task.response_model or task.output_pydantic or task.output_json)
            if task
            else None
        )

        self.agent_executor.tools_handler = self.tools_handler
        self.agent_executor.request_within_rpm_limit = rpm_limit_fn

@@ -1712,7 +1720,8 @@ class Agent(BaseAgent):

        existing_names = {sanitize_tool_name(t.name) for t in raw_tools}
        raw_tools.extend(
            mt for mt in create_memory_tools(agent_memory)
            mt
            for mt in create_memory_tools(agent_memory)
            if sanitize_tool_name(mt.name) not in existing_names
        )

@@ -1805,8 +1814,8 @@ class Agent(BaseAgent):
        matches = agent_memory.recall(formatted_messages, limit=10)
        memory_block = ""
        if matches:
            memory_block = "Relevant memories:\n" + "\n".join(
                f"- {m.record.content}" for m in matches
            memory_block = "Relevant memories:\n" + "\n\n".join(
                m.format() for m in matches
            )
        if memory_block:
            formatted_messages += "\n\n" + self.i18n.slice("memory").format(

@@ -1937,14 +1946,15 @@ class Agent(BaseAgent):
        if isinstance(messages, str):
            input_str = messages
        else:
            input_str = "\n".join(
                str(msg.get("content", "")) for msg in messages if msg.get("content")
            ) or "User request"
            raw = (
                f"Input: {input_str}\n"
                f"Agent: {self.role}\n"
                f"Result: {output_text}"
            )
            input_str = (
                "\n".join(
                    str(msg.get("content", ""))
                    for msg in messages
                    if msg.get("content")
                )
                or "User request"
            )
        raw = f"Input: {input_str}\nAgent: {self.role}\nResult: {output_text}"
        extracted = agent_memory.extract_memories(raw)
        if extracted:
            agent_memory.remember_many(extracted)

@@ -30,7 +30,7 @@ class CrewAgentExecutorMixin:
        memory = getattr(self.agent, "memory", None) or (
            getattr(self.crew, "_memory", None) if self.crew else None
        )
        if memory is None or not self.task:
        if memory is None or not self.task or getattr(memory, "_read_only", False):
            return
        if (
            f"Action: {sanitize_tool_name('Delegate work to coworker')}"

@@ -6,8 +6,10 @@ and memory management.

from __future__ import annotations

import asyncio
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
import inspect
import logging
from typing import TYPE_CHECKING, Any, Literal, cast

@@ -48,6 +50,7 @@ from crewai.utilities.agent_utils import (
    handle_unknown_error,
    has_reached_max_iterations,
    is_context_length_exceeded,
    parse_tool_call_args,
    process_llm_response,
    track_delegation_if_needed,
)

@@ -736,7 +739,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        ] = []
        for call_id, func_name, func_args in parsed_calls:
            original_tool = original_tools_by_name.get(func_name)
            execution_plan.append((call_id, func_name, func_args, original_tool))
            execution_plan.append(
                (call_id, func_name, func_args, original_tool)
            )

        self._append_assistant_tool_calls_message(
|
||||
[
|
||||
@@ -746,7 +751,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
max_workers = min(8, len(execution_plan))
|
||||
ordered_results: list[dict[str, Any] | None] = [None] * len(execution_plan)
|
||||
ordered_results: list[dict[str, Any] | None] = [None] * len(
|
||||
execution_plan
|
||||
)
|
||||
with ThreadPoolExecutor(max_workers=max_workers) as pool:
|
||||
futures = {
|
||||
pool.submit(
|
||||
@@ -803,7 +810,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
return tool_finish
|
||||
|
||||
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
|
||||
reasoning_message: LLMMessage = {
|
||||
reasoning_message = {
|
||||
"role": "user",
|
||||
"content": reasoning_prompt,
|
||||
}
|
||||
@@ -888,13 +895,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
args_dict = json.loads(func_args)
|
||||
except json.JSONDecodeError:
|
||||
args_dict = {}
|
||||
else:
|
||||
args_dict = func_args
|
||||
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id, original_tool)
|
||||
if parse_error is not None:
|
||||
return parse_error
|
||||
|
||||
if original_tool is None:
|
||||
for tool in self.original_tools or []:
|
||||
@@ -908,9 +911,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
elif (
|
||||
should_execute
|
||||
and original_tool
|
||||
and getattr(original_tool, "max_usage_count", None) is not None
|
||||
and getattr(original_tool, "current_usage_count", 0)
|
||||
>= original_tool.max_usage_count
|
||||
and (max_count := getattr(original_tool, "max_usage_count", None))
|
||||
is not None
|
||||
and getattr(original_tool, "current_usage_count", 0) >= max_count
|
||||
):
|
||||
max_usage_reached = True
|
||||
|
||||
@@ -989,13 +992,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
and hasattr(original_tool, "cache_function")
|
||||
and callable(original_tool.cache_function)
|
||||
):
|
||||
should_cache = original_tool.cache_function(args_dict, raw_result)
|
||||
should_cache = original_tool.cache_function(
|
||||
args_dict, raw_result
|
||||
)
|
||||
if should_cache:
|
||||
self.tools_handler.cache.add(
|
||||
tool=func_name, input=input_str, output=raw_result
|
||||
)
|
||||
|
||||
result = str(raw_result) if not isinstance(raw_result, str) else raw_result
|
||||
result = (
|
||||
str(raw_result) if not isinstance(raw_result, str) else raw_result
|
||||
)
|
||||
except Exception as e:
|
||||
result = f"Error executing tool: {e}"
|
||||
if self.task:
|
||||
@@ -1490,7 +1497,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
formatted_answer: Current agent response.
|
||||
"""
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
cb_result = self.step_callback(formatted_answer)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
def _append_message(
|
||||
self, text: str, role: Literal["user", "assistant", "system"] = "assistant"
|
||||
|
||||
@@ -2,8 +2,8 @@ import time
|
||||
from typing import TYPE_CHECKING, Any, TypeVar, cast
|
||||
import webbrowser
|
||||
|
||||
import httpx
|
||||
from pydantic import BaseModel, Field
|
||||
import requests
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.utils import validate_jwt_token
|
||||
@@ -98,7 +98,7 @@ class AuthenticationCommand:
|
||||
"scope": " ".join(self.oauth2_provider.get_oauth_scopes()),
|
||||
"audience": self.oauth2_provider.get_audience(),
|
||||
}
|
||||
response = requests.post(
|
||||
response = httpx.post(
|
||||
url=self.oauth2_provider.get_authorize_url(),
|
||||
data=device_code_payload,
|
||||
timeout=20,
|
||||
@@ -130,7 +130,7 @@ class AuthenticationCommand:
|
||||
|
||||
attempts = 0
|
||||
while True and attempts < 10:
|
||||
response = requests.post(
|
||||
response = httpx.post(
|
||||
self.oauth2_provider.get_token_url(), data=token_payload, timeout=30
|
||||
)
|
||||
token_data = response.json()
|
||||
@@ -149,7 +149,7 @@ class AuthenticationCommand:
|
||||
return
|
||||
|
||||
if token_data["error"] not in ("authorization_pending", "slow_down"):
|
||||
raise requests.HTTPError(
|
||||
raise httpx.HTTPError(
|
||||
token_data.get("error_description") or token_data.get("error")
|
||||
)
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import requests
|
||||
from requests.exceptions import JSONDecodeError
|
||||
import json
|
||||
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
@@ -30,16 +31,16 @@ class PlusAPIMixin:
|
||||
console.print("Run 'crewai login' to sign up/login.", style="bold green")
|
||||
raise SystemExit from None
|
||||
|
||||
def _validate_response(self, response: requests.Response) -> None:
|
||||
def _validate_response(self, response: httpx.Response) -> None:
|
||||
"""
|
||||
Handle and display error messages from API responses.
|
||||
|
||||
Args:
|
||||
response (requests.Response): The response from the Plus API
|
||||
response (httpx.Response): The response from the Plus API
|
||||
"""
|
||||
try:
|
||||
json_response = response.json()
|
||||
except (JSONDecodeError, ValueError):
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
console.print(
|
||||
"Failed to parse response from Enterprise API failed. Details:",
|
||||
style="bold red",
|
||||
@@ -62,7 +63,7 @@ class PlusAPIMixin:
|
||||
)
|
||||
raise SystemExit
|
||||
|
||||
if not response.ok:
|
||||
if not response.is_success:
|
||||
console.print(
|
||||
"Request to Enterprise API failed. Details:", style="bold red"
|
||||
)
|
||||
|
||||
@@ -69,7 +69,7 @@ ENV_VARS: dict[str, list[dict[str, Any]]] = {
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AWS Region Name (press Enter to skip)",
|
||||
"key_name": "AWS_REGION_NAME",
|
||||
"key_name": "AWS_DEFAULT_REGION",
|
||||
},
|
||||
],
|
||||
"azure": [
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
from typing import Any, cast
|
||||
|
||||
import requests
|
||||
from requests.exceptions import JSONDecodeError, RequestException
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.main import Oauth2Settings, ProviderFactory
|
||||
@@ -47,12 +47,12 @@ class EnterpriseConfigureCommand(BaseCommand):
|
||||
"User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
|
||||
"X-Crewai-Version": get_crewai_version(),
|
||||
}
|
||||
response = requests.get(oauth_endpoint, timeout=30, headers=headers)
|
||||
response = httpx.get(oauth_endpoint, timeout=30, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
try:
|
||||
oauth_config = response.json()
|
||||
except JSONDecodeError as e:
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValueError(f"Invalid JSON response from {oauth_endpoint}") from e
|
||||
|
||||
self._validate_oauth_config(oauth_config)
|
||||
@@ -62,7 +62,7 @@ class EnterpriseConfigureCommand(BaseCommand):
|
||||
)
|
||||
return cast(dict[str, Any], oauth_config)
|
||||
|
||||
except RequestException as e:
|
||||
except httpx.HTTPError as e:
|
||||
raise ValueError(f"Failed to connect to enterprise URL: {e!s}") from e
|
||||
except Exception as e:
|
||||
raise ValueError(f"Error fetching OAuth2 configuration: {e!s}") from e
|
||||
|
||||
@@ -290,13 +290,20 @@ class MemoryTUI(App[None]):
|
||||
if self._memory is None:
|
||||
panel.update(self._init_error or "No memory loaded.")
|
||||
return
|
||||
display_limit = 1000
|
||||
info = self._memory.info(path)
|
||||
self._last_scope_info = info
|
||||
self._entries = self._memory.list_records(scope=path, limit=200)
|
||||
self._entries = self._memory.list_records(scope=path, limit=display_limit)
|
||||
panel.update(_format_scope_info(info))
|
||||
panel.border_title = "Detail"
|
||||
entry_list = self.query_one("#entry-list", OptionList)
|
||||
entry_list.border_title = f"Entries ({len(self._entries)})"
|
||||
capped = info.record_count > display_limit
|
||||
count_label = (
|
||||
f"Entries (showing {display_limit} of {info.record_count} — display limit)"
|
||||
if capped
|
||||
else f"Entries ({len(self._entries)})"
|
||||
)
|
||||
entry_list.border_title = count_label
|
||||
self._populate_entry_list()
|
||||
|
||||
def on_option_list_option_highlighted(
|
||||
@@ -376,6 +383,11 @@ class MemoryTUI(App[None]):
|
||||
return
|
||||
|
||||
info_lines: list[str] = []
|
||||
info_lines.append(
|
||||
"[dim italic]Searched the full dataset"
|
||||
+ (f" within [bold]{scope}[/]" if scope else "")
|
||||
+ " using the recall flow (semantic + recency + importance).[/]\n"
|
||||
)
|
||||
if not self._custom_embedder:
|
||||
info_lines.append(
|
||||
"[dim italic]Note: Using default OpenAI embedder. "
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from requests import HTTPError
|
||||
from httpx import HTTPStatusError
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
@@ -10,11 +10,11 @@ console = Console()
|
||||
|
||||
|
||||
class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
def __init__(self):
|
||||
def __init__(self) -> None:
|
||||
BaseCommand.__init__(self)
|
||||
PlusAPIMixin.__init__(self, telemetry=self._telemetry)
|
||||
|
||||
def list(self):
|
||||
def list(self) -> None:
|
||||
try:
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
@@ -33,7 +33,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
table.add_row(org["name"], org["uuid"])
|
||||
|
||||
console.print(table)
|
||||
except HTTPError as e:
|
||||
except HTTPStatusError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
@@ -50,7 +50,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
)
|
||||
raise SystemExit(1) from e
|
||||
|
||||
def switch(self, org_id):
|
||||
def switch(self, org_id: str) -> None:
|
||||
try:
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
@@ -72,7 +72,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
f"Successfully switched to {org['name']} ({org['uuid']})",
|
||||
style="bold green",
|
||||
)
|
||||
except HTTPError as e:
|
||||
except HTTPStatusError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
@@ -87,7 +87,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
console.print(f"Failed to switch organization: {e!s}", style="bold red")
|
||||
raise SystemExit(1) from e
|
||||
|
||||
def current(self):
|
||||
def current(self) -> None:
|
||||
settings = Settings()
|
||||
if settings.org_uuid:
|
||||
console.print(
|
||||
|
||||
@@ -3,7 +3,6 @@ from typing import Any
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import httpx
|
||||
import requests
|
||||
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
|
||||
@@ -43,16 +42,16 @@ class PlusAPI:
|
||||
|
||||
def _make_request(
|
||||
self, method: str, endpoint: str, **kwargs: Any
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
url = urljoin(self.base_url, endpoint)
|
||||
session = requests.Session()
|
||||
session.trust_env = False
|
||||
return session.request(method, url, headers=self.headers, **kwargs)
|
||||
verify = kwargs.pop("verify", True)
|
||||
with httpx.Client(trust_env=False, verify=verify) as client:
|
||||
return client.request(method, url, headers=self.headers, **kwargs)
|
||||
|
||||
def login_to_tool_repository(self) -> requests.Response:
|
||||
def login_to_tool_repository(self) -> httpx.Response:
|
||||
return self._make_request("POST", f"{self.TOOLS_RESOURCE}/login")
|
||||
|
||||
def get_tool(self, handle: str) -> requests.Response:
|
||||
def get_tool(self, handle: str) -> httpx.Response:
|
||||
return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}")
|
||||
|
||||
async def get_agent(self, handle: str) -> httpx.Response:
|
||||
@@ -68,7 +67,7 @@ class PlusAPI:
|
||||
description: str | None,
|
||||
encoded_file: str,
|
||||
available_exports: list[dict[str, Any]] | None = None,
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
params = {
|
||||
"handle": handle,
|
||||
"public": is_public,
|
||||
@@ -79,54 +78,52 @@ class PlusAPI:
|
||||
}
|
||||
return self._make_request("POST", f"{self.TOOLS_RESOURCE}", json=params)
|
||||
|
||||
def deploy_by_name(self, project_name: str) -> requests.Response:
|
||||
def deploy_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST", f"{self.CREWS_RESOURCE}/by-name/{project_name}/deploy"
|
||||
)
|
||||
|
||||
def deploy_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def deploy_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("POST", f"{self.CREWS_RESOURCE}/{uuid}/deploy")
|
||||
|
||||
def crew_status_by_name(self, project_name: str) -> requests.Response:
|
||||
def crew_status_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/status"
|
||||
)
|
||||
|
||||
def crew_status_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def crew_status_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("GET", f"{self.CREWS_RESOURCE}/{uuid}/status")
|
||||
|
||||
def crew_by_name(
|
||||
self, project_name: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/logs/{log_type}"
|
||||
)
|
||||
|
||||
def crew_by_uuid(
|
||||
self, uuid: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
def crew_by_uuid(self, uuid: str, log_type: str = "deployment") -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/{uuid}/logs/{log_type}"
|
||||
)
|
||||
|
||||
def delete_crew_by_name(self, project_name: str) -> requests.Response:
|
||||
def delete_crew_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"DELETE", f"{self.CREWS_RESOURCE}/by-name/{project_name}"
|
||||
)
|
||||
|
||||
def delete_crew_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def delete_crew_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("DELETE", f"{self.CREWS_RESOURCE}/{uuid}")
|
||||
|
||||
def list_crews(self) -> requests.Response:
|
||||
def list_crews(self) -> httpx.Response:
|
||||
return self._make_request("GET", self.CREWS_RESOURCE)
|
||||
|
||||
def create_crew(self, payload: dict[str, Any]) -> requests.Response:
|
||||
def create_crew(self, payload: dict[str, Any]) -> httpx.Response:
|
||||
return self._make_request("POST", self.CREWS_RESOURCE, json=payload)
|
||||
|
||||
def get_organizations(self) -> requests.Response:
|
||||
def get_organizations(self) -> httpx.Response:
|
||||
return self._make_request("GET", self.ORGANIZATIONS_RESOURCE)
|
||||
|
||||
def initialize_trace_batch(self, payload: dict[str, Any]) -> requests.Response:
|
||||
def initialize_trace_batch(self, payload: dict[str, Any]) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.TRACING_RESOURCE}/batches",
|
||||
@@ -136,7 +133,7 @@ class PlusAPI:
|
||||
|
||||
def initialize_ephemeral_trace_batch(
|
||||
self, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches",
|
||||
@@ -145,7 +142,7 @@ class PlusAPI:
|
||||
|
||||
def send_trace_events(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
@@ -155,7 +152,7 @@ class PlusAPI:
|
||||
|
||||
def send_ephemeral_trace_events(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
@@ -165,7 +162,7 @@ class PlusAPI:
|
||||
|
||||
def finalize_trace_batch(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
@@ -175,7 +172,7 @@ class PlusAPI:
|
||||
|
||||
def finalize_ephemeral_trace_batch(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
@@ -185,7 +182,7 @@ class PlusAPI:
|
||||
|
||||
def mark_trace_batch_as_failed(
|
||||
self, trace_batch_id: str, error_message: str
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}",
|
||||
@@ -193,13 +190,11 @@ class PlusAPI:
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
def get_triggers(self) -> requests.Response:
|
||||
def get_triggers(self) -> httpx.Response:
|
||||
"""Get all available triggers from integrations."""
|
||||
return self._make_request("GET", f"{self.INTEGRATIONS_RESOURCE}/apps")
|
||||
|
||||
def get_trigger_payload(
|
||||
self, app_slug: str, trigger_slug: str
|
||||
) -> requests.Response:
|
||||
def get_trigger_payload(self, app_slug: str, trigger_slug: str) -> httpx.Response:
|
||||
"""Get sample payload for a specific trigger."""
|
||||
return self._make_request(
|
||||
"GET", f"{self.INTEGRATIONS_RESOURCE}/{app_slug}/{trigger_slug}/payload"
|
||||
|
||||
@@ -8,7 +8,7 @@ from typing import Any
|
||||
|
||||
import certifi
|
||||
import click
|
||||
import requests
|
||||
import httpx
|
||||
|
||||
from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS
|
||||
|
||||
@@ -165,20 +165,20 @@ def fetch_provider_data(cache_file: Path) -> dict[str, Any] | None:
|
||||
ssl_config = os.environ["SSL_CERT_FILE"] = certifi.where()
|
||||
|
||||
try:
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
return data
|
||||
except requests.RequestException as e:
|
||||
with httpx.stream("GET", JSON_URL, timeout=60, verify=ssl_config) as response:
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
return data
|
||||
except httpx.HTTPError as e:
|
||||
click.secho(f"Error fetching provider data: {e}", fg="red")
|
||||
except json.JSONDecodeError:
|
||||
click.secho("Error parsing provider data. Invalid JSON format.", fg="red")
|
||||
return None
|
||||
|
||||
|
||||
def download_data(response: requests.Response) -> dict[str, Any]:
|
||||
def download_data(response: httpx.Response) -> dict[str, Any]:
|
||||
"""Downloads data from a given HTTP response and returns the JSON content.
|
||||
|
||||
Args:
|
||||
@@ -194,7 +194,7 @@ def download_data(response: requests.Response) -> dict[str, Any]:
|
||||
with click.progressbar(
|
||||
length=total_size, label="Downloading", show_pos=True
|
||||
) as bar:
|
||||
for chunk in response.iter_content(block_size):
|
||||
for chunk in response.iter_bytes(block_size):
|
||||
if chunk:
|
||||
data_chunks.append(chunk)
|
||||
bar.update(len(chunk))
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Coroutine
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from datetime import datetime
|
||||
import inspect
|
||||
import json
|
||||
import threading
|
||||
from typing import TYPE_CHECKING, Any, Literal, cast
|
||||
@@ -64,6 +66,7 @@ from crewai.utilities.agent_utils import (
|
||||
has_reached_max_iterations,
|
||||
is_context_length_exceeded,
|
||||
is_inside_event_loop,
|
||||
parse_tool_call_args,
|
||||
process_llm_response,
|
||||
track_delegation_if_needed,
|
||||
)
|
||||
@@ -778,7 +781,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
from_cache = cast(bool, execution_result["from_cache"])
|
||||
original_tool = execution_result["original_tool"]
|
||||
|
||||
tool_message: LLMMessage = {
|
||||
tool_message = {
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"name": func_name,
|
||||
@@ -846,13 +849,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
call_id, func_name, func_args = info
|
||||
|
||||
# Parse arguments
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
args_dict = json.loads(func_args)
|
||||
except json.JSONDecodeError:
|
||||
args_dict = {}
|
||||
else:
|
||||
args_dict = func_args
|
||||
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id)
|
||||
if parse_error is not None:
|
||||
return parse_error
|
||||
|
||||
# Get agent_key for event tracking
|
||||
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"
|
||||
@@ -1358,7 +1357,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
formatted_answer: Current agent response.
|
||||
"""
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
cb_result = self.step_callback(formatted_answer)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
def _append_message_to_state(
|
||||
self, text: str, role: Literal["user", "assistant", "system"] = "assistant"
|
||||
|
||||
@@ -2,10 +2,10 @@ from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
import time
|
||||
from functools import wraps
|
||||
import inspect
|
||||
import json
|
||||
import time
|
||||
from types import MethodType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
@@ -49,15 +49,20 @@ from crewai.events.types.agent_events import (
|
||||
LiteAgentExecutionErrorEvent,
|
||||
LiteAgentExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.logging_events import AgentLogsExecutionEvent
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemoryRetrievalFailedEvent,
|
||||
MemoryRetrievalStartedEvent,
|
||||
)
|
||||
from crewai.events.types.logging_events import AgentLogsExecutionEvent
|
||||
from crewai.flow.flow_trackable import FlowTrackable
|
||||
from crewai.hooks.llm_hooks import get_after_llm_call_hooks, get_before_llm_call_hooks
|
||||
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
|
||||
from crewai.hooks.types import (
|
||||
AfterLLMCallHookCallable,
|
||||
AfterLLMCallHookType,
|
||||
BeforeLLMCallHookCallable,
|
||||
BeforeLLMCallHookType,
|
||||
)
|
||||
from crewai.lite_agent_output import LiteAgentOutput
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
@@ -270,11 +275,11 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
_guardrail: GuardrailCallable | None = PrivateAttr(default=None)
|
||||
_guardrail_retry_count: int = PrivateAttr(default=0)
|
||||
_callbacks: list[TokenCalcHandler] = PrivateAttr(default_factory=list)
|
||||
_before_llm_call_hooks: list[BeforeLLMCallHookType] = PrivateAttr(
|
||||
default_factory=get_before_llm_call_hooks
|
||||
_before_llm_call_hooks: list[BeforeLLMCallHookType | BeforeLLMCallHookCallable] = (
|
||||
PrivateAttr(default_factory=get_before_llm_call_hooks)
|
||||
)
|
||||
_after_llm_call_hooks: list[AfterLLMCallHookType] = PrivateAttr(
|
||||
default_factory=get_after_llm_call_hooks
|
||||
_after_llm_call_hooks: list[AfterLLMCallHookType | AfterLLMCallHookCallable] = (
|
||||
PrivateAttr(default_factory=get_after_llm_call_hooks)
|
||||
)
|
||||
_memory: Any = PrivateAttr(default=None)
|
||||
|
||||
@@ -440,12 +445,16 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
return self.role
|
||||
|
||||
@property
|
||||
def before_llm_call_hooks(self) -> list[BeforeLLMCallHookType]:
|
||||
def before_llm_call_hooks(
|
||||
self,
|
||||
) -> list[BeforeLLMCallHookType | BeforeLLMCallHookCallable]:
|
||||
"""Get the before_llm_call hooks for this agent."""
|
||||
return self._before_llm_call_hooks
|
||||
|
||||
@property
|
||||
def after_llm_call_hooks(self) -> list[AfterLLMCallHookType]:
|
||||
def after_llm_call_hooks(
|
||||
self,
|
||||
) -> list[AfterLLMCallHookType | AfterLLMCallHookCallable]:
|
||||
"""Get the after_llm_call hooks for this agent."""
|
||||
return self._after_llm_call_hooks
|
||||
|
||||
@@ -482,11 +491,12 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
# Inject memory tools once if memory is configured (mirrors Agent._prepare_kickoff)
|
||||
if self._memory is not None:
|
||||
from crewai.tools.memory_tools import create_memory_tools
|
||||
from crewai.utilities.agent_utils import sanitize_tool_name
|
||||
from crewai.utilities.string_utils import sanitize_tool_name
|
||||
|
||||
existing_names = {sanitize_tool_name(t.name) for t in self._parsed_tools}
|
||||
memory_tools = [
|
||||
mt for mt in create_memory_tools(self._memory)
|
||||
mt
|
||||
for mt in create_memory_tools(self._memory)
|
||||
if sanitize_tool_name(mt.name) not in existing_names
|
||||
]
|
||||
if memory_tools:
|
||||
@@ -565,9 +575,10 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
if memory_block:
|
||||
formatted = self.i18n.slice("memory").format(memory=memory_block)
|
||||
if self._messages and self._messages[0].get("role") == "system":
|
||||
self._messages[0]["content"] = (
|
||||
self._messages[0].get("content", "") + "\n\n" + formatted
|
||||
)
|
||||
existing_content = self._messages[0].get("content", "")
|
||||
if not isinstance(existing_content, str):
|
||||
existing_content = ""
|
||||
self._messages[0]["content"] = existing_content + "\n\n" + formatted
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalCompletedEvent(
|
||||
@@ -588,16 +599,12 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
def _save_to_memory(self, output_text: str) -> None:
|
||||
"""Extract discrete memories from the run and remember each. No-op if _memory is None."""
|
||||
if self._memory is None:
|
||||
"""Extract discrete memories from the run and remember each. No-op if _memory is None or read-only."""
|
||||
if self._memory is None or getattr(self._memory, "_read_only", False):
|
||||
return
|
||||
input_str = self._get_last_user_content() or "User request"
|
||||
try:
|
||||
raw = (
|
||||
f"Input: {input_str}\n"
|
||||
f"Agent: {self.role}\n"
|
||||
f"Result: {output_text}"
|
||||
)
|
||||
raw = f"Input: {input_str}\nAgent: {self.role}\nResult: {output_text}"
|
||||
extracted = self._memory.extract_memories(raw)
|
||||
if extracted:
|
||||
self._memory.remember_many(extracted, agent_role=self.role)
|
||||
@@ -622,13 +629,20 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
# Execute the agent using invoke loop
|
||||
agent_finish = self._invoke_loop()
|
||||
active_response_format = response_format or self.response_format
|
||||
agent_finish = self._invoke_loop(response_model=active_response_format)
|
||||
if self._memory is not None:
|
||||
self._save_to_memory(agent_finish.output)
|
||||
output_text = (
|
||||
agent_finish.output.model_dump_json()
|
||||
if isinstance(agent_finish.output, BaseModel)
|
||||
else agent_finish.output
|
||||
)
|
||||
self._save_to_memory(output_text)
|
||||
formatted_result: BaseModel | None = None
|
||||
|
||||
active_response_format = response_format or self.response_format
|
||||
if active_response_format:
|
||||
if isinstance(agent_finish.output, BaseModel):
|
||||
formatted_result = agent_finish.output
|
||||
elif active_response_format:
|
||||
try:
|
||||
model_schema = generate_model_description(active_response_format)
|
||||
schema = json.dumps(model_schema, indent=2)
|
||||
@@ -660,8 +674,13 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
usage_metrics = self._token_process.get_summary()
|
||||
|
||||
# Create output
|
||||
raw_output = (
|
||||
agent_finish.output.model_dump_json()
|
||||
if isinstance(agent_finish.output, BaseModel)
|
||||
else agent_finish.output
|
||||
)
|
||||
output = LiteAgentOutput(
|
||||
raw=agent_finish.output,
|
||||
raw=raw_output,
|
||||
pydantic=formatted_result,
|
||||
agent_role=self.role,
|
||||
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
|
||||
@@ -838,10 +857,15 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
return formatted_messages
|
||||
|
||||
def _invoke_loop(self) -> AgentFinish:
|
||||
def _invoke_loop(
|
||||
self, response_model: type[BaseModel] | None = None
|
||||
) -> AgentFinish:
|
||||
"""
|
||||
Run the agent's thought process until it reaches a conclusion or max iterations.
|
||||
|
||||
Args:
|
||||
response_model: Optional Pydantic model for native structured output.
|
||||
|
||||
Returns:
|
||||
AgentFinish: The final result of the agent execution.
|
||||
"""
|
||||
@@ -870,12 +894,19 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
printer=self._printer,
|
||||
from_agent=self,
|
||||
executor_context=self,
|
||||
response_model=response_model,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if isinstance(answer, BaseModel):
|
||||
formatted_answer = AgentFinish(
|
||||
thought="", output=answer, text=answer.model_dump_json()
|
||||
)
|
||||
break
|
||||
|
||||
formatted_answer = process_llm_response(
|
||||
cast(str, answer), self.use_stop_words
|
||||
)
|
||||
@@ -901,7 +932,7 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
self._append_message(formatted_answer.text, role="assistant")
|
||||
except OutputParserError as e: # noqa: PERF203
|
||||
except OutputParserError as e:
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
content="Failed to parse LLM output. Retrying...",
|
||||
|
||||
@@ -234,7 +234,7 @@ class BedrockCompletion(BaseLLM):
|
||||
aws_access_key_id: str | None = None,
|
||||
aws_secret_access_key: str | None = None,
|
||||
aws_session_token: str | None = None,
|
||||
region_name: str = "us-east-1",
|
||||
region_name: str | None = None,
|
||||
temperature: float | None = None,
|
||||
max_tokens: int | None = None,
|
||||
top_p: float | None = None,
|
||||
@@ -287,15 +287,6 @@ class BedrockCompletion(BaseLLM):
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
# Initialize Bedrock client with proper configuration
|
||||
session = Session(
|
||||
aws_access_key_id=aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID"),
|
||||
aws_secret_access_key=aws_secret_access_key
|
||||
or os.getenv("AWS_SECRET_ACCESS_KEY"),
|
||||
aws_session_token=aws_session_token or os.getenv("AWS_SESSION_TOKEN"),
|
||||
region_name=region_name,
|
||||
)
|
||||
|
||||
# Configure client with timeouts and retries following AWS best practices
|
||||
config = Config(
|
||||
read_timeout=300,
|
||||
@@ -306,8 +297,12 @@ class BedrockCompletion(BaseLLM):
|
||||
tcp_keepalive=True,
|
||||
)
|
||||
|
||||
self.client = session.client("bedrock-runtime", config=config)
|
||||
self.region_name = region_name
|
||||
self.region_name = (
|
||||
region_name
|
||||
or os.getenv("AWS_DEFAULT_REGION")
|
||||
or os.getenv("AWS_REGION_NAME")
|
||||
or "us-east-1"
|
||||
)
|
||||
|
||||
self.aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
|
||||
self.aws_secret_access_key = aws_secret_access_key or os.getenv(
|
||||
@@ -315,6 +310,16 @@ class BedrockCompletion(BaseLLM):
|
||||
)
|
||||
self.aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
|
||||
|
||||
# Initialize Bedrock client with proper configuration
|
||||
session = Session(
|
||||
aws_access_key_id=self.aws_access_key_id,
|
||||
aws_secret_access_key=self.aws_secret_access_key,
|
||||
aws_session_token=self.aws_session_token,
|
||||
region_name=self.region_name,
|
||||
)
|
||||
|
||||
self.client = session.client("bedrock-runtime", config=config)
|
||||
|
||||
self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None
|
||||
self._async_client_initialized = False
|
||||
|
||||
|
||||
@@ -894,7 +894,7 @@ class GeminiCompletion(BaseLLM):
|
||||
content = self._extract_text_from_response(response)
|
||||
|
||||
effective_response_model = None if self.tools else response_model
|
||||
if not effective_response_model:
|
||||
if not response_model:
|
||||
content = self._apply_stop_words(content)
|
||||
|
||||
return self._finalize_completion_response(
|
||||
|
||||
@@ -1,6 +1,14 @@
|
||||
"""Memory module: unified Memory with LLM analysis and pluggable storage."""
|
||||
"""Memory module: unified Memory with LLM analysis and pluggable storage.
|
||||
|
||||
Heavy dependencies are lazily imported so that
|
||||
``import crewai`` does not initialise at runtime — critical for
|
||||
Celery pre-fork and similar deployment patterns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from crewai.memory.encoding_flow import EncodingFlow
|
||||
from crewai.memory.memory_scope import MemoryScope, MemorySlice
|
||||
from crewai.memory.types import (
|
||||
MemoryMatch,
|
||||
@@ -10,7 +18,24 @@ from crewai.memory.types import (
|
||||
embed_text,
|
||||
embed_texts,
|
||||
)
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
|
||||
"Memory": ("crewai.memory.unified_memory", "Memory"),
|
||||
"EncodingFlow": ("crewai.memory.encoding_flow", "EncodingFlow"),
|
||||
}
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
|
||||
"""Lazily import Memory / EncodingFlow to avoid pulling in lancedb at import time."""
|
||||
if name in _LAZY_IMPORTS:
|
||||
import importlib
|
||||
|
||||
module_path, attr = _LAZY_IMPORTS[name]
|
||||
mod = importlib.import_module(module_path)
|
||||
val = getattr(mod, attr)
|
||||
globals()[name] = val
|
||||
return val
|
||||
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
||||
|
||||
|
||||
__all__ = [
|
||||
|
||||
@@ -145,7 +145,7 @@ class MemoryScope:
|
||||
|
||||
|
||||
class MemorySlice:
|
||||
"""View over multiple scopes: recall searches all, remember requires explicit scope unless read_only."""
|
||||
"""View over multiple scopes: recall searches all, remember is a no-op when read_only."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -160,7 +160,7 @@ class MemorySlice:
|
||||
memory: The underlying Memory instance.
|
||||
scopes: List of scope paths to include.
|
||||
categories: Optional category filter for recall.
|
||||
read_only: If True, remember() raises PermissionError.
|
||||
read_only: If True, remember() is a silent no-op.
|
||||
"""
|
||||
self._memory = memory
|
||||
self._scopes = [s.rstrip("/") or "/" for s in scopes]
|
||||
@@ -176,10 +176,10 @@ class MemorySlice:
|
||||
importance: float | None = None,
|
||||
source: str | None = None,
|
||||
private: bool = False,
|
||||
) -> MemoryRecord:
|
||||
"""Remember into an explicit scope. Required when read_only=False."""
|
||||
) -> MemoryRecord | None:
|
||||
"""Remember into an explicit scope. No-op when read_only=True."""
|
||||
if self._read_only:
|
||||
raise PermissionError("This MemorySlice is read-only")
|
||||
return None
|
||||
return self._memory.remember(
|
||||
content,
|
||||
scope=scope,
|
||||
|
||||
@@ -53,6 +53,7 @@ class LanceDBStorage:
|
||||
path: str | Path | None = None,
|
||||
table_name: str = "memories",
|
||||
vector_dim: int | None = None,
|
||||
compact_every: int = 100,
|
||||
) -> None:
|
||||
"""Initialize LanceDB storage.
|
||||
|
||||
@@ -64,6 +65,10 @@ class LanceDBStorage:
|
||||
vector_dim: Dimensionality of the embedding vector. When ``None``
|
||||
(default), the dimension is auto-detected from the existing
|
||||
table schema or from the first saved embedding.
|
||||
compact_every: Number of ``save()`` calls between automatic
|
||||
background compactions. Each ``save()`` creates one new
|
||||
fragment file; compaction merges them, keeping query
|
||||
performance consistent. Set to 0 to disable.
|
||||
"""
|
||||
if path is None:
|
||||
storage_dir = os.environ.get("CREWAI_STORAGE_DIR")
|
||||
@@ -78,6 +83,22 @@ class LanceDBStorage:
|
||||
self._table_name = table_name
|
||||
self._db = lancedb.connect(str(self._path))
|
||||
|
||||
# On macOS and Linux the default per-process open-file limit is 256.
|
||||
# A LanceDB table stores one file per fragment (one fragment per save()
|
||||
# call by default). With hundreds of fragments, a single full-table
|
||||
# scan opens all of them simultaneously, exhausting the limit.
|
||||
# Raise it proactively so scans on large tables never hit OS error 24.
|
||||
try:
|
||||
import resource
|
||||
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
if soft < 4096:
|
||||
resource.setrlimit(resource.RLIMIT_NOFILE, (min(hard, 4096), hard))
|
||||
except Exception: # noqa: S110
|
||||
pass # Windows or already at the max hard limit — safe to ignore
|
||||
|
||||
self._compact_every = compact_every
|
||||
self._save_count = 0
|
||||
|
||||
# Get or create a shared write lock for this database path.
|
||||
resolved = str(self._path.resolve())
|
||||
with LanceDBStorage._path_locks_guard:
|
||||
@@ -91,6 +112,11 @@ class LanceDBStorage:
|
||||
try:
|
||||
self._table: lancedb.table.Table | None = self._db.open_table(self._table_name)
|
||||
self._vector_dim: int = self._infer_dim_from_table(self._table)
|
||||
# Best-effort: create the scope index if it doesn't exist yet.
|
||||
self._ensure_scope_index()
|
||||
# Compact in the background if the table has accumulated many
|
||||
# fragments from previous runs (each save() creates one).
|
||||
self._compact_if_needed()
|
||||
except Exception:
|
||||
self._table = None
|
||||
self._vector_dim = vector_dim or 0 # 0 = not yet known
|
||||
@@ -178,6 +204,56 @@ class LanceDBStorage:
|
||||
table.delete("id = '__schema_placeholder__'")
|
||||
return table
|
||||
|
||||
def _ensure_scope_index(self) -> None:
|
||||
"""Create a BTREE scalar index on the ``scope`` column if not present.
|
||||
|
||||
A scalar index lets LanceDB skip a full table scan when filtering by
|
||||
scope prefix, which is the hot path for ``list_records``,
|
||||
``get_scope_info``, and ``list_scopes``. The call is best-effort:
|
||||
if the table is empty or the index already exists the exception is
|
||||
swallowed silently.
|
||||
"""
|
||||
if self._table is None:
|
||||
return
|
||||
try:
|
||||
self._table.create_scalar_index("scope", index_type="BTREE", replace=False)
|
||||
except Exception: # noqa: S110
|
||||
pass # index already exists, table empty, or unsupported version
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Automatic background compaction
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _compact_if_needed(self) -> None:
|
||||
"""Spawn a background compaction on startup.
|
||||
|
||||
Called whenever an existing table is opened so that fragments
|
||||
accumulated in previous sessions are silently merged before the
|
||||
first query. ``optimize()`` returns quickly when the table is
|
||||
already compact, so the cost is negligible in the common case.
|
||||
"""
|
||||
if self._table is None or self._compact_every <= 0:
|
||||
return
|
||||
self._compact_async()
|
||||
|
||||
def _compact_async(self) -> None:
|
||||
"""Fire-and-forget: compact the table in a daemon background thread."""
|
||||
threading.Thread(
|
||||
target=self._compact_safe,
|
||||
daemon=True,
|
||||
name="lancedb-compact",
|
||||
).start()
|
||||
|
||||
def _compact_safe(self) -> None:
|
||||
"""Run ``table.optimize()`` in a background thread, absorbing errors."""
|
||||
try:
|
||||
if self._table is not None:
|
||||
self._table.optimize()
|
||||
# Refresh the scope index so new fragments are covered.
|
||||
self._ensure_scope_index()
|
||||
except Exception:
|
||||
_logger.debug("LanceDB background compaction failed", exc_info=True)
|
||||
|
||||
def _ensure_table(self, vector_dim: int | None = None) -> lancedb.table.Table:
|
||||
"""Return the table, creating it lazily if needed.
|
||||
|
||||
@@ -239,6 +315,7 @@ class LanceDBStorage:
|
||||
if r.embedding and len(r.embedding) > 0:
|
||||
dim = len(r.embedding)
|
||||
break
|
||||
is_new_table = self._table is None
|
||||
with self._write_lock:
|
||||
self._ensure_table(vector_dim=dim)
|
||||
rows = [self._record_to_row(r) for r in records]
|
||||
@@ -246,6 +323,13 @@ class LanceDBStorage:
|
||||
if r["vector"] is None or len(r["vector"]) != self._vector_dim:
|
||||
r["vector"] = [0.0] * self._vector_dim
|
||||
self._retry_write("add", rows)
|
||||
# Create the scope index on the first save so it covers the initial dataset.
|
||||
if is_new_table:
|
||||
self._ensure_scope_index()
|
||||
# Auto-compact every N saves so fragment files don't pile up.
|
||||
self._save_count += 1
|
||||
if self._compact_every > 0 and self._save_count % self._compact_every == 0:
|
||||
self._compact_async()
|
||||
|
||||
def update(self, record: MemoryRecord) -> None:
|
||||
"""Update a record by ID. Preserves created_at, updates last_accessed."""
|
||||
@@ -261,6 +345,10 @@ class LanceDBStorage:
|
||||
def touch_records(self, record_ids: list[str]) -> None:
|
||||
"""Update last_accessed to now for the given record IDs.
|
||||
|
||||
Uses a single batch ``table.update()`` call instead of N
|
||||
delete-and-re-add cycles, which is both faster and avoids
|
||||
unnecessary write amplification.
|
||||
|
||||
Args:
|
||||
record_ids: IDs of records to touch.
|
||||
"""
|
||||
@@ -268,25 +356,20 @@ class LanceDBStorage:
|
||||
return
|
||||
with self._write_lock:
|
||||
now = datetime.utcnow().isoformat()
|
||||
for rid in record_ids:
|
||||
safe_id = str(rid).replace("'", "''")
|
||||
rows = (
|
||||
self._table.search([0.0] * self._vector_dim)
|
||||
.where(f"id = '{safe_id}'")
|
||||
.limit(1)
|
||||
.to_list()
|
||||
)
|
||||
if rows:
|
||||
rows[0]["last_accessed"] = now
|
||||
self._retry_write("delete", f"id = '{safe_id}'")
|
||||
self._retry_write("add", [rows[0]])
|
||||
safe_ids = [str(rid).replace("'", "''") for rid in record_ids]
|
||||
ids_expr = ", ".join(f"'{rid}'" for rid in safe_ids)
|
||||
self._retry_write(
|
||||
"update",
|
||||
where=f"id IN ({ids_expr})",
|
||||
values={"last_accessed": now},
|
||||
)
|
||||
|
||||
def get_record(self, record_id: str) -> MemoryRecord | None:
|
||||
"""Return a single record by ID, or None if not found."""
|
||||
if self._table is None:
|
||||
return None
|
||||
safe_id = str(record_id).replace("'", "''")
|
||||
rows = self._table.search([0.0] * self._vector_dim).where(f"id = '{safe_id}'").limit(1).to_list()
|
||||
rows = self._table.search().where(f"id = '{safe_id}'").limit(1).to_list()
|
||||
if not rows:
|
||||
return None
|
||||
return self._row_to_record(rows[0])
|
||||
@@ -374,13 +457,31 @@ class LanceDBStorage:
|
||||
self._retry_write("delete", where_expr)
|
||||
return before - self._table.count_rows()
|
||||
|
||||
def _scan_rows(self, scope_prefix: str | None = None, limit: int = _SCAN_ROWS_LIMIT) -> list[dict[str, Any]]:
|
||||
"""Scan rows optionally filtered by scope prefix."""
|
||||
def _scan_rows(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
limit: int = _SCAN_ROWS_LIMIT,
|
||||
columns: list[str] | None = None,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Scan rows optionally filtered by scope prefix.
|
||||
|
||||
Uses a full table scan (no vector query) so the limit is applied after
|
||||
the scope filter, not to ANN candidates before filtering.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path prefix to filter by.
|
||||
limit: Maximum number of rows to return (applied after filtering).
|
||||
columns: Optional list of column names to fetch. Pass only the
|
||||
columns you need for metadata operations to avoid reading the
|
||||
heavy ``vector`` column unnecessarily.
|
||||
"""
|
||||
if self._table is None:
|
||||
return []
|
||||
q = self._table.search([0.0] * self._vector_dim)
|
||||
q = self._table.search()
|
||||
if scope_prefix is not None and scope_prefix.strip("/"):
|
||||
q = q.where(f"scope LIKE '{scope_prefix.rstrip('/')}%'")
|
||||
if columns is not None:
|
||||
q = q.select(columns)
|
||||
return q.limit(limit).to_list()
|
||||
|
||||
def list_records(
|
||||
@@ -406,7 +507,10 @@ class LanceDBStorage:
|
||||
prefix = scope if scope != "/" else ""
|
||||
if prefix and not prefix.startswith("/"):
|
||||
prefix = "/" + prefix
|
||||
rows = self._scan_rows(prefix or None)
|
||||
rows = self._scan_rows(
|
||||
prefix or None,
|
||||
columns=["scope", "categories_str", "created_at"],
|
||||
)
|
||||
if not rows:
|
||||
return ScopeInfo(
|
||||
path=scope or "/",
|
||||
@@ -453,7 +557,7 @@ class LanceDBStorage:
|
||||
def list_scopes(self, parent: str = "/") -> list[str]:
|
||||
parent = parent.rstrip("/") or ""
|
||||
prefix = (parent + "/") if parent else "/"
|
||||
rows = self._scan_rows(prefix if prefix != "/" else None)
|
||||
rows = self._scan_rows(prefix if prefix != "/" else None, columns=["scope"])
|
||||
children: set[str] = set()
|
||||
for row in rows:
|
||||
sc = str(row.get("scope", ""))
|
||||
@@ -465,7 +569,7 @@ class LanceDBStorage:
|
||||
return sorted(children)
|
||||
|
||||
def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
|
||||
rows = self._scan_rows(scope_prefix)
|
||||
rows = self._scan_rows(scope_prefix, columns=["categories_str"])
|
||||
counts: dict[str, int] = {}
|
||||
for row in rows:
|
||||
cat_str = row.get("categories_str") or "[]"
|
||||
@@ -498,6 +602,21 @@ class LanceDBStorage:
|
||||
if prefix:
|
||||
self._table.delete(f"scope >= '{prefix}' AND scope < '{prefix}/\uFFFF'")
|
||||
|
||||
def optimize(self) -> None:
|
||||
"""Compact the table synchronously and refresh the scope index.
|
||||
|
||||
Under normal usage this is called automatically in the background
|
||||
(every ``compact_every`` saves and on startup when the table is
|
||||
fragmented). Call this explicitly only when you need the compaction
|
||||
to be complete before the next operation — for example immediately
|
||||
after a large bulk import, before a latency-sensitive recall.
|
||||
It is a no-op if the table does not exist.
|
||||
"""
|
||||
if self._table is None:
|
||||
return
|
||||
self._table.optimize()
|
||||
self._ensure_scope_index()
|
||||
|
||||
async def asave(self, records: list[MemoryRecord]) -> None:
|
||||
self.save(records)
|
||||
|
||||
|
||||
@@ -87,6 +87,26 @@ class MemoryMatch(BaseModel):
|
||||
description="Information the system looked for but could not find.",
|
||||
)
|
||||
|
||||
def format(self) -> str:
|
||||
"""Format this match as a human-readable string including metadata.
|
||||
|
||||
Returns:
|
||||
A multi-line string with score, content, categories, and non-empty
|
||||
metadata fields.
|
||||
"""
|
||||
lines = [f"- (score={self.score:.2f}) {self.record.content}"]
|
||||
if self.record.categories:
|
||||
lines.append(f" categories: {', '.join(self.record.categories)}")
|
||||
if self.record.metadata:
|
||||
for key, value in self.record.metadata.items():
|
||||
if value:
|
||||
if isinstance(value, list):
|
||||
rendered_value = ", ".join(str(item) for item in value)
|
||||
else:
|
||||
rendered_value = str(value)
|
||||
lines.append(f" {key}: {rendered_value}")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
class ScopeInfo(BaseModel):
|
||||
"""Information about a scope in the memory hierarchy."""
|
||||
|
||||
@@ -21,7 +21,6 @@ from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.memory.analyze import extract_memories_from_content
|
||||
from crewai.memory.recall_flow import RecallFlow
|
||||
from crewai.memory.storage.backend import StorageBackend
|
||||
from crewai.memory.storage.lancedb_storage import LanceDBStorage
|
||||
from crewai.memory.types import (
|
||||
MemoryConfig,
|
||||
MemoryMatch,
|
||||
@@ -88,6 +87,10 @@ class Memory:
|
||||
# Queries shorter than this skip LLM analysis (saving ~1-3s).
|
||||
# Longer queries (full task descriptions) benefit from LLM distillation.
|
||||
query_analysis_threshold: int = 200,
|
||||
# When True, all write operations (remember, remember_many) are silently
|
||||
# skipped. Useful for sharing a read-only view of memory across agents
|
||||
# without any of them persisting new memories.
|
||||
read_only: bool = False,
|
||||
) -> None:
|
||||
"""Initialize Memory.
|
||||
|
||||
@@ -107,7 +110,9 @@ class Memory:
|
||||
complex_query_threshold: For complex queries, explore deeper below this confidence.
|
||||
exploration_budget: Number of LLM-driven exploration rounds during deep recall.
|
||||
query_analysis_threshold: Queries shorter than this skip LLM analysis during deep recall.
|
||||
read_only: If True, remember() and remember_many() are silent no-ops.
|
||||
"""
|
||||
self._read_only = read_only
|
||||
self._config = MemoryConfig(
|
||||
recency_weight=recency_weight,
|
||||
semantic_weight=semantic_weight,
|
||||
@@ -133,11 +138,10 @@ class Memory:
|
||||
embedder if (embedder is not None and not isinstance(embedder, dict)) else None
|
||||
)
|
||||
|
||||
# Storage is initialized eagerly (local, no API key needed).
|
||||
if storage == "lancedb":
|
||||
self._storage = LanceDBStorage()
|
||||
elif isinstance(storage, str):
|
||||
self._storage = LanceDBStorage(path=storage)
|
||||
if isinstance(storage, str):
|
||||
from crewai.memory.storage.lancedb_storage import LanceDBStorage
|
||||
|
||||
self._storage = LanceDBStorage() if storage == "lancedb" else LanceDBStorage(path=storage)
|
||||
else:
|
||||
self._storage = storage
|
||||
|
||||
@@ -335,11 +339,13 @@ class Memory:
|
||||
agent_role: Optional agent role for event metadata.
|
||||
|
||||
Returns:
|
||||
The created MemoryRecord.
|
||||
The created MemoryRecord, or None if this memory is read-only.
|
||||
|
||||
Raises:
|
||||
Exception: On save failure (events emitted).
|
||||
"""
|
||||
if self._read_only:
|
||||
return None # type: ignore[return-value]
|
||||
_source_type = "unified_memory"
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
@@ -420,7 +426,7 @@ class Memory:
|
||||
Returns:
|
||||
Empty list (records are not available until the background save completes).
|
||||
"""
|
||||
if not contents:
|
||||
if not contents or self._read_only:
|
||||
return []
|
||||
|
||||
self._submit_save(
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
import datetime
|
||||
@@ -585,16 +586,29 @@ class Task(BaseModel):
|
||||
|
||||
self._post_agent_execution(agent)
|
||||
|
||||
if not self._guardrails and not self._guardrail:
|
||||
if isinstance(result, BaseModel):
|
||||
raw = result.model_dump_json()
|
||||
if self.output_pydantic:
|
||||
pydantic_output = result
|
||||
json_output = None
|
||||
elif self.output_json:
|
||||
pydantic_output = None
|
||||
json_output = result.model_dump()
|
||||
else:
|
||||
pydantic_output = None
|
||||
json_output = None
|
||||
elif not self._guardrails and not self._guardrail:
|
||||
raw = result
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
else:
|
||||
raw = result
|
||||
pydantic_output, json_output = None, None
|
||||
|
||||
task_output = TaskOutput(
|
||||
name=self.name or self.description,
|
||||
description=self.description,
|
||||
expected_output=self.expected_output,
|
||||
raw=result,
|
||||
raw=raw,
|
||||
pydantic=pydantic_output,
|
||||
json_dict=json_output,
|
||||
agent=agent.role,
|
||||
@@ -624,11 +638,15 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
|
||||
if self.callback:
|
||||
self.callback(self.output)
|
||||
cb_result = self.callback(self.output)
|
||||
if inspect.isawaitable(cb_result):
|
||||
await cb_result
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
crew.task_callback(self.output)
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.isawaitable(cb_result):
|
||||
await cb_result
|
||||
|
||||
if self.output_file:
|
||||
content = (
|
||||
@@ -682,16 +700,29 @@ class Task(BaseModel):
|
||||
|
||||
self._post_agent_execution(agent)
|
||||
|
||||
if not self._guardrails and not self._guardrail:
|
||||
if isinstance(result, BaseModel):
|
||||
raw = result.model_dump_json()
|
||||
if self.output_pydantic:
|
||||
pydantic_output = result
|
||||
json_output = None
|
||||
elif self.output_json:
|
||||
pydantic_output = None
|
||||
json_output = result.model_dump()
|
||||
else:
|
||||
pydantic_output = None
|
||||
json_output = None
|
||||
elif not self._guardrails and not self._guardrail:
|
||||
raw = result
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
else:
|
||||
raw = result
|
||||
pydantic_output, json_output = None, None
|
||||
|
||||
task_output = TaskOutput(
|
||||
name=self.name or self.description,
|
||||
description=self.description,
|
||||
expected_output=self.expected_output,
|
||||
raw=result,
|
||||
raw=raw,
|
||||
pydantic=pydantic_output,
|
||||
json_dict=json_output,
|
||||
agent=agent.role,
|
||||
@@ -722,11 +753,15 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
|
||||
if self.callback:
|
||||
self.callback(self.output)
|
||||
cb_result = self.callback(self.output)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
crew.task_callback(self.output)
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
if self.output_file:
|
||||
content = (
|
||||
|
||||
@@ -18,6 +18,7 @@ from pydantic import (
|
||||
BaseModel as PydanticBaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
ValidationError,
|
||||
create_model,
|
||||
field_validator,
|
||||
)
|
||||
@@ -150,14 +151,37 @@ class BaseTool(BaseModel, ABC):
|
||||
|
||||
super().model_post_init(__context)
|
||||
|
||||
def _validate_kwargs(self, kwargs: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Validate keyword arguments against args_schema if present.
|
||||
|
||||
Args:
|
||||
kwargs: The keyword arguments to validate.
|
||||
|
||||
Returns:
|
||||
Validated (and possibly coerced) keyword arguments.
|
||||
|
||||
Raises:
|
||||
ValueError: If validation against args_schema fails.
|
||||
"""
|
||||
if kwargs and self.args_schema is not None and self.args_schema.model_fields:
|
||||
try:
|
||||
validated = self.args_schema.model_validate(kwargs)
|
||||
return validated.model_dump()
|
||||
except Exception as e:
|
||||
raise ValueError(
|
||||
f"Tool '{self.name}' arguments validation failed: {e}"
|
||||
) from e
|
||||
return kwargs
|
||||
|
||||
def run(
|
||||
self,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
|
||||
result = self._run(*args, **kwargs)
|
||||
|
||||
# If _run is async, we safely run it
|
||||
if asyncio.iscoroutine(result):
|
||||
result = asyncio.run(result)
|
||||
|
||||
@@ -179,6 +203,7 @@ class BaseTool(BaseModel, ABC):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
result = await self._arun(*args, **kwargs)
|
||||
self.current_usage_count += 1
|
||||
return result
|
||||
@@ -331,6 +356,8 @@ class Tool(BaseTool, Generic[P, R]):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
|
||||
result = self.func(*args, **kwargs)
|
||||
|
||||
if asyncio.iscoroutine(result):
|
||||
@@ -361,6 +388,7 @@ class Tool(BaseTool, Generic[P, R]):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
result = await self._arun(*args, **kwargs)
|
||||
self.current_usage_count += 1
|
||||
return result
|
||||
|
||||
@@ -20,14 +20,6 @@ class RecallMemorySchema(BaseModel):
|
||||
"or multiple items to search for several things at once."
|
||||
),
|
||||
)
|
||||
scope: str | None = Field(
|
||||
default=None,
|
||||
description="Optional scope to narrow the search (e.g. /project/alpha)",
|
||||
)
|
||||
depth: str = Field(
|
||||
default="shallow",
|
||||
description="'shallow' for fast vector search, 'deep' for LLM-analyzed retrieval",
|
||||
)
|
||||
|
||||
|
||||
class RecallMemoryTool(BaseTool):
|
||||
@@ -41,36 +33,31 @@ class RecallMemoryTool(BaseTool):
|
||||
def _run(
|
||||
self,
|
||||
queries: list[str] | str,
|
||||
scope: str | None = None,
|
||||
depth: str = "shallow",
|
||||
**kwargs: Any,
|
||||
) -> str:
|
||||
"""Search memory for relevant information.
|
||||
|
||||
Args:
|
||||
queries: One or more search queries (string or list of strings).
|
||||
scope: Optional scope prefix to narrow the search.
|
||||
depth: "shallow" for fast vector search, "deep" for LLM-analyzed retrieval.
|
||||
|
||||
Returns:
|
||||
Formatted string of matching memories, or a message if none found.
|
||||
"""
|
||||
if isinstance(queries, str):
|
||||
queries = [queries]
|
||||
actual_depth = depth if depth in ("shallow", "deep") else "shallow"
|
||||
|
||||
all_lines: list[str] = []
|
||||
seen_ids: set[str] = set()
|
||||
for query in queries:
|
||||
matches = self.memory.recall(query, scope=scope, limit=5, depth=actual_depth)
|
||||
matches = self.memory.recall(query)
|
||||
for m in matches:
|
||||
if m.record.id not in seen_ids:
|
||||
seen_ids.add(m.record.id)
|
||||
all_lines.append(f"- (score={m.score:.2f}) {m.record.content}")
|
||||
all_lines.append(m.format())
|
||||
|
||||
if not all_lines:
|
||||
return "No relevant memories found."
|
||||
return "Found memories:\n" + "\n".join(all_lines)
|
||||
return "Found memories:\n" + "\n\n".join(all_lines)
|
||||
|
||||
|
||||
class RememberSchema(BaseModel):
|
||||
@@ -117,20 +104,28 @@ class RememberTool(BaseTool):
|
||||
def create_memory_tools(memory: Any) -> list[BaseTool]:
|
||||
"""Create Recall and Remember tools for the given memory instance.
|
||||
|
||||
When memory is read-only (``_read_only=True``), only the RecallMemoryTool
|
||||
is returned — the RememberTool is omitted so agents are never offered a
|
||||
save capability they cannot use.
|
||||
|
||||
Args:
|
||||
memory: A Memory, MemoryScope, or MemorySlice instance.
|
||||
|
||||
Returns:
|
||||
List containing a RecallMemoryTool and a RememberTool.
|
||||
List containing a RecallMemoryTool and, if not read-only, a RememberTool.
|
||||
"""
|
||||
i18n = get_i18n()
|
||||
return [
|
||||
tools: list[BaseTool] = [
|
||||
RecallMemoryTool(
|
||||
memory=memory,
|
||||
description=i18n.tools("recall_memory"),
|
||||
),
|
||||
RememberTool(
|
||||
memory=memory,
|
||||
description=i18n.tools("save_to_memory"),
|
||||
),
|
||||
]
|
||||
if not getattr(memory, "_read_only", False):
|
||||
tools.append(
|
||||
RememberTool(
|
||||
memory=memory,
|
||||
description=i18n.tools("save_to_memory"),
|
||||
)
|
||||
)
|
||||
return tools
|
||||
|
||||
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
from collections.abc import Callable, Sequence
|
||||
import concurrent.futures
|
||||
import inspect
|
||||
import json
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
|
||||
@@ -501,7 +502,9 @@ def handle_agent_action_core(
|
||||
- TODO: Remove messages parameter and its usage.
|
||||
"""
|
||||
if step_callback:
|
||||
step_callback(tool_result)
|
||||
cb_result = step_callback(tool_result)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
formatted_answer.text += f"\nObservation: {tool_result.result}"
|
||||
formatted_answer.result = tool_result.result
|
||||
@@ -1143,6 +1146,36 @@ def extract_tool_call_info(
|
||||
return None
|
||||
|
||||
|
||||
def parse_tool_call_args(
|
||||
func_args: dict[str, Any] | str,
|
||||
func_name: str,
|
||||
call_id: str,
|
||||
original_tool: Any = None,
|
||||
) -> tuple[dict[str, Any], None] | tuple[None, dict[str, Any]]:
|
||||
"""Parse tool call arguments from a JSON string or dict.
|
||||
|
||||
Returns:
|
||||
``(args_dict, None)`` on success, or ``(None, error_result)`` on
|
||||
JSON parse failure where ``error_result`` is a ready-to-return dict
|
||||
with the same shape as ``_execute_single_native_tool_call`` return values.
|
||||
"""
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
return json.loads(func_args), None
|
||||
except json.JSONDecodeError as e:
|
||||
return None, {
|
||||
"call_id": call_id,
|
||||
"func_name": func_name,
|
||||
"result": (
|
||||
f"Error: Failed to parse tool arguments as JSON: {e}. "
|
||||
f"Please provide valid JSON arguments for the '{func_name}' tool."
|
||||
),
|
||||
"from_cache": False,
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
return func_args, None
|
||||
|
||||
|
||||
def _setup_before_llm_call_hooks(
|
||||
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
|
||||
printer: Printer,
|
||||
|
||||
@@ -69,7 +69,7 @@ def create_llm(
|
||||
UNACCEPTED_ATTRIBUTES: Final[list[str]] = [
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_SECRET_ACCESS_KEY",
|
||||
"AWS_REGION_NAME",
|
||||
"AWS_DEFAULT_REGION",
|
||||
]
|
||||
|
||||
|
||||
@@ -146,7 +146,7 @@ def _llm_via_environment_or_fallback() -> LLM | None:
|
||||
unaccepted_attributes = [
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_SECRET_ACCESS_KEY",
|
||||
"AWS_REGION_NAME",
|
||||
"AWS_DEFAULT_REGION",
|
||||
]
|
||||
set_provider = model_name.partition("/")[0] if "/" in model_name else "openai"
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, Mock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -291,6 +291,46 @@ class TestAsyncAgentExecutor:
|
||||
assert max_concurrent > 1, f"Expected concurrent execution, max concurrent was {max_concurrent}"
|
||||
|
||||
|
||||
class TestInvokeStepCallback:
|
||||
"""Tests for _invoke_step_callback with sync and async callbacks."""
|
||||
|
||||
def test_invoke_step_callback_with_sync_callback(
|
||||
self, executor: CrewAgentExecutor
|
||||
) -> None:
|
||||
"""Test that a sync step callback is called normally."""
|
||||
callback = Mock()
|
||||
executor.step_callback = callback
|
||||
answer = AgentFinish(thought="thinking", output="test", text="final")
|
||||
|
||||
executor._invoke_step_callback(answer)
|
||||
|
||||
callback.assert_called_once_with(answer)
|
||||
|
||||
def test_invoke_step_callback_with_async_callback(
|
||||
self, executor: CrewAgentExecutor
|
||||
) -> None:
|
||||
"""Test that an async step callback is awaited via asyncio.run."""
|
||||
async_callback = AsyncMock()
|
||||
executor.step_callback = async_callback
|
||||
answer = AgentFinish(thought="thinking", output="test", text="final")
|
||||
|
||||
with patch("crewai.agents.crew_agent_executor.asyncio.run") as mock_run:
|
||||
executor._invoke_step_callback(answer)
|
||||
|
||||
async_callback.assert_called_once_with(answer)
|
||||
mock_run.assert_called_once()
|
||||
|
||||
def test_invoke_step_callback_with_none(
|
||||
self, executor: CrewAgentExecutor
|
||||
) -> None:
|
||||
"""Test that no error is raised when step_callback is None."""
|
||||
executor.step_callback = None
|
||||
answer = AgentFinish(thought="thinking", output="test", text="final")
|
||||
|
||||
# Should not raise
|
||||
executor._invoke_step_callback(answer)
|
||||
|
||||
|
||||
class TestAsyncLLMResponseHelper:
|
||||
"""Tests for aget_llm_response helper function."""
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ import os
|
||||
import threading
|
||||
import time
|
||||
from collections import Counter
|
||||
from unittest.mock import patch
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field
|
||||
@@ -1129,3 +1129,150 @@ class TestMaxUsageCountWithNativeToolCalling:
|
||||
# Verify the requested calls occurred while keeping usage bounded.
|
||||
assert tool.current_usage_count >= 2
|
||||
assert tool.current_usage_count <= tool.max_usage_count
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# JSON Parse Error Handling Tests
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TestNativeToolCallingJsonParseError:
|
||||
"""Tests that malformed JSON tool arguments produce clear errors
|
||||
instead of silently dropping all arguments."""
|
||||
|
||||
def _make_executor(self, tools: list[BaseTool]) -> "CrewAgentExecutor":
|
||||
"""Create a minimal CrewAgentExecutor with mocked dependencies."""
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.tools.base_tool import to_langchain
|
||||
|
||||
structured_tools = to_langchain(tools)
|
||||
mock_agent = Mock()
|
||||
mock_agent.key = "test_agent"
|
||||
mock_agent.role = "tester"
|
||||
mock_agent.verbose = False
|
||||
mock_agent.fingerprint = None
|
||||
mock_agent.tools_results = []
|
||||
|
||||
mock_task = Mock()
|
||||
mock_task.name = "test"
|
||||
mock_task.description = "test"
|
||||
mock_task.id = "test-id"
|
||||
|
||||
executor = object.__new__(CrewAgentExecutor)
|
||||
executor.agent = mock_agent
|
||||
executor.task = mock_task
|
||||
executor.crew = Mock()
|
||||
executor.tools = structured_tools
|
||||
executor.original_tools = tools
|
||||
executor.tools_handler = None
|
||||
executor._printer = Mock()
|
||||
executor.messages = []
|
||||
|
||||
return executor
|
||||
|
||||
def test_malformed_json_returns_parse_error(self) -> None:
|
||||
"""Malformed JSON args must return a descriptive error, not silently become {}."""
|
||||
|
||||
class CodeTool(BaseTool):
|
||||
name: str = "execute_code"
|
||||
description: str = "Run code"
|
||||
|
||||
def _run(self, code: str) -> str:
|
||||
return f"ran: {code}"
|
||||
|
||||
tool = CodeTool()
|
||||
executor = self._make_executor([tool])
|
||||
|
||||
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
|
||||
_, available_functions = convert_tools_to_openai_schema([tool])
|
||||
|
||||
malformed_json = '{"code": "print("hello")"}'
|
||||
|
||||
result = executor._execute_single_native_tool_call(
|
||||
call_id="call_123",
|
||||
func_name="execute_code",
|
||||
func_args=malformed_json,
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert "Failed to parse tool arguments as JSON" in result["result"]
|
||||
assert tool.current_usage_count == 0
|
||||
|
||||
def test_valid_json_still_executes_normally(self) -> None:
|
||||
"""Valid JSON args should execute the tool as before."""
|
||||
|
||||
class CodeTool(BaseTool):
|
||||
name: str = "execute_code"
|
||||
description: str = "Run code"
|
||||
|
||||
def _run(self, code: str) -> str:
|
||||
return f"ran: {code}"
|
||||
|
||||
tool = CodeTool()
|
||||
executor = self._make_executor([tool])
|
||||
|
||||
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
|
||||
_, available_functions = convert_tools_to_openai_schema([tool])
|
||||
|
||||
valid_json = '{"code": "print(1)"}'
|
||||
|
||||
result = executor._execute_single_native_tool_call(
|
||||
call_id="call_456",
|
||||
func_name="execute_code",
|
||||
func_args=valid_json,
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert result["result"] == "ran: print(1)"
|
||||
|
||||
def test_dict_args_bypass_json_parsing(self) -> None:
|
||||
"""When func_args is already a dict, no JSON parsing occurs."""
|
||||
|
||||
class CodeTool(BaseTool):
|
||||
name: str = "execute_code"
|
||||
description: str = "Run code"
|
||||
|
||||
def _run(self, code: str) -> str:
|
||||
return f"ran: {code}"
|
||||
|
||||
tool = CodeTool()
|
||||
executor = self._make_executor([tool])
|
||||
|
||||
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
|
||||
_, available_functions = convert_tools_to_openai_schema([tool])
|
||||
|
||||
result = executor._execute_single_native_tool_call(
|
||||
call_id="call_789",
|
||||
func_name="execute_code",
|
||||
func_args={"code": "x = 42"},
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert result["result"] == "ran: x = 42"
|
||||
|
||||
def test_schema_validation_catches_missing_args_on_native_path(self) -> None:
|
||||
"""The native function calling path should now enforce args_schema,
|
||||
catching missing required fields before _run is called."""
|
||||
|
||||
class StrictTool(BaseTool):
|
||||
name: str = "strict_tool"
|
||||
description: str = "A tool with required args"
|
||||
|
||||
def _run(self, code: str, language: str) -> str:
|
||||
return f"{language}: {code}"
|
||||
|
||||
tool = StrictTool()
|
||||
executor = self._make_executor([tool])
|
||||
|
||||
from crewai.utilities.agent_utils import convert_tools_to_openai_schema
|
||||
_, available_functions = convert_tools_to_openai_schema([tool])
|
||||
|
||||
result = executor._execute_single_native_tool_call(
|
||||
call_id="call_schema",
|
||||
func_name="strict_tool",
|
||||
func_args={"code": "print(1)"},
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert "Error" in result["result"]
|
||||
assert "validation failed" in result["result"].lower() or "missing" in result["result"].lower()
|
||||
|
||||
@@ -0,0 +1,197 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate 15 + 27 using
|
||||
your add_numbers tool. Report the result.\n\nThis is the expected criteria for
|
||||
your final answer: A structured calculation result\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\nFormat your final answer
|
||||
according to the following OpenAPI schema: {\n \"properties\": {\n \"operation\":
|
||||
{\n \"description\": \"The mathematical operation performed\",\n \"title\":
|
||||
\"Operation\",\n \"type\": \"string\"\n },\n \"result\": {\n \"description\":
|
||||
\"The result of the calculation\",\n \"title\": \"Result\",\n \"type\":
|
||||
\"integer\"\n },\n \"explanation\": {\n \"description\": \"Brief
|
||||
explanation of the calculation\",\n \"title\": \"Explanation\",\n \"type\":
|
||||
\"string\"\n }\n },\n \"required\": [\n \"operation\",\n \"result\",\n \"explanation\"\n ],\n \"title\":
|
||||
\"CalculationResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite,
|
||||
paraphrase, or modify the meaning of the content. Only structure it to match
|
||||
the schema format.\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."}], "role": "user"}], "systemInstruction": {"parts": [{"text":
|
||||
"You are Calculator. You are a calculator assistant that uses tools to compute
|
||||
results.\nYour personal goal is: Perform calculations using available tools"}],
|
||||
"role": "user"}, "tools": [{"functionDeclarations": [{"description": "Add two
|
||||
numbers together and return the sum.", "name": "add_numbers", "parameters_json_schema":
|
||||
{"properties": {"a": {"title": "A", "type": "integer"}, "b": {"title": "B",
|
||||
"type": "integer"}}, "required": ["a", "b"], "type": "object", "additionalProperties":
|
||||
false}}, {"description": "Use this tool to provide your final structured response.
|
||||
Call this tool when you have gathered all necessary information and are ready
|
||||
to provide the final answer in the required format.", "name": "structured_output",
|
||||
"parameters_json_schema": {"properties": {"operation": {"description": "The
|
||||
mathematical operation performed", "title": "Operation", "type": "string"},
|
||||
"result": {"description": "The result of the calculation", "title": "Result",
|
||||
"type": "integer"}, "explanation": {"description": "Brief explanation of the
|
||||
calculation", "title": "Explanation", "type": "string"}}, "required": ["operation",
|
||||
"result", "explanation"], "title": "CalculationResult", "type": "object", "additionalProperties":
|
||||
false, "propertyOrdering": ["operation", "result", "explanation"]}}]}], "generationConfig":
|
||||
{"stopSequences": ["\nObservation:"]}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2763'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.12
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"functionCall\": {\n \"name\": \"add_numbers\",\n
|
||||
\ \"args\": {\n \"a\": 15,\n \"b\":
|
||||
27\n }\n }\n }\n ],\n \"role\":
|
||||
\"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
|
||||
4.3579145442760951e-06\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
377,\n \"candidatesTokenCount\": 7,\n \"totalTokenCount\": 384,\n \"promptTokensDetails\":
|
||||
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 377\n
|
||||
\ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
|
||||
\"TEXT\",\n \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\":
|
||||
\"gemini-2.0-flash-001\",\n \"responseId\": \"vVefaYDSOouXjMcPicLCsQY\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Wed, 25 Feb 2026 20:12:46 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=718
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate 15 + 27 using
|
||||
your add_numbers tool. Report the result.\n\nThis is the expected criteria for
|
||||
your final answer: A structured calculation result\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\nFormat your final answer
|
||||
according to the following OpenAPI schema: {\n \"properties\": {\n \"operation\":
|
||||
{\n \"description\": \"The mathematical operation performed\",\n \"title\":
|
||||
\"Operation\",\n \"type\": \"string\"\n },\n \"result\": {\n \"description\":
|
||||
\"The result of the calculation\",\n \"title\": \"Result\",\n \"type\":
|
||||
\"integer\"\n },\n \"explanation\": {\n \"description\": \"Brief
|
||||
explanation of the calculation\",\n \"title\": \"Explanation\",\n \"type\":
|
||||
\"string\"\n }\n },\n \"required\": [\n \"operation\",\n \"result\",\n \"explanation\"\n ],\n \"title\":
|
||||
\"CalculationResult\",\n \"type\": \"object\",\n \"additionalProperties\":
|
||||
false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite,
|
||||
paraphrase, or modify the meaning of the content. Only structure it to match
|
||||
the schema format.\n\nDo not include the OpenAPI schema in the final output.
|
||||
Ensure the final output does not include any code block markers like ```json
|
||||
or ```python."}], "role": "user"}, {"parts": [{"functionCall": {"args": {"a":
|
||||
15, "b": 27}, "name": "add_numbers"}}], "role": "model"}, {"parts": [{"functionResponse":
|
||||
{"name": "add_numbers", "response": {"result": 42}}}], "role": "user"}, {"parts":
|
||||
[{"text": "Analyze the tool result. If requirements are met, provide the Final
|
||||
Answer. Otherwise, call the next tool. Deliver only the answer without meta-commentary."}],
|
||||
"role": "user"}], "systemInstruction": {"parts": [{"text": "You are Calculator.
|
||||
You are a calculator assistant that uses tools to compute results.\nYour personal
|
||||
goal is: Perform calculations using available tools"}], "role": "user"}, "tools":
|
||||
[{"functionDeclarations": [{"description": "Add two numbers together and return
|
||||
the sum.", "name": "add_numbers", "parameters_json_schema": {"properties": {"a":
|
||||
{"title": "A", "type": "integer"}, "b": {"title": "B", "type": "integer"}},
|
||||
"required": ["a", "b"], "type": "object", "additionalProperties": false}}, {"description":
|
||||
"Use this tool to provide your final structured response. Call this tool when
|
||||
you have gathered all necessary information and are ready to provide the final
|
||||
answer in the required format.", "name": "structured_output", "parameters_json_schema":
|
||||
{"properties": {"operation": {"description": "The mathematical operation performed",
|
||||
"title": "Operation", "type": "string"}, "result": {"description": "The result
|
||||
of the calculation", "title": "Result", "type": "integer"}, "explanation": {"description":
|
||||
"Brief explanation of the calculation", "title": "Explanation", "type": "string"}},
|
||||
"required": ["operation", "result", "explanation"], "title": "CalculationResult",
|
||||
"type": "object", "additionalProperties": false, "propertyOrdering": ["operation",
|
||||
"result", "explanation"]}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3166'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.12
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"functionCall\": {\n \"name\": \"structured_output\",\n
|
||||
\ \"args\": {\n \"result\": 42,\n \"explanation\":
|
||||
\"15 + 27 = 42\",\n \"operation\": \"addition\"\n }\n
|
||||
\ }\n }\n ],\n \"role\": \"model\"\n },\n
|
||||
\ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.07498827245500353\n
|
||||
\ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 421,\n \"candidatesTokenCount\":
|
||||
18,\n \"totalTokenCount\": 439,\n \"promptTokensDetails\": [\n {\n
|
||||
\ \"modality\": \"TEXT\",\n \"tokenCount\": 421\n }\n ],\n
|
||||
\ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 18\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-001\",\n
|
||||
\ \"responseId\": \"vlefac7bJb6TjMcPzYWh0Ag\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Wed, 25 Feb 2026 20:12:47 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=774
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,97 +1,120 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
|
||||
scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent
|
||||
Task: Give me an integer score between 1-5 for the following title: ''The impact
|
||||
of AI in the future of work''\n\nThis is the expected criteria for your final
|
||||
answer: The score of the title.\nyou MUST return the actual complete content
|
||||
as the final answer, not a summary.\nFormat your final answer according to the
|
||||
following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
|
||||
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
|
||||
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT:
|
||||
Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or
|
||||
modify the meaning of the content. Only structure it to match the schema format.\n\nDo
|
||||
not include the OpenAPI schema in the final output. Ensure the final output
|
||||
does not include any code block markers like ```json or ```python.\n\nProvide
|
||||
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1394'
|
||||
- '1421'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-helper-method:
|
||||
- beta.chat.completions.parse
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
- 3.13.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-CYg0OJQX3eMkY3pcrZz7iSh2HHTPF\",\n \"object\": \"chat.completion\",\n \"created\": 1762380656,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\\"score\\\":4}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 18,\n \"total_tokens\": 312,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-DDDzfvCsU0fZWdxFwjGh6dmaEheAW\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1772044427,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- REDACTED-RAY
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 05 Nov 2025 22:10:56 GMT
|
||||
- Wed, 25 Feb 2026 18:33:48 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:56 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- user-hortuttj2f3qtmxyik2zxf4q
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '770'
|
||||
- '552'
|
||||
openai-project:
|
||||
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '796'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '500'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '200000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '499'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '199687'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 120ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 93ms
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_REDACTED
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,189 +1,121 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour personal goal is: Test Goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Gather information about available books on the First World War\n\nThis is the expected criteria for your final answer: A list of available books on the First World War\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal"},{"role":"user","content":"\nCurrent Task: Gather
|
||||
information about available books on the First World War\n\nThis is the expected
|
||||
criteria for your final answer: A list of available books on the First World
|
||||
War\nyou MUST return the actual complete content as the final answer, not a
|
||||
summary.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '903'
|
||||
- '465'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
- 3.13.12
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BReRV6HdeL9wUgmKwfAZfVjuGdpAo\",\n \"object\": \"chat.completion\",\n \"created\": 1745930017,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: Here is a comprehensive list of available books on the First World War:\\n\\n1. **\\\"The Sleepwalkers: How Europe Went to War in 1914\\\" by Christopher Clark** \\n This book delves into the complex factors that led to the outbreak of the war, offering insights into the political and social dynamics of early 20th century Europe.\\n\\n2. **\\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer** \\n Meyer's expansive narrative covers the entire war with a focus on both military strategies and the human experiences endured by soldiers and civilians alike.\\n\\n3. **\\\"All Quiet on the Western Front\\\" by Erich Maria\
|
||||
\ Remarque** \\n A poignant novel that captures the resilience and trauma experienced by German soldiers during World War I, based on the author's own experiences.\\n\\n4. **\\\"The First World War\\\" by John Keegan** \\n Keegan provides a detailed military history of the war, featuring insights on battles, strategies, and the overall impact on global affairs.\\n\\n5. **\\\"Goodbye to All That\\\" by Robert Graves** \\n This autobiography recounts the author's experiences as a soldier during the war, offering a personal and critical perspective on the conflicts and the post-war era.\\n\\n6. **\\\"Catastrophe 1914: Europe Goes to War\\\" by Max Hastings** \\n Hastings chronicles the events leading up to World War I and the early battles, detailing the war's initial impact on European societies.\\n\\n7. **\\\"The War That Ended Peace: The Road to 1914\\\" by Margaret MacMillan** \\n MacMillan explores the political and historical factors that contributed to the outbreak\
|
||||
\ of war, emphasizing the decisions made by leaders across Europe.\\n\\n8. **\\\"The First World War: A Complete History\\\" by Martin Gilbert** \\n This complete history takes readers through the entirety of the war, from its causes to its aftermath, using a wide range of sources.\\n\\n9. **\\\"1914: The Year the World Ended\\\" by Paul Ham** \\n Ham focuses on the pivotal year of 1914 and the early war's devastation, analyzing its long-lasting effects on the world.\\n\\n10. **\\\"War Horse\\\" by Michael Morpurgo** \\n This children's novel tells the story of a horse and his experiences during the war, highlighting the bond between animals and humans amidst the chaos.\\n\\nEach of these books offers unique perspectives and rich details about the First World War, making them valuable resources for anyone interested in this pivotal period in history.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\"\
|
||||
: \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 170,\n \"completion_tokens\": 534,\n \"total_tokens\": 704,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_dbaca60df0\"\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-DDGA6ArRnT0S8ME2I1R4x9Mo4JyGJ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1772052762,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Here is a list of available books on
|
||||
the First World War:\\n\\n1. \\\"The Guns of August\\\" by Barbara W. Tuchman\\n2.
|
||||
\\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer\\n3.
|
||||
\\\"The First World War\\\" by John Keegan\\n4. \\\"The Sleepwalkers: How
|
||||
Europe Went to War in 1914\\\" by Christopher Clark\\n5. \\\"To End All Wars:
|
||||
A Story of Loyalty and Rebellion, 1914-1918\\\" by Adam Hochschild\\n6. \\\"World
|
||||
War I: The Definitive Visual History\\\" by R.G. Grant\\n7. \\\"Catastrophe
|
||||
1914: Europe Goes to War\\\" by Max Hastings\\n8. \\\"The Great War and Modern
|
||||
Memory\\\" by Paul Fussell\\n9. \\\"Paris 1919: Six Months That Changed the
|
||||
World\\\" by Margaret MacMillan\\n10. \\\"The Pity of War: Explaining World
|
||||
War I\\\" by Niall Ferguson\\n\\nIf you need further details on any of these
|
||||
titles, feel free to ask.\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 84,\n \"completion_tokens\":
|
||||
230,\n \"total_tokens\": 314,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 937ed42dee2e621f-GRU
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 29 Apr 2025 12:33:48 GMT
|
||||
- Wed, 25 Feb 2026 20:52:46 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=mLRCnpdB3n_6medIZWHnUu8MNRGZsD6riaRhN47PK74-1745930028-1.0.1.1-M2lDM1_V9hNCK0MZrBnFalF3lndC3JkS8zhDOGww_LmOrgdpU9fZLpNZUmyinCQOnlCjDjDYJUECM82ffT1anqBiO1NoDeNp91EPKiK7s.8; path=/; expires=Tue, 29-Apr-25 13:03:48 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- _cfuvid=eTrj_ZhCx2XuylS5vYROwUlPrJBwOyrbS2Ki.msl45E-1745930028010-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '10856'
|
||||
- '3250'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999807'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_bc2d62d8325b2bdd3e98544a66389132
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python."}, {"role": "user", "content": "\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n Here is a comprehensive list of available books on
|
||||
the First World War:\n\n1. **\"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark** \n This book delves into the complex factors that led to the outbreak of the war, offering insights into the political and social dynamics of early 20th century Europe.\n\n2. **\"A World Undone: The Story of the Great War, 1914 to 1918\" by G.J. Meyer** \n Meyer''s expansive narrative covers the entire war with a focus on both military strategies and the human experiences endured by soldiers and civilians alike.\n\n3. **\"All Quiet on the Western Front\" by Erich Maria Remarque** \n A poignant novel that captures the resilience and trauma experienced by German soldiers during World War I, based on the author''s own experiences.\n\n4. **\"The First World War\" by John Keegan** \n Keegan provides a detailed military history of the war, featuring insights on battles, strategies, and the overall impact on global affairs.\n\n5. **\"Goodbye to All That\" by Robert Graves** \n This
|
||||
autobiography recounts the author''s experiences as a soldier during the war, offering a personal and critical perspective on the conflicts and the post-war era.\n\n6. **\"Catastrophe 1914: Europe Goes to War\" by Max Hastings** \n Hastings chronicles the events leading up to World War I and the early battles, detailing the war''s initial impact on European societies.\n\n7. **\"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan** \n MacMillan explores the political and historical factors that contributed to the outbreak of war, emphasizing the decisions made by leaders across Europe.\n\n8. **\"The First World War: A Complete History\" by Martin Gilbert** \n This complete history takes readers through the entirety of the war, from its causes to its aftermath, using a wide range of sources.\n\n9. **\"1914: The Year the World Ended\" by Paul Ham** \n Ham focuses on the pivotal year of 1914 and the early war''s devastation, analyzing its long-lasting effects
|
||||
on the world.\n\n10. **\"War Horse\" by Michael Morpurgo** \n This children''s novel tells the story of a horse and his experiences during the war, highlighting the bond between animals and humans amidst the chaos.\n\nEach of these books offers unique perspectives and rich details about the First World War, making them valuable resources for anyone interested in this pivotal period in history.\n\n Guardrail:\n Ensure the authors are from Italy\n \n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues \u2014 do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3917'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=mLRCnpdB3n_6medIZWHnUu8MNRGZsD6riaRhN47PK74-1745930028-1.0.1.1-M2lDM1_V9hNCK0MZrBnFalF3lndC3JkS8zhDOGww_LmOrgdpU9fZLpNZUmyinCQOnlCjDjDYJUECM82ffT1anqBiO1NoDeNp91EPKiK7s.8; _cfuvid=eTrj_ZhCx2XuylS5vYROwUlPrJBwOyrbS2Ki.msl45E-1745930028010-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BReTBRCAvSDG5VMdtF9ZjByy7lqSJ\",\n \"object\": \"chat.completion\",\n \"created\": 1745930121,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: {\\n \\\"valid\\\": false,\\n \\\"feedback\\\": \\\"None of the authors listed in the task result are from Italy. All the authors mentioned are from other countries, such as Germany, the UK, and the US.\\\"\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 797,\n \"completion_tokens\": 60,\n \"total_tokens\": 857,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"\
|
||||
audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_0392822090\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 937ed6bd68faa435-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 29 Apr 2025 12:35:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1138'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999072'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_2ba1be014a5974ba354aff564e26516a
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T07:25:08.937105+00:00"}, "ephemeral_trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582"}'
|
||||
body: '{"trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T07:25:08.937105+00:00"},
|
[VCR cassette diff suppressed because it is too large. The ephemeral-tracing recording (trace 4ced1ade-0d34-4d28-a47d-61011b1f3582) is re-recorded with sanitized fixtures:
- @@ -13,11 +16,13 @@ and @@ -190,11 +296,13 @@: the request headers for POST https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches and POST .../batches/4ced1ade-0d34-4d28-a47d-61011b1f3582/events replace "User-Agent: CrewAI-CLI/1.2.1" with X-USER-AGENT-XXX and accept-encoding with ACCEPT-ENCODING-XXX; X-Crewai-Organization-Id (73c2b193-f579-422c-84c7-76a39a1da77f) and X-Crewai-Version (1.2.1) are kept.
- @@ -35,46 +40,60 @@, @@ -212,35 +320,33 @@, and @@ -249,8 +355,6 @@ through @@ -280,35 +386,167 @@ (the PATCH .../finalize exchange): the app.crewai.com response headers (201 Created for batch creation, 200 OK elsewhere) replace the full content-security-policy value with CSP-FILTERED, and etag, permissions-policy, referrer-policy, strict-transport-security, x-content-type-options, x-frame-options, x-permitted-cross-domain-policies, x-request-id, x-runtime, and x-xss-protection with matching -XXX placeholders.
- @@ -83,20 +102,18 @@ and @@ -120,26 +148,25 @@: the OpenAI request and response headers drop "user-agent: OpenAI/Python 1.109.1" and replace x-stainless-arch (arm64), x-stainless-os (MacOS), x-stainless-read-timeout ('600'), CF-RAY, Set-Cookie, openai-organization (crewai-iuxna1), and openai-project with placeholders; @@ -149,40 +176,119 @@ does the same for the x-ratelimit-* headers.
- @@ -107,10 +124,21 @@ and the events-batch request: the gpt-4.1-mini completion (chatcmpl-CWdnRkRPYTVe5JfVO7aC1cdVfqIdd, content "Thought: I now can give a great answer\n{\n  \"score\": 4\n}", 281 prompt + 19 completion tokens) and the eight-event tracing batch (crew_kickoff_started through crew_kickoff_completed, total_tokens 300, batch_sequence 1, is_final_batch false) are re-wrapped from single-line YAML scalars into folded multi-line strings with identical content.
- @@ -280,35 +386,167 @@ also appends a new interaction: POST https://api.openai.com/v1/chat/completions with model gpt-4.1-mini, a prompt reading "Format your final answer according to the following OpenAPI schema ... IMPORTANT: Preserve the original content exactly as-is", and response_format {"type":"json_schema","json_schema":{"name":"ScoreOutput","strict":true,...}}; the recorded response (chatcmpl-DDE0D15NvBLDvn8Wy68ZscARhqMaX, Wed, 25 Feb 2026) is {"score":4}, with every volatile header stored as a -XXX placeholder.]

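The header normalization visible throughout these cassettes (User-Agent to X-USER-AGENT-XXX, etag to ETAG-XXX, and so on) is the kind of scrubbing vcrpy supports directly. A minimal sketch, assuming vcrpy is the recorder in use; the hook name scrub_response and the SANITIZED_RESPONSE_HEADERS map are illustrative, not taken from this diff:

```python
import vcr

# Response headers to overwrite with stable placeholders (illustrative subset).
SANITIZED_RESPONSE_HEADERS = {
    "etag": "ETAG-XXX",
    "set-cookie": "SET-COOKIE-XXX",
    "x-request-id": "X-REQUEST-ID-XXX",
    "content-security-policy": "CSP-FILTERED",
}

def scrub_response(response):
    # vcrpy passes the recorded response as a dict; headers map names to
    # lists of values.
    headers = response["headers"]
    for name in list(headers):
        replacement = SANITIZED_RESPONSE_HEADERS.get(name.lower())
        if replacement is not None:
            headers[name] = [replacement]
    return response

recorder = vcr.VCR(
    # Request headers can be rewritten declaratively ...
    filter_headers=[("authorization", "AUTHORIZATION-XXX"),
                    ("user-agent", "X-USER-AGENT-XXX")],
    # ... while response headers need a before_record_response hook.
    before_record_response=scrub_response,
)

# with recorder.use_cassette("tests/cassettes/example.yaml"):
#     ...  # make HTTP calls; the cassette is written with placeholders
```

Declarative filter_headers covers outgoing requests, while the hook handles whatever the server echoes back, which is what the -XXX placeholders on both sides of these fixtures suggest.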
@@ -426,4 +426,121 @@ interactions:
[appended interaction suppressed: after the final recorded 200 OK, a new POST https://api.openai.com/v1/chat/completions is recorded with model gpt-4o, tools [ScoreOutput], and tool_choice {"type":"function","function":{"name":"ScoreOutput"}}, sending the Scorer agent's text "Thought: I now can give a great answer\nFinal Answer: I would assign a score of 4 to the title ...". The response (chatcmpl-DDE0G4tjiC8Je3BD8xhWMey7kZF66, gpt-4o-2024-08-06) calls ScoreOutput with arguments {"score":4} (188 prompt + 5 completion tokens); volatile request and response headers are stored as -XXX placeholders.]
version: 1

@@ -1,98 +1,120 @@
interactions:
[cassette re-recorded: the old request body, the Scorer system prompt with the "Thought: ... Final Answer: ..." format and "Ensure your final answer strictly adheres to the following OpenAPI schema", is replaced by a structuring request whose prompt reads "Format your final answer according to the following OpenAPI schema ... IMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or modify the meaning of the content." and which adds response_format {"type":"json_schema","json_schema":{"name":"ScoreOutput","strict":true,...}} (content-length 1394 -> 1421). The client fingerprint moves from openai 1.109.1 on CPython 3.12.9 (arm64, MacOS) to 1.83.0 on CPython 3.13.12 with x-stainless-helper-method beta.chat.completions.parse, and the recorded completion changes from "Thought: I now can give a great answer\nFinal Answer: {\n  \"score\": 4\n}" (chatcmpl-CYg0UpOvDuMqlqYkt9WW8lQSkyatz, 05 Nov 2025, 294 prompt + 22 completion tokens) to the bare {"score":4} (chatcmpl-DDE5QUOVeJDiOh6TuObUjh32f7Q0g, 25 Feb 2026, 276 prompt + 5 completion tokens). CF-RAY (previously REDACTED-RAY), Set-Cookie, strict-transport-security, openai-organization, openai-project, the x-ratelimit-* values, and x-request-id are all stored as -XXX placeholders, and x-envoy-upstream-service-time is dropped in favor of x-openai-proxy-wasm: v0.1.]

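The new request body recorded above pins the model's output to a JSON Schema via response_format. A sketch of an equivalent call, reconstructed from the recorded payload (the prompt is abridged; the full text is in the cassette), assuming the openai Python client and an OPENAI_API_KEY in the environment:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    model="gpt-4.1-mini",
    # Prompt abridged; the full structuring instructions are in the cassette.
    messages=[{"role": "user",
               "content": "Format your final answer according to the "
                          "following OpenAPI schema ..."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "ScoreOutput",
            "strict": True,
            "schema": {
                "title": "ScoreOutput",
                "type": "object",
                "properties": {"score": {"title": "Score", "type": "integer"}},
                "required": ["score"],
                "additionalProperties": False,
            },
        },
    },
    stream=False,
)

print(completion.choices[0].message.content)  # recorded here as {"score":4}
```

With strict json_schema mode the model cannot emit the old "Thought: ... Final Answer: ..." framing, which is why the recorded completion shrinks from 22 tokens of prose-wrapped JSON to the bare 5-token {"score":4}.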
@@ -427,4 +427,122 @@ interactions:
[appended interaction suppressed: a POST https://api.openai.com/v1/chat/completions with model gpt-4o and tool_choice ScoreOutput sends the agent's reasoning about the title ("highly relevant ... clear and concise ... somewhat broad", ending "Final Answer: 4"). The response (chatcmpl-DDE0FPRrXCbAAssWcvT9wUojN8yPa, gpt-4o-2024-08-06) calls ScoreOutput with arguments {"score":4} (191 prompt + 5 completion tokens); volatile headers are stored as -XXX placeholders.]
version: 1

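The appended interactions force a function call so the score can be read from tool_calls rather than scraped out of free text. A sketch reconstructed from the recorded request body (user message abridged); the json.loads at the end is an assumption about how the caller consumes the arguments, not something shown in the cassette:

```python
import json
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    # User message abridged; the cassette sends the agent's full reasoning.
    messages=[{"role": "user", "content": "Thought: ...\nFinal Answer: 4"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "ScoreOutput",
            "description": "Correctly extracted `ScoreOutput` with all the "
                           "required parameters with correct types",
            "parameters": {
                "type": "object",
                "properties": {"score": {"title": "Score", "type": "integer"}},
                "required": ["score"],
            },
        },
    }],
    # Forcing the tool call guarantees machine-readable arguments.
    tool_choice={"type": "function", "function": {"name": "ScoreOutput"}},
)

arguments = completion.choices[0].message.tool_calls[0].function.arguments
print(json.loads(arguments))  # {'score': 4}
```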
@@ -1,12 +1,29 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\n\nThis is the expected criteria for your final
answer: The score of the title.\nyou MUST return the actual complete content
as the final answer, not a summary.\nEnsure your final answer strictly adheres
to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo
not include the OpenAPI schema in the final output. Ensure the final output
does not include any code block markers like ```json or ```python.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
@@ -15,20 +32,18 @@ interactions:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
@@ -39,11 +54,21 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CYg0P4wugCaRcXw9kmLG3BAMBmkA0\",\n \"object\": \"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\
\n}\n"
string: "{\n \"id\": \"chatcmpl-CYg0P4wugCaRcXw9kmLG3BAMBmkA0\",\n \"object\":
\"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\":
22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n"
headers:
CF-RAY:
- REDACTED-RAY
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
@@ -53,26 +78,25 @@ interactions:
Server:
- cloudflare
Set-Cookie:
- __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:57 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- SET-COOKIE-XXX
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- X-Request-ID
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- user-hortuttj2f3qtmxyik2zxf4q
- OPENAI-ORG-XXX
openai-processing-ms:
- '537'
openai-project:
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
@@ -80,19 +104,153 @@ interactions:
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '500'
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- '200000'
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- '499'
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- '199687'
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- 120ms
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- 93ms
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- req_REDACTED
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\n\nThis is the expected criteria for your final
answer: The score of the title.\nyou MUST return the actual complete content
as the final answer, not a summary.\nFormat your final answer according to the
following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT:
Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or
modify the meaning of the content. Only structure it to match the schema format.\n\nDo
not include the OpenAPI schema in the final output. Ensure the final output
does not include any code block markers like ```json or ```python.\n\nProvide
your complete response:"},{"role":"system","content":"You are Scorer. You''re
an expert scorer, specialized in scoring titles.\nYour personal goal is: Score
the title"},{"role":"user","content":"\nCurrent Task: Give me an integer score
between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis
is the expected criteria for your final answer: The score of the title.\nyou
MUST return the actual complete content as the final answer, not a summary.\nFormat
your final answer according to the following OpenAPI schema: {\n \"properties\":
{\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite,
paraphrase, or modify the meaning of the content. Only structure it to match
the schema format.\n\nDo not include the OpenAPI schema in the final output.
Ensure the final output does not include any code block markers like ```json
or ```python.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '2541'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-DDDzz40VXTe9AsmG5ZSlL0IufvYKz\",\n \"object\":
\"chat.completion\",\n \"created\": 1772044447,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
513,\n \"completion_tokens\": 5,\n \"total_tokens\": 518,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 25 Feb 2026 18:34:07 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '426'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK

@@ -1,194 +1,254 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\n\nThis is the expected criteria for your final
answer: The score of the title.\nyou MUST return the actual complete content
as the final answer, not a summary.\nFormat your final answer according to the
following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT:
Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or
modify the meaning of the content. Only structure it to match the schema format.\n\nDo
not include the OpenAPI schema in the final output. Ensure the final output
does not include any code block markers like ```json or ```python.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1394'
- '1421'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- MacOS
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
- 1.83.0
x-stainless-read-timeout:
- '600'
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.13.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CYg0M3aPReBrUikkn7QiHFyZG8ETn\",\n \"object\": \"chat.completion\",\n \"created\": 1762380654,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\
\n}\n"
string: "{\n \"id\": \"chatcmpl-DDE5OBoRr3j1NGXkef0waj9TCBmLb\",\n \"object\":
\"chat.completion\",\n \"created\": 1772044782,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n"
headers:
CF-RAY:
- REDACTED-RAY
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 05 Nov 2025 22:10:54 GMT
- Wed, 25 Feb 2026 18:39:42 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:54 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- X-Request-ID
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- user-hortuttj2f3qtmxyik2zxf4q
- OPENAI-ORG-XXX
openai-processing-ms:
- '730'
- '435'
openai-project:
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '754'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '500'
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- '200000'
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- '499'
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- '199687'
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- 120ms
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- 93ms
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- req_REDACTED
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Given the score the title ''The impact of AI in the future of work'' got, give me an integer score between 1-5 for the following title: ''Return of the Jedi''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nThis is the context you''re working with:\n{\n \"score\": 4\n}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert
scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\n\nThis is the expected criteria for your final
answer: The score of the title.\nyou MUST return the actual complete content
as the final answer, not a summary.\nFormat your final answer according to the
following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\":
\"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\":
\"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT:
Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or
modify the meaning of the content. Only structure it to match the schema format.\n\nDo
not include the OpenAPI schema in the final output. Ensure the final output
does not include any code block markers like ```json or ```python.\n\nProvide
your complete response:"},{"role":"assistant","content":"{\"score\":4}"},{"role":"system","content":"You
are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal
goal is: Score the title"},{"role":"user","content":"\nCurrent Task: Given the
score the title ''The impact of AI in the future of work'' got, give me an integer
score between 1-5 for the following title: ''Return of the Jedi''\n\nThis is
the expected criteria for your final answer: The score of the title.\nyou MUST
return the actual complete content as the final answer, not a summary.\nFormat
your final answer according to the following OpenAPI schema: {\n \"properties\":
{\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\":
[\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\":
false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite,
paraphrase, or modify the meaning of the content. Only structure it to match
the schema format.\n\nDo not include the OpenAPI schema in the final output.
Ensure the final output does not include any code block markers like ```json
or ```python.\n\nThis is the context you''re working with:\n{\"score\":4}\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1512'
- '2699'
content-type:
- application/json
cookie:
- __cf_bm=REDACTED; _cfuvid=REDACTED
- COOKIE-XXX
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- MacOS
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
- 1.83.0
x-stainless-read-timeout:
- '600'
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.13.12
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CYg0MEYp1MebCu2eCMBqCwXtNYTbD\",\n \"object\": \"chat.completion\",\n \"created\": 1762380654,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 3\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\": 22,\n \"total_tokens\": 346,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\
\n}\n"
string: "{\n \"id\": \"chatcmpl-DDE5OEawexwaazoOAgn4QD9W8roe6\",\n \"object\":
\"chat.completion\",\n \"created\": 1772044782,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"score\\\":3}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
552,\n \"completion_tokens\": 5,\n \"total_tokens\": 557,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n"
headers:
CF-RAY:
- REDACTED-RAY
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 05 Nov 2025 22:10:55 GMT
- Wed, 25 Feb 2026 18:39:43 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- X-Request-ID
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- user-hortuttj2f3qtmxyik2zxf4q
- OPENAI-ORG-XXX
openai-processing-ms:
- '983'
- '309'
openai-project:
- proj_fL4UBWR1CMpAAdgzaSKqsVvA
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1002'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '500'
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- '200000'
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- '499'
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- '199659'
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- 120ms
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- 102ms
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- req_REDACTED
- X-REQUEST-ID-XXX
status:
code: 200
message: OK

Some files were not shown because too many files have changed in this diff.