diff --git a/.env.test b/.env.test index 4ef1b503c..44662728d 100644 --- a/.env.test +++ b/.env.test @@ -21,7 +21,6 @@ OPENROUTER_API_KEY=fake-openrouter-key AWS_ACCESS_KEY_ID=fake-aws-access-key AWS_SECRET_ACCESS_KEY=fake-aws-secret-key AWS_DEFAULT_REGION=us-east-1 -AWS_REGION_NAME=us-east-1 # ----------------------------------------------------------------------------- # Azure OpenAI Configuration diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 000000000..309014dfe --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,127 @@ +name: Nightly Canary Release + +on: + schedule: + - cron: '0 6 * * *' # daily at 6am UTC + workflow_dispatch: + +jobs: + check: + name: Check for new commits + runs-on: ubuntu-latest + permissions: + contents: read + outputs: + has_changes: ${{ steps.check.outputs.has_changes }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Check for commits in last 24h + id: check + run: | + RECENT=$(git log --since="24 hours ago" --oneline | head -1) + if [ -n "$RECENT" ]; then + echo "has_changes=true" >> "$GITHUB_OUTPUT" + else + echo "has_changes=false" >> "$GITHUB_OUTPUT" + fi + + build: + name: Build nightly packages + needs: check + if: needs.check.outputs.has_changes == 'true' || github.event_name == 'workflow_dispatch' + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Stamp nightly versions + run: | + DATE=$(date +%Y%m%d) + for init_file in \ + lib/crewai/src/crewai/__init__.py \ + lib/crewai-tools/src/crewai_tools/__init__.py \ + lib/crewai-files/src/crewai_files/__init__.py; do + CURRENT=$(python -c " + import re + text = open('$init_file').read() + print(re.search(r'__version__\s*=\s*\"(.*?)\"\s*$', text, re.MULTILINE).group(1)) + ") + 
NIGHTLY="${CURRENT}.dev${DATE}" + sed -i "s/__version__ = .*/__version__ = \"${NIGHTLY}\"/" "$init_file" + echo "$init_file: $CURRENT -> $NIGHTLY" + done + + # Update cross-package dependency pins to nightly versions + sed -i "s/\"crewai-tools==[^\"]*\"/\"crewai-tools==${NIGHTLY}\"/" lib/crewai/pyproject.toml + sed -i "s/\"crewai==[^\"]*\"/\"crewai==${NIGHTLY}\"/" lib/crewai-tools/pyproject.toml + echo "Updated cross-package dependency pins to ${NIGHTLY}" + + - name: Build packages + run: | + uv build --all-packages + rm dist/.gitignore + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: dist + path: dist/ + + publish: + name: Publish nightly to PyPI + needs: build + runs-on: ubuntu-latest + environment: + name: pypi + url: https://pypi.org/p/crewai + permissions: + id-token: write + contents: read + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v6 + with: + version: "0.8.4" + python-version: "3.12" + enable-cache: false + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + name: dist + path: dist + + - name: Publish to PyPI + env: + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + failed=0 + for package in dist/*; do + if [[ "$package" == *"crewai_devtools"* ]]; then + echo "Skipping private package: $package" + continue + fi + echo "Publishing $package" + if ! 
uv publish "$package"; then + echo "Failed to publish $package" + failed=1 + fi + done + if [ $failed -eq 1 ]; then + echo "Some packages failed to publish" + exit 1 + fi \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 04284c7fe..1b3000647 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,8 +1,6 @@ name: Publish to PyPI on: - repository_dispatch: - types: [deployment-tests-passed] workflow_dispatch: inputs: release_tag: @@ -20,11 +18,8 @@ jobs: - name: Determine release tag id: release run: | - # Priority: workflow_dispatch input > repository_dispatch payload > default branch if [ -n "${{ inputs.release_tag }}" ]; then echo "tag=${{ inputs.release_tag }}" >> $GITHUB_OUTPUT - elif [ -n "${{ github.event.client_payload.release_tag }}" ]; then - echo "tag=${{ github.event.client_payload.release_tag }}" >> $GITHUB_OUTPUT else echo "tag=" >> $GITHUB_OUTPUT fi diff --git a/.github/workflows/trigger-deployment-tests.yml b/.github/workflows/trigger-deployment-tests.yml deleted file mode 100644 index eaad490a5..000000000 --- a/.github/workflows/trigger-deployment-tests.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Trigger Deployment Tests - -on: - release: - types: [published] - -jobs: - trigger: - name: Trigger deployment tests - runs-on: ubuntu-latest - steps: - - name: Trigger deployment tests - uses: peter-evans/repository-dispatch@v3 - with: - token: ${{ secrets.CREWAI_DEPLOYMENTS_PAT }} - repository: ${{ secrets.CREWAI_DEPLOYMENTS_REPOSITORY }} - event-type: crewai-release - client-payload: '{"release_tag": "${{ github.event.release.tag_name }}", "release_name": "${{ github.event.release.name }}"}' diff --git a/conftest.py b/conftest.py index 1cce71c26..9b2c7c5c4 100644 --- a/conftest.py +++ b/conftest.py @@ -12,6 +12,7 @@ from dotenv import load_dotenv import pytest from vcr.request import Request # type: ignore[import-untyped] + try: import vcr.stubs.httpx_stubs as 
httpx_stubs # type: ignore[import-untyped] except ModuleNotFoundError: diff --git a/docs/docs.json b/docs/docs.json index 161d6d5ff..84eed2947 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -54,449 +54,922 @@ } ] }, - "tabs": [ + "versions": [ { - "tab": "Home", - "icon": "house", - "groups": [ + "version": "v1.10.1", + "default": true, + "tabs": [ { - "group": "Welcome", - "pages": [ - "index" - ] - } - ] - }, - { - "tab": "Documentation", - "icon": "book-open", - "groups": [ - { - "group": "Get Started", - "pages": [ - "en/introduction", - "en/installation", - "en/quickstart" - ] - }, - { - "group": "Guides", - "pages": [ + "tab": "Home", + "icon": "house", + "groups": [ { - "group": "Strategy", - "icon": "compass", + "group": "Welcome", "pages": [ - "en/guides/concepts/evaluating-use-cases" - ] - }, - { - "group": "Agents", - "icon": "user", - "pages": [ - "en/guides/agents/crafting-effective-agents" - ] - }, - { - "group": "Crews", - "icon": "users", - "pages": [ - "en/guides/crews/first-crew" - ] - }, - { - "group": "Flows", - "icon": "code-branch", - "pages": [ - "en/guides/flows/first-flow", - "en/guides/flows/mastering-flow-state" - ] - }, - { - "group": "Coding Tools", - "icon": "terminal", - "pages": [ - "en/guides/coding-tools/agents-md" - ] - }, - { - "group": "Advanced", - "icon": "gear", - "pages": [ - "en/guides/advanced/customizing-prompts", - "en/guides/advanced/fingerprinting" + "index" ] } ] }, { - "group": "Core Concepts", - "pages": [ - "en/concepts/agents", - "en/concepts/tasks", - "en/concepts/crews", - "en/concepts/flows", - "en/concepts/production-architecture", - "en/concepts/knowledge", - "en/concepts/llms", - "en/concepts/files", - "en/concepts/processes", - "en/concepts/collaboration", - "en/concepts/training", - "en/concepts/memory", - "en/concepts/reasoning", - "en/concepts/planning", - "en/concepts/testing", - "en/concepts/cli", - "en/concepts/tools", - "en/concepts/event-listener" - ] - }, - { - "group": "MCP Integration", - 
"pages": [ - "en/mcp/overview", - "en/mcp/dsl-integration", - "en/mcp/stdio", - "en/mcp/sse", - "en/mcp/streamable-http", - "en/mcp/multiple-servers", - "en/mcp/security" - ] - }, - { - "group": "Tools", - "pages": [ - "en/tools/overview", + "tab": "Documentation", + "icon": "book-open", + "groups": [ { - "group": "File & Document", - "icon": "folder-open", + "group": "Get Started", "pages": [ - "en/tools/file-document/overview", - "en/tools/file-document/filereadtool", - "en/tools/file-document/filewritetool", - "en/tools/file-document/pdfsearchtool", - "en/tools/file-document/docxsearchtool", - "en/tools/file-document/mdxsearchtool", - "en/tools/file-document/xmlsearchtool", - "en/tools/file-document/txtsearchtool", - "en/tools/file-document/jsonsearchtool", - "en/tools/file-document/csvsearchtool", - "en/tools/file-document/directorysearchtool", - "en/tools/file-document/directoryreadtool", - "en/tools/file-document/ocrtool", - "en/tools/file-document/pdf-text-writing-tool" + "en/introduction", + "en/installation", + "en/quickstart" ] }, { - "group": "Web Scraping & Browsing", - "icon": "globe", + "group": "Guides", "pages": [ - "en/tools/web-scraping/overview", - "en/tools/web-scraping/scrapewebsitetool", - "en/tools/web-scraping/scrapeelementfromwebsitetool", - "en/tools/web-scraping/scrapflyscrapetool", - "en/tools/web-scraping/seleniumscrapingtool", - "en/tools/web-scraping/scrapegraphscrapetool", - "en/tools/web-scraping/spidertool", - "en/tools/web-scraping/browserbaseloadtool", - "en/tools/web-scraping/hyperbrowserloadtool", - "en/tools/web-scraping/stagehandtool", - "en/tools/web-scraping/firecrawlcrawlwebsitetool", - "en/tools/web-scraping/firecrawlscrapewebsitetool", - "en/tools/web-scraping/oxylabsscraperstool", - "en/tools/web-scraping/brightdata-tools" + { + "group": "Strategy", + "icon": "compass", + "pages": [ + "en/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agents", + "icon": "user", + "pages": [ + 
"en/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "en/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "en/guides/flows/first-flow", + "en/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Coding Tools", + "icon": "terminal", + "pages": [ + "en/guides/coding-tools/agents-md" + ] + }, + { + "group": "Advanced", + "icon": "gear", + "pages": [ + "en/guides/advanced/customizing-prompts", + "en/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migration", + "icon": "shuffle", + "pages": [ + "en/guides/migration/migrating-from-langgraph" + ] + } ] }, { - "group": "Search & Research", - "icon": "magnifying-glass", + "group": "Core Concepts", "pages": [ - "en/tools/search-research/overview", - "en/tools/search-research/serperdevtool", - "en/tools/search-research/bravesearchtool", - "en/tools/search-research/exasearchtool", - "en/tools/search-research/linkupsearchtool", - "en/tools/search-research/githubsearchtool", - "en/tools/search-research/websitesearchtool", - "en/tools/search-research/codedocssearchtool", - "en/tools/search-research/youtubechannelsearchtool", - "en/tools/search-research/youtubevideosearchtool", - "en/tools/search-research/tavilysearchtool", - "en/tools/search-research/tavilyextractortool", - "en/tools/search-research/arxivpapertool", - "en/tools/search-research/serpapi-googlesearchtool", - "en/tools/search-research/serpapi-googleshoppingtool", - "en/tools/search-research/databricks-query-tool" + "en/concepts/agents", + "en/concepts/tasks", + "en/concepts/crews", + "en/concepts/flows", + "en/concepts/production-architecture", + "en/concepts/knowledge", + "en/concepts/llms", + "en/concepts/files", + "en/concepts/processes", + "en/concepts/collaboration", + "en/concepts/training", + "en/concepts/memory", + "en/concepts/reasoning", + "en/concepts/planning", + "en/concepts/testing", + "en/concepts/cli", + "en/concepts/tools", + 
"en/concepts/event-listener" ] }, { - "group": "Database & Data", - "icon": "database", + "group": "MCP Integration", "pages": [ - "en/tools/database-data/overview", - "en/tools/database-data/mysqltool", - "en/tools/database-data/pgsearchtool", - "en/tools/database-data/snowflakesearchtool", - "en/tools/database-data/nl2sqltool", - "en/tools/database-data/qdrantvectorsearchtool", - "en/tools/database-data/weaviatevectorsearchtool", - "en/tools/database-data/mongodbvectorsearchtool", - "en/tools/database-data/singlestoresearchtool" + "en/mcp/overview", + "en/mcp/dsl-integration", + "en/mcp/stdio", + "en/mcp/sse", + "en/mcp/streamable-http", + "en/mcp/multiple-servers", + "en/mcp/security" ] }, { - "group": "AI & Machine Learning", - "icon": "brain", + "group": "Tools", "pages": [ - "en/tools/ai-ml/overview", - "en/tools/ai-ml/dalletool", - "en/tools/ai-ml/visiontool", - "en/tools/ai-ml/aimindtool", - "en/tools/ai-ml/llamaindextool", - "en/tools/ai-ml/langchaintool", - "en/tools/ai-ml/ragtool", - "en/tools/ai-ml/codeinterpretertool" + "en/tools/overview", + { + "group": "File & Document", + "icon": "folder-open", + "pages": [ + "en/tools/file-document/overview", + "en/tools/file-document/filereadtool", + "en/tools/file-document/filewritetool", + "en/tools/file-document/pdfsearchtool", + "en/tools/file-document/docxsearchtool", + "en/tools/file-document/mdxsearchtool", + "en/tools/file-document/xmlsearchtool", + "en/tools/file-document/txtsearchtool", + "en/tools/file-document/jsonsearchtool", + "en/tools/file-document/csvsearchtool", + "en/tools/file-document/directorysearchtool", + "en/tools/file-document/directoryreadtool", + "en/tools/file-document/ocrtool", + "en/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "Web Scraping & Browsing", + "icon": "globe", + "pages": [ + "en/tools/web-scraping/overview", + "en/tools/web-scraping/scrapewebsitetool", + "en/tools/web-scraping/scrapeelementfromwebsitetool", + 
"en/tools/web-scraping/scrapflyscrapetool", + "en/tools/web-scraping/seleniumscrapingtool", + "en/tools/web-scraping/scrapegraphscrapetool", + "en/tools/web-scraping/spidertool", + "en/tools/web-scraping/browserbaseloadtool", + "en/tools/web-scraping/hyperbrowserloadtool", + "en/tools/web-scraping/stagehandtool", + "en/tools/web-scraping/firecrawlcrawlwebsitetool", + "en/tools/web-scraping/firecrawlscrapewebsitetool", + "en/tools/web-scraping/oxylabsscraperstool", + "en/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "Search & Research", + "icon": "magnifying-glass", + "pages": [ + "en/tools/search-research/overview", + "en/tools/search-research/serperdevtool", + "en/tools/search-research/bravesearchtool", + "en/tools/search-research/exasearchtool", + "en/tools/search-research/linkupsearchtool", + "en/tools/search-research/githubsearchtool", + "en/tools/search-research/websitesearchtool", + "en/tools/search-research/codedocssearchtool", + "en/tools/search-research/youtubechannelsearchtool", + "en/tools/search-research/youtubevideosearchtool", + "en/tools/search-research/tavilysearchtool", + "en/tools/search-research/tavilyextractortool", + "en/tools/search-research/arxivpapertool", + "en/tools/search-research/serpapi-googlesearchtool", + "en/tools/search-research/serpapi-googleshoppingtool", + "en/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "Database & Data", + "icon": "database", + "pages": [ + "en/tools/database-data/overview", + "en/tools/database-data/mysqltool", + "en/tools/database-data/pgsearchtool", + "en/tools/database-data/snowflakesearchtool", + "en/tools/database-data/nl2sqltool", + "en/tools/database-data/qdrantvectorsearchtool", + "en/tools/database-data/weaviatevectorsearchtool", + "en/tools/database-data/mongodbvectorsearchtool", + "en/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "AI & Machine Learning", + "icon": "brain", + "pages": [ + "en/tools/ai-ml/overview", + 
"en/tools/ai-ml/dalletool", + "en/tools/ai-ml/visiontool", + "en/tools/ai-ml/aimindtool", + "en/tools/ai-ml/llamaindextool", + "en/tools/ai-ml/langchaintool", + "en/tools/ai-ml/ragtool", + "en/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Storage", + "icon": "cloud", + "pages": [ + "en/tools/cloud-storage/overview", + "en/tools/cloud-storage/s3readertool", + "en/tools/cloud-storage/s3writertool", + "en/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "en/tools/integration/overview", + "en/tools/integration/bedrockinvokeagenttool", + "en/tools/integration/crewaiautomationtool", + "en/tools/integration/mergeagenthandlertool" + ] + }, + { + "group": "Automation", + "icon": "bolt", + "pages": [ + "en/tools/automation/overview", + "en/tools/automation/apifyactorstool", + "en/tools/automation/composiotool", + "en/tools/automation/multiontool", + "en/tools/automation/zapieractionstool" + ] + } ] }, { - "group": "Cloud & Storage", - "icon": "cloud", + "group": "Observability", "pages": [ - "en/tools/cloud-storage/overview", - "en/tools/cloud-storage/s3readertool", - "en/tools/cloud-storage/s3writertool", - "en/tools/cloud-storage/bedrockkbretriever" + "en/observability/tracing", + "en/observability/overview", + "en/observability/arize-phoenix", + "en/observability/braintrust", + "en/observability/datadog", + "en/observability/galileo", + "en/observability/langdb", + "en/observability/langfuse", + "en/observability/langtrace", + "en/observability/maxim", + "en/observability/mlflow", + "en/observability/neatlogs", + "en/observability/openlit", + "en/observability/opik", + "en/observability/patronus-evaluation", + "en/observability/portkey", + "en/observability/weave", + "en/observability/truefoundry" ] }, { - "group": "Integrations", - "icon": "plug", + "group": "Learn", "pages": [ - "en/tools/integration/overview", - "en/tools/integration/bedrockinvokeagenttool", - 
"en/tools/integration/crewaiautomationtool", - "en/tools/integration/mergeagenthandlertool" + "en/learn/overview", + "en/learn/llm-selection-guide", + "en/learn/conditional-tasks", + "en/learn/coding-agents", + "en/learn/create-custom-tools", + "en/learn/custom-llm", + "en/learn/custom-manager-agent", + "en/learn/customizing-agents", + "en/learn/dalle-image-generation", + "en/learn/force-tool-output-as-result", + "en/learn/hierarchical-process", + "en/learn/human-input-on-execution", + "en/learn/human-in-the-loop", + "en/learn/human-feedback-in-flows", + "en/learn/kickoff-async", + "en/learn/kickoff-for-each", + "en/learn/llm-connections", + "en/learn/multimodal-agents", + "en/learn/replay-tasks-from-latest-crew-kickoff", + "en/learn/sequential-process", + "en/learn/using-annotations", + "en/learn/execution-hooks", + "en/learn/llm-hooks", + "en/learn/tool-hooks" ] }, { - "group": "Automation", - "icon": "bolt", + "group": "Telemetry", "pages": [ - "en/tools/automation/overview", - "en/tools/automation/apifyactorstool", - "en/tools/automation/composiotool", - "en/tools/automation/multiontool", - "en/tools/automation/zapieractionstool" + "en/telemetry" ] } ] }, { - "group": "Observability", - "pages": [ - "en/observability/tracing", - "en/observability/overview", - "en/observability/arize-phoenix", - "en/observability/braintrust", - "en/observability/datadog", - "en/observability/galileo", - "en/observability/langdb", - "en/observability/langfuse", - "en/observability/langtrace", - "en/observability/maxim", - "en/observability/mlflow", - "en/observability/neatlogs", - "en/observability/openlit", - "en/observability/opik", - "en/observability/patronus-evaluation", - "en/observability/portkey", - "en/observability/weave", - "en/observability/truefoundry" + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "en/enterprise/introduction" + ] + }, + { + "group": "Build", + "pages": [ + "en/enterprise/features/automations", + 
"en/enterprise/features/crew-studio", + "en/enterprise/features/marketplace", + "en/enterprise/features/agent-repositories", + "en/enterprise/features/tools-and-integrations", + "en/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operate", + "pages": [ + "en/enterprise/features/traces", + "en/enterprise/features/webhook-streaming", + "en/enterprise/features/hallucination-guardrail", + "en/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Manage", + "pages": [ + "en/enterprise/features/rbac" + ] + }, + { + "group": "Integration Docs", + "pages": [ + "en/enterprise/integrations/asana", + "en/enterprise/integrations/box", + "en/enterprise/integrations/clickup", + "en/enterprise/integrations/github", + "en/enterprise/integrations/gmail", + "en/enterprise/integrations/google_calendar", + "en/enterprise/integrations/google_contacts", + "en/enterprise/integrations/google_docs", + "en/enterprise/integrations/google_drive", + "en/enterprise/integrations/google_sheets", + "en/enterprise/integrations/google_slides", + "en/enterprise/integrations/hubspot", + "en/enterprise/integrations/jira", + "en/enterprise/integrations/linear", + "en/enterprise/integrations/microsoft_excel", + "en/enterprise/integrations/microsoft_onedrive", + "en/enterprise/integrations/microsoft_outlook", + "en/enterprise/integrations/microsoft_sharepoint", + "en/enterprise/integrations/microsoft_teams", + "en/enterprise/integrations/microsoft_word", + "en/enterprise/integrations/notion", + "en/enterprise/integrations/salesforce", + "en/enterprise/integrations/shopify", + "en/enterprise/integrations/slack", + "en/enterprise/integrations/stripe", + "en/enterprise/integrations/zendesk" + ] + }, + { + "group": "Triggers", + "pages": [ + "en/enterprise/guides/automation-triggers", + "en/enterprise/guides/gmail-trigger", + "en/enterprise/guides/google-calendar-trigger", + "en/enterprise/guides/google-drive-trigger", + "en/enterprise/guides/outlook-trigger", + 
"en/enterprise/guides/onedrive-trigger", + "en/enterprise/guides/microsoft-teams-trigger", + "en/enterprise/guides/slack-trigger", + "en/enterprise/guides/hubspot-trigger", + "en/enterprise/guides/salesforce-trigger", + "en/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "en/enterprise/guides/build-crew", + "en/enterprise/guides/prepare-for-deployment", + "en/enterprise/guides/deploy-to-amp", + "en/enterprise/guides/private-package-registry", + "en/enterprise/guides/kickoff-crew", + "en/enterprise/guides/update-crew", + "en/enterprise/guides/enable-crew-studio", + "en/enterprise/guides/capture_telemetry_logs", + "en/enterprise/guides/azure-openai-setup", + "en/enterprise/guides/tool-repository", + "en/enterprise/guides/react-component-export", + "en/enterprise/guides/team-management", + "en/enterprise/guides/human-in-the-loop", + "en/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Resources", + "pages": [ + "en/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "Learn", - "pages": [ - "en/learn/overview", - "en/learn/llm-selection-guide", - "en/learn/conditional-tasks", - "en/learn/coding-agents", - "en/learn/create-custom-tools", - "en/learn/custom-llm", - "en/learn/custom-manager-agent", - "en/learn/customizing-agents", - "en/learn/dalle-image-generation", - "en/learn/force-tool-output-as-result", - "en/learn/hierarchical-process", - "en/learn/human-input-on-execution", - "en/learn/human-in-the-loop", - "en/learn/human-feedback-in-flows", - "en/learn/kickoff-async", - "en/learn/kickoff-for-each", - "en/learn/llm-connections", - "en/learn/multimodal-agents", - "en/learn/replay-tasks-from-latest-crew-kickoff", - "en/learn/sequential-process", - "en/learn/using-annotations", - "en/learn/execution-hooks", - "en/learn/llm-hooks", - "en/learn/tool-hooks" + "tab": "API Reference", + "icon": "magnifying-glass", + "groups": [ + { + "group": "Getting Started", + "pages": [ + 
"en/api-reference/introduction", + "en/api-reference/inputs", + "en/api-reference/kickoff", + "en/api-reference/resume", + "en/api-reference/status" + ] + } ] }, { - "group": "Telemetry", - "pages": [ - "en/telemetry" + "tab": "Examples", + "icon": "code", + "groups": [ + { + "group": "Examples", + "pages": [ + "en/examples/example", + "en/examples/cookbooks" + ] + } + ] + }, + { + "tab": "Changelog", + "icon": "clock", + "groups": [ + { + "group": "Release Notes", + "pages": [ + "en/changelog" + ] + } ] } ] }, { - "tab": "AMP", - "icon": "briefcase", - "groups": [ + "version": "v1.10.0", + "tabs": [ { - "group": "Getting Started", - "pages": [ - "en/enterprise/introduction" + "tab": "Home", + "icon": "house", + "groups": [ + { + "group": "Welcome", + "pages": [ + "index" + ] + } ] }, { - "group": "Build", - "pages": [ - "en/enterprise/features/automations", - "en/enterprise/features/crew-studio", - "en/enterprise/features/marketplace", - "en/enterprise/features/agent-repositories", - "en/enterprise/features/tools-and-integrations", - "en/enterprise/features/pii-trace-redactions" + "tab": "Documentation", + "icon": "book-open", + "groups": [ + { + "group": "Get Started", + "pages": [ + "en/introduction", + "en/installation", + "en/quickstart" + ] + }, + { + "group": "Guides", + "pages": [ + { + "group": "Strategy", + "icon": "compass", + "pages": [ + "en/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agents", + "icon": "user", + "pages": [ + "en/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "en/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "en/guides/flows/first-flow", + "en/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Coding Tools", + "icon": "terminal", + "pages": [ + "en/guides/coding-tools/agents-md" + ] + }, + { + "group": "Advanced", + "icon": "gear", + "pages": [ + "en/guides/advanced/customizing-prompts", + 
"en/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migration", + "icon": "shuffle", + "pages": [ + "en/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "Core Concepts", + "pages": [ + "en/concepts/agents", + "en/concepts/tasks", + "en/concepts/crews", + "en/concepts/flows", + "en/concepts/production-architecture", + "en/concepts/knowledge", + "en/concepts/llms", + "en/concepts/files", + "en/concepts/processes", + "en/concepts/collaboration", + "en/concepts/training", + "en/concepts/memory", + "en/concepts/reasoning", + "en/concepts/planning", + "en/concepts/testing", + "en/concepts/cli", + "en/concepts/tools", + "en/concepts/event-listener" + ] + }, + { + "group": "MCP Integration", + "pages": [ + "en/mcp/overview", + "en/mcp/dsl-integration", + "en/mcp/stdio", + "en/mcp/sse", + "en/mcp/streamable-http", + "en/mcp/multiple-servers", + "en/mcp/security" + ] + }, + { + "group": "Tools", + "pages": [ + "en/tools/overview", + { + "group": "File & Document", + "icon": "folder-open", + "pages": [ + "en/tools/file-document/overview", + "en/tools/file-document/filereadtool", + "en/tools/file-document/filewritetool", + "en/tools/file-document/pdfsearchtool", + "en/tools/file-document/docxsearchtool", + "en/tools/file-document/mdxsearchtool", + "en/tools/file-document/xmlsearchtool", + "en/tools/file-document/txtsearchtool", + "en/tools/file-document/jsonsearchtool", + "en/tools/file-document/csvsearchtool", + "en/tools/file-document/directorysearchtool", + "en/tools/file-document/directoryreadtool", + "en/tools/file-document/ocrtool", + "en/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "Web Scraping & Browsing", + "icon": "globe", + "pages": [ + "en/tools/web-scraping/overview", + "en/tools/web-scraping/scrapewebsitetool", + "en/tools/web-scraping/scrapeelementfromwebsitetool", + "en/tools/web-scraping/scrapflyscrapetool", + "en/tools/web-scraping/seleniumscrapingtool", + "en/tools/web-scraping/scrapegraphscrapetool", 
+ "en/tools/web-scraping/spidertool", + "en/tools/web-scraping/browserbaseloadtool", + "en/tools/web-scraping/hyperbrowserloadtool", + "en/tools/web-scraping/stagehandtool", + "en/tools/web-scraping/firecrawlcrawlwebsitetool", + "en/tools/web-scraping/firecrawlscrapewebsitetool", + "en/tools/web-scraping/oxylabsscraperstool", + "en/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "Search & Research", + "icon": "magnifying-glass", + "pages": [ + "en/tools/search-research/overview", + "en/tools/search-research/serperdevtool", + "en/tools/search-research/bravesearchtool", + "en/tools/search-research/exasearchtool", + "en/tools/search-research/linkupsearchtool", + "en/tools/search-research/githubsearchtool", + "en/tools/search-research/websitesearchtool", + "en/tools/search-research/codedocssearchtool", + "en/tools/search-research/youtubechannelsearchtool", + "en/tools/search-research/youtubevideosearchtool", + "en/tools/search-research/tavilysearchtool", + "en/tools/search-research/tavilyextractortool", + "en/tools/search-research/arxivpapertool", + "en/tools/search-research/serpapi-googlesearchtool", + "en/tools/search-research/serpapi-googleshoppingtool", + "en/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "Database & Data", + "icon": "database", + "pages": [ + "en/tools/database-data/overview", + "en/tools/database-data/mysqltool", + "en/tools/database-data/pgsearchtool", + "en/tools/database-data/snowflakesearchtool", + "en/tools/database-data/nl2sqltool", + "en/tools/database-data/qdrantvectorsearchtool", + "en/tools/database-data/weaviatevectorsearchtool", + "en/tools/database-data/mongodbvectorsearchtool", + "en/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "AI & Machine Learning", + "icon": "brain", + "pages": [ + "en/tools/ai-ml/overview", + "en/tools/ai-ml/dalletool", + "en/tools/ai-ml/visiontool", + "en/tools/ai-ml/aimindtool", + "en/tools/ai-ml/llamaindextool", + "en/tools/ai-ml/langchaintool", + 
"en/tools/ai-ml/ragtool", + "en/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Storage", + "icon": "cloud", + "pages": [ + "en/tools/cloud-storage/overview", + "en/tools/cloud-storage/s3readertool", + "en/tools/cloud-storage/s3writertool", + "en/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "en/tools/integration/overview", + "en/tools/integration/bedrockinvokeagenttool", + "en/tools/integration/crewaiautomationtool", + "en/tools/integration/mergeagenthandlertool" + ] + }, + { + "group": "Automation", + "icon": "bolt", + "pages": [ + "en/tools/automation/overview", + "en/tools/automation/apifyactorstool", + "en/tools/automation/composiotool", + "en/tools/automation/multiontool", + "en/tools/automation/zapieractionstool" + ] + } + ] + }, + { + "group": "Observability", + "pages": [ + "en/observability/tracing", + "en/observability/overview", + "en/observability/arize-phoenix", + "en/observability/braintrust", + "en/observability/datadog", + "en/observability/galileo", + "en/observability/langdb", + "en/observability/langfuse", + "en/observability/langtrace", + "en/observability/maxim", + "en/observability/mlflow", + "en/observability/neatlogs", + "en/observability/openlit", + "en/observability/opik", + "en/observability/patronus-evaluation", + "en/observability/portkey", + "en/observability/weave", + "en/observability/truefoundry" + ] + }, + { + "group": "Learn", + "pages": [ + "en/learn/overview", + "en/learn/llm-selection-guide", + "en/learn/conditional-tasks", + "en/learn/coding-agents", + "en/learn/create-custom-tools", + "en/learn/custom-llm", + "en/learn/custom-manager-agent", + "en/learn/customizing-agents", + "en/learn/dalle-image-generation", + "en/learn/force-tool-output-as-result", + "en/learn/hierarchical-process", + "en/learn/human-input-on-execution", + "en/learn/human-in-the-loop", + "en/learn/human-feedback-in-flows", + "en/learn/kickoff-async", + 
"en/learn/kickoff-for-each", + "en/learn/llm-connections", + "en/learn/multimodal-agents", + "en/learn/replay-tasks-from-latest-crew-kickoff", + "en/learn/sequential-process", + "en/learn/using-annotations", + "en/learn/execution-hooks", + "en/learn/llm-hooks", + "en/learn/tool-hooks" + ] + }, + { + "group": "Telemetry", + "pages": [ + "en/telemetry" + ] + } ] }, { - "group": "Operate", - "pages": [ - "en/enterprise/features/traces", - "en/enterprise/features/webhook-streaming", - "en/enterprise/features/hallucination-guardrail", - "en/enterprise/features/flow-hitl-management" + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "en/enterprise/introduction" + ] + }, + { + "group": "Build", + "pages": [ + "en/enterprise/features/automations", + "en/enterprise/features/crew-studio", + "en/enterprise/features/marketplace", + "en/enterprise/features/agent-repositories", + "en/enterprise/features/tools-and-integrations", + "en/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operate", + "pages": [ + "en/enterprise/features/traces", + "en/enterprise/features/webhook-streaming", + "en/enterprise/features/hallucination-guardrail", + "en/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Manage", + "pages": [ + "en/enterprise/features/rbac" + ] + }, + { + "group": "Integration Docs", + "pages": [ + "en/enterprise/integrations/asana", + "en/enterprise/integrations/box", + "en/enterprise/integrations/clickup", + "en/enterprise/integrations/github", + "en/enterprise/integrations/gmail", + "en/enterprise/integrations/google_calendar", + "en/enterprise/integrations/google_contacts", + "en/enterprise/integrations/google_docs", + "en/enterprise/integrations/google_drive", + "en/enterprise/integrations/google_sheets", + "en/enterprise/integrations/google_slides", + "en/enterprise/integrations/hubspot", + "en/enterprise/integrations/jira", + "en/enterprise/integrations/linear", + 
"en/enterprise/integrations/microsoft_excel", + "en/enterprise/integrations/microsoft_onedrive", + "en/enterprise/integrations/microsoft_outlook", + "en/enterprise/integrations/microsoft_sharepoint", + "en/enterprise/integrations/microsoft_teams", + "en/enterprise/integrations/microsoft_word", + "en/enterprise/integrations/notion", + "en/enterprise/integrations/salesforce", + "en/enterprise/integrations/shopify", + "en/enterprise/integrations/slack", + "en/enterprise/integrations/stripe", + "en/enterprise/integrations/zendesk" + ] + }, + { + "group": "Triggers", + "pages": [ + "en/enterprise/guides/automation-triggers", + "en/enterprise/guides/gmail-trigger", + "en/enterprise/guides/google-calendar-trigger", + "en/enterprise/guides/google-drive-trigger", + "en/enterprise/guides/outlook-trigger", + "en/enterprise/guides/onedrive-trigger", + "en/enterprise/guides/microsoft-teams-trigger", + "en/enterprise/guides/slack-trigger", + "en/enterprise/guides/hubspot-trigger", + "en/enterprise/guides/salesforce-trigger", + "en/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "en/enterprise/guides/build-crew", + "en/enterprise/guides/prepare-for-deployment", + "en/enterprise/guides/deploy-to-amp", + "en/enterprise/guides/private-package-registry", + "en/enterprise/guides/kickoff-crew", + "en/enterprise/guides/update-crew", + "en/enterprise/guides/enable-crew-studio", + "en/enterprise/guides/capture_telemetry_logs", + "en/enterprise/guides/azure-openai-setup", + "en/enterprise/guides/tool-repository", + "en/enterprise/guides/react-component-export", + "en/enterprise/guides/team-management", + "en/enterprise/guides/human-in-the-loop", + "en/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Resources", + "pages": [ + "en/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "Manage", - "pages": [ - "en/enterprise/features/rbac" + "tab": "API Reference", + "icon": "magnifying-glass", + "groups": [ + { + 
"group": "Getting Started", + "pages": [ + "en/api-reference/introduction", + "en/api-reference/inputs", + "en/api-reference/kickoff", + "en/api-reference/resume", + "en/api-reference/status" + ] + } ] }, { - "group": "Integration Docs", - "pages": [ - "en/enterprise/integrations/asana", - "en/enterprise/integrations/box", - "en/enterprise/integrations/clickup", - "en/enterprise/integrations/github", - "en/enterprise/integrations/gmail", - "en/enterprise/integrations/google_calendar", - "en/enterprise/integrations/google_contacts", - "en/enterprise/integrations/google_docs", - "en/enterprise/integrations/google_drive", - "en/enterprise/integrations/google_sheets", - "en/enterprise/integrations/google_slides", - "en/enterprise/integrations/hubspot", - "en/enterprise/integrations/jira", - "en/enterprise/integrations/linear", - "en/enterprise/integrations/microsoft_excel", - "en/enterprise/integrations/microsoft_onedrive", - "en/enterprise/integrations/microsoft_outlook", - "en/enterprise/integrations/microsoft_sharepoint", - "en/enterprise/integrations/microsoft_teams", - "en/enterprise/integrations/microsoft_word", - "en/enterprise/integrations/notion", - "en/enterprise/integrations/salesforce", - "en/enterprise/integrations/shopify", - "en/enterprise/integrations/slack", - "en/enterprise/integrations/stripe", - "en/enterprise/integrations/zendesk" + "tab": "Examples", + "icon": "code", + "groups": [ + { + "group": "Examples", + "pages": [ + "en/examples/example", + "en/examples/cookbooks" + ] + } ] }, { - "group": "Triggers", - "pages": [ - "en/enterprise/guides/automation-triggers", - "en/enterprise/guides/gmail-trigger", - "en/enterprise/guides/google-calendar-trigger", - "en/enterprise/guides/google-drive-trigger", - "en/enterprise/guides/outlook-trigger", - "en/enterprise/guides/onedrive-trigger", - "en/enterprise/guides/microsoft-teams-trigger", - "en/enterprise/guides/slack-trigger", - "en/enterprise/guides/hubspot-trigger", - 
"en/enterprise/guides/salesforce-trigger", - "en/enterprise/guides/zapier-trigger" - ] - }, - { - "group": "How-To Guides", - "pages": [ - "en/enterprise/guides/build-crew", - "en/enterprise/guides/prepare-for-deployment", - "en/enterprise/guides/deploy-to-amp", - "en/enterprise/guides/kickoff-crew", - "en/enterprise/guides/update-crew", - "en/enterprise/guides/enable-crew-studio", - "en/enterprise/guides/capture_telemetry_logs", - "en/enterprise/guides/azure-openai-setup", - "en/enterprise/guides/tool-repository", - "en/enterprise/guides/react-component-export", - "en/enterprise/guides/team-management", - "en/enterprise/guides/human-in-the-loop", - "en/enterprise/guides/webhook-automation" - ] - }, - { - "group": "Resources", - "pages": [ - "en/enterprise/resources/frequently-asked-questions" - ] - } - ] - }, - { - "tab": "API Reference", - "icon": "magnifying-glass", - "groups": [ - { - "group": "Getting Started", - "pages": [ - "en/api-reference/introduction", - "en/api-reference/inputs", - "en/api-reference/kickoff", - "en/api-reference/resume", - "en/api-reference/status" - ] - } - ] - }, - { - "tab": "Examples", - "icon": "code", - "groups": [ - { - "group": "Examples", - "pages": [ - "en/examples/example", - "en/examples/cookbooks" - ] - } - ] - }, - { - "tab": "Changelog", - "icon": "clock", - "groups": [ - { - "group": "Release Notes", - "pages": [ - "en/changelog" + "tab": "Changelog", + "icon": "clock", + "groups": [ + { + "group": "Release Notes", + "pages": [ + "en/changelog" + ] + } ] } ] @@ -529,427 +1002,878 @@ } ] }, - "tabs": [ + "versions": [ { - "tab": "Início", - "icon": "house", - "groups": [ + "version": "v1.10.1", + "default": true, + "tabs": [ { - "group": "Bem-vindo", - "pages": [ - "pt-BR/index" - ] - } - ] - }, - { - "tab": "Documentação", - "icon": "book-open", - "groups": [ - { - "group": "Começando", - "pages": [ - "pt-BR/introduction", - "pt-BR/installation", - "pt-BR/quickstart" - ] - }, - { - "group": "Guias", - "pages": [ + "tab": 
"Início", + "icon": "house", + "groups": [ { - "group": "Estratégia", - "icon": "compass", + "group": "Bem-vindo", "pages": [ - "pt-BR/guides/concepts/evaluating-use-cases" - ] - }, - { - "group": "Agentes", - "icon": "user", - "pages": [ - "pt-BR/guides/agents/crafting-effective-agents" - ] - }, - { - "group": "Crews", - "icon": "users", - "pages": [ - "pt-BR/guides/crews/first-crew" - ] - }, - { - "group": "Flows", - "icon": "code-branch", - "pages": [ - "pt-BR/guides/flows/first-flow", - "pt-BR/guides/flows/mastering-flow-state" - ] - }, - { - "group": "Avançado", - "icon": "gear", - "pages": [ - "pt-BR/guides/advanced/customizing-prompts", - "pt-BR/guides/advanced/fingerprinting" + "pt-BR/index" ] } ] }, { - "group": "Conceitos-Chave", - "pages": [ - "pt-BR/concepts/agents", - "pt-BR/concepts/tasks", - "pt-BR/concepts/crews", - "pt-BR/concepts/flows", - "pt-BR/concepts/production-architecture", - "pt-BR/concepts/knowledge", - "pt-BR/concepts/llms", - "pt-BR/concepts/files", - "pt-BR/concepts/processes", - "pt-BR/concepts/collaboration", - "pt-BR/concepts/training", - "pt-BR/concepts/memory", - "pt-BR/concepts/reasoning", - "pt-BR/concepts/planning", - "pt-BR/concepts/testing", - "pt-BR/concepts/cli", - "pt-BR/concepts/tools", - "pt-BR/concepts/event-listener" - ] - }, - { - "group": "Integração MCP", - "pages": [ - "pt-BR/mcp/overview", - "pt-BR/mcp/dsl-integration", - "pt-BR/mcp/stdio", - "pt-BR/mcp/sse", - "pt-BR/mcp/streamable-http", - "pt-BR/mcp/multiple-servers", - "pt-BR/mcp/security" - ] - }, - { - "group": "Ferramentas", - "pages": [ - "pt-BR/tools/overview", + "tab": "Documentação", + "icon": "book-open", + "groups": [ { - "group": "Arquivo & Documento", - "icon": "folder-open", + "group": "Começando", "pages": [ - "pt-BR/tools/file-document/overview", - "pt-BR/tools/file-document/filereadtool", - "pt-BR/tools/file-document/filewritetool", - "pt-BR/tools/file-document/pdfsearchtool", - "pt-BR/tools/file-document/docxsearchtool", - 
"pt-BR/tools/file-document/mdxsearchtool", - "pt-BR/tools/file-document/xmlsearchtool", - "pt-BR/tools/file-document/txtsearchtool", - "pt-BR/tools/file-document/jsonsearchtool", - "pt-BR/tools/file-document/csvsearchtool", - "pt-BR/tools/file-document/directorysearchtool", - "pt-BR/tools/file-document/directoryreadtool" + "pt-BR/introduction", + "pt-BR/installation", + "pt-BR/quickstart" ] }, { - "group": "Web Scraping & Navegação", - "icon": "globe", + "group": "Guias", "pages": [ - "pt-BR/tools/web-scraping/overview", - "pt-BR/tools/web-scraping/scrapewebsitetool", - "pt-BR/tools/web-scraping/scrapeelementfromwebsitetool", - "pt-BR/tools/web-scraping/scrapflyscrapetool", - "pt-BR/tools/web-scraping/seleniumscrapingtool", - "pt-BR/tools/web-scraping/scrapegraphscrapetool", - "pt-BR/tools/web-scraping/spidertool", - "pt-BR/tools/web-scraping/browserbaseloadtool", - "pt-BR/tools/web-scraping/hyperbrowserloadtool", - "pt-BR/tools/web-scraping/stagehandtool", - "pt-BR/tools/web-scraping/firecrawlcrawlwebsitetool", - "pt-BR/tools/web-scraping/firecrawlscrapewebsitetool", - "pt-BR/tools/web-scraping/oxylabsscraperstool" + { + "group": "Estratégia", + "icon": "compass", + "pages": [ + "pt-BR/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agentes", + "icon": "user", + "pages": [ + "pt-BR/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "pt-BR/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "pt-BR/guides/flows/first-flow", + "pt-BR/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Avançado", + "icon": "gear", + "pages": [ + "pt-BR/guides/advanced/customizing-prompts", + "pt-BR/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migração", + "icon": "shuffle", + "pages": [ + "pt-BR/guides/migration/migrating-from-langgraph" + ] + } ] }, { - "group": "Pesquisa", - "icon": "magnifying-glass", + "group": "Conceitos-Chave", "pages": [ - 
"pt-BR/tools/search-research/overview", - "pt-BR/tools/search-research/serperdevtool", - "pt-BR/tools/search-research/bravesearchtool", - "pt-BR/tools/search-research/exasearchtool", - "pt-BR/tools/search-research/linkupsearchtool", - "pt-BR/tools/search-research/githubsearchtool", - "pt-BR/tools/search-research/websitesearchtool", - "pt-BR/tools/search-research/codedocssearchtool", - "pt-BR/tools/search-research/youtubechannelsearchtool", - "pt-BR/tools/search-research/youtubevideosearchtool" + "pt-BR/concepts/agents", + "pt-BR/concepts/tasks", + "pt-BR/concepts/crews", + "pt-BR/concepts/flows", + "pt-BR/concepts/production-architecture", + "pt-BR/concepts/knowledge", + "pt-BR/concepts/llms", + "pt-BR/concepts/files", + "pt-BR/concepts/processes", + "pt-BR/concepts/collaboration", + "pt-BR/concepts/training", + "pt-BR/concepts/memory", + "pt-BR/concepts/reasoning", + "pt-BR/concepts/planning", + "pt-BR/concepts/testing", + "pt-BR/concepts/cli", + "pt-BR/concepts/tools", + "pt-BR/concepts/event-listener" ] }, { - "group": "Dados", - "icon": "database", + "group": "Integração MCP", "pages": [ - "pt-BR/tools/database-data/overview", - "pt-BR/tools/database-data/mysqltool", - "pt-BR/tools/database-data/pgsearchtool", - "pt-BR/tools/database-data/snowflakesearchtool", - "pt-BR/tools/database-data/nl2sqltool", - "pt-BR/tools/database-data/qdrantvectorsearchtool", - "pt-BR/tools/database-data/weaviatevectorsearchtool" + "pt-BR/mcp/overview", + "pt-BR/mcp/dsl-integration", + "pt-BR/mcp/stdio", + "pt-BR/mcp/sse", + "pt-BR/mcp/streamable-http", + "pt-BR/mcp/multiple-servers", + "pt-BR/mcp/security" ] }, { - "group": "IA & Machine Learning", - "icon": "brain", + "group": "Ferramentas", "pages": [ - "pt-BR/tools/ai-ml/overview", - "pt-BR/tools/ai-ml/dalletool", - "pt-BR/tools/ai-ml/visiontool", - "pt-BR/tools/ai-ml/aimindtool", - "pt-BR/tools/ai-ml/llamaindextool", - "pt-BR/tools/ai-ml/langchaintool", - "pt-BR/tools/ai-ml/ragtool", - "pt-BR/tools/ai-ml/codeinterpretertool" + 
"pt-BR/tools/overview", + { + "group": "Arquivo & Documento", + "icon": "folder-open", + "pages": [ + "pt-BR/tools/file-document/overview", + "pt-BR/tools/file-document/filereadtool", + "pt-BR/tools/file-document/filewritetool", + "pt-BR/tools/file-document/pdfsearchtool", + "pt-BR/tools/file-document/docxsearchtool", + "pt-BR/tools/file-document/mdxsearchtool", + "pt-BR/tools/file-document/xmlsearchtool", + "pt-BR/tools/file-document/txtsearchtool", + "pt-BR/tools/file-document/jsonsearchtool", + "pt-BR/tools/file-document/csvsearchtool", + "pt-BR/tools/file-document/directorysearchtool", + "pt-BR/tools/file-document/directoryreadtool" + ] + }, + { + "group": "Web Scraping & Navegação", + "icon": "globe", + "pages": [ + "pt-BR/tools/web-scraping/overview", + "pt-BR/tools/web-scraping/scrapewebsitetool", + "pt-BR/tools/web-scraping/scrapeelementfromwebsitetool", + "pt-BR/tools/web-scraping/scrapflyscrapetool", + "pt-BR/tools/web-scraping/seleniumscrapingtool", + "pt-BR/tools/web-scraping/scrapegraphscrapetool", + "pt-BR/tools/web-scraping/spidertool", + "pt-BR/tools/web-scraping/browserbaseloadtool", + "pt-BR/tools/web-scraping/hyperbrowserloadtool", + "pt-BR/tools/web-scraping/stagehandtool", + "pt-BR/tools/web-scraping/firecrawlcrawlwebsitetool", + "pt-BR/tools/web-scraping/firecrawlscrapewebsitetool", + "pt-BR/tools/web-scraping/oxylabsscraperstool" + ] + }, + { + "group": "Pesquisa", + "icon": "magnifying-glass", + "pages": [ + "pt-BR/tools/search-research/overview", + "pt-BR/tools/search-research/serperdevtool", + "pt-BR/tools/search-research/bravesearchtool", + "pt-BR/tools/search-research/exasearchtool", + "pt-BR/tools/search-research/linkupsearchtool", + "pt-BR/tools/search-research/githubsearchtool", + "pt-BR/tools/search-research/websitesearchtool", + "pt-BR/tools/search-research/codedocssearchtool", + "pt-BR/tools/search-research/youtubechannelsearchtool", + "pt-BR/tools/search-research/youtubevideosearchtool" + ] + }, + { + "group": "Dados", + "icon": 
"database", + "pages": [ + "pt-BR/tools/database-data/overview", + "pt-BR/tools/database-data/mysqltool", + "pt-BR/tools/database-data/pgsearchtool", + "pt-BR/tools/database-data/snowflakesearchtool", + "pt-BR/tools/database-data/nl2sqltool", + "pt-BR/tools/database-data/qdrantvectorsearchtool", + "pt-BR/tools/database-data/weaviatevectorsearchtool" + ] + }, + { + "group": "IA & Machine Learning", + "icon": "brain", + "pages": [ + "pt-BR/tools/ai-ml/overview", + "pt-BR/tools/ai-ml/dalletool", + "pt-BR/tools/ai-ml/visiontool", + "pt-BR/tools/ai-ml/aimindtool", + "pt-BR/tools/ai-ml/llamaindextool", + "pt-BR/tools/ai-ml/langchaintool", + "pt-BR/tools/ai-ml/ragtool", + "pt-BR/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Armazenamento", + "icon": "cloud", + "pages": [ + "pt-BR/tools/cloud-storage/overview", + "pt-BR/tools/cloud-storage/s3readertool", + "pt-BR/tools/cloud-storage/s3writertool", + "pt-BR/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "pt-BR/tools/integration/overview", + "pt-BR/tools/integration/bedrockinvokeagenttool", + "pt-BR/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "Automação", + "icon": "bolt", + "pages": [ + "pt-BR/tools/automation/overview", + "pt-BR/tools/automation/apifyactorstool", + "pt-BR/tools/automation/composiotool", + "pt-BR/tools/automation/multiontool" + ] + } ] }, { - "group": "Cloud & Armazenamento", - "icon": "cloud", + "group": "Observabilidade", "pages": [ - "pt-BR/tools/cloud-storage/overview", - "pt-BR/tools/cloud-storage/s3readertool", - "pt-BR/tools/cloud-storage/s3writertool", - "pt-BR/tools/cloud-storage/bedrockkbretriever" + "pt-BR/observability/tracing", + "pt-BR/observability/overview", + "pt-BR/observability/arize-phoenix", + "pt-BR/observability/braintrust", + "pt-BR/observability/datadog", + "pt-BR/observability/galileo", + "pt-BR/observability/langdb", + "pt-BR/observability/langfuse", + 
"pt-BR/observability/langtrace", + "pt-BR/observability/maxim", + "pt-BR/observability/mlflow", + "pt-BR/observability/openlit", + "pt-BR/observability/opik", + "pt-BR/observability/patronus-evaluation", + "pt-BR/observability/portkey", + "pt-BR/observability/weave", + "pt-BR/observability/truefoundry" ] }, { - "group": "Integrations", - "icon": "plug", + "group": "Aprenda", "pages": [ - "pt-BR/tools/integration/overview", - "pt-BR/tools/integration/bedrockinvokeagenttool", - "pt-BR/tools/integration/crewaiautomationtool" + "pt-BR/learn/overview", + "pt-BR/learn/llm-selection-guide", + "pt-BR/learn/conditional-tasks", + "pt-BR/learn/coding-agents", + "pt-BR/learn/create-custom-tools", + "pt-BR/learn/custom-llm", + "pt-BR/learn/custom-manager-agent", + "pt-BR/learn/customizing-agents", + "pt-BR/learn/dalle-image-generation", + "pt-BR/learn/force-tool-output-as-result", + "pt-BR/learn/hierarchical-process", + "pt-BR/learn/human-input-on-execution", + "pt-BR/learn/human-in-the-loop", + "pt-BR/learn/human-feedback-in-flows", + "pt-BR/learn/kickoff-async", + "pt-BR/learn/kickoff-for-each", + "pt-BR/learn/llm-connections", + "pt-BR/learn/multimodal-agents", + "pt-BR/learn/replay-tasks-from-latest-crew-kickoff", + "pt-BR/learn/sequential-process", + "pt-BR/learn/using-annotations", + "pt-BR/learn/execution-hooks", + "pt-BR/learn/llm-hooks", + "pt-BR/learn/tool-hooks" ] }, { - "group": "Automação", - "icon": "bolt", + "group": "Telemetria", "pages": [ - "pt-BR/tools/automation/overview", - "pt-BR/tools/automation/apifyactorstool", - "pt-BR/tools/automation/composiotool", - "pt-BR/tools/automation/multiontool" + "pt-BR/telemetry" ] } ] }, { - "group": "Observabilidade", - "pages": [ - "pt-BR/observability/tracing", - "pt-BR/observability/overview", - "pt-BR/observability/arize-phoenix", - "pt-BR/observability/braintrust", - "pt-BR/observability/datadog", - "pt-BR/observability/galileo", - "pt-BR/observability/langdb", - "pt-BR/observability/langfuse", - 
"pt-BR/observability/langtrace", - "pt-BR/observability/maxim", - "pt-BR/observability/mlflow", - "pt-BR/observability/openlit", - "pt-BR/observability/opik", - "pt-BR/observability/patronus-evaluation", - "pt-BR/observability/portkey", - "pt-BR/observability/weave", - "pt-BR/observability/truefoundry" + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/enterprise/introduction" + ] + }, + { + "group": "Construir", + "pages": [ + "pt-BR/enterprise/features/automations", + "pt-BR/enterprise/features/crew-studio", + "pt-BR/enterprise/features/marketplace", + "pt-BR/enterprise/features/agent-repositories", + "pt-BR/enterprise/features/tools-and-integrations", + "pt-BR/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operar", + "pages": [ + "pt-BR/enterprise/features/traces", + "pt-BR/enterprise/features/webhook-streaming", + "pt-BR/enterprise/features/hallucination-guardrail", + "pt-BR/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Gerenciar", + "pages": [ + "pt-BR/enterprise/features/rbac" + ] + }, + { + "group": "Documentação de Integração", + "pages": [ + "pt-BR/enterprise/integrations/asana", + "pt-BR/enterprise/integrations/box", + "pt-BR/enterprise/integrations/clickup", + "pt-BR/enterprise/integrations/github", + "pt-BR/enterprise/integrations/gmail", + "pt-BR/enterprise/integrations/google_calendar", + "pt-BR/enterprise/integrations/google_contacts", + "pt-BR/enterprise/integrations/google_docs", + "pt-BR/enterprise/integrations/google_drive", + "pt-BR/enterprise/integrations/google_sheets", + "pt-BR/enterprise/integrations/google_slides", + "pt-BR/enterprise/integrations/hubspot", + "pt-BR/enterprise/integrations/jira", + "pt-BR/enterprise/integrations/linear", + "pt-BR/enterprise/integrations/microsoft_excel", + "pt-BR/enterprise/integrations/microsoft_onedrive", + "pt-BR/enterprise/integrations/microsoft_outlook", + "pt-BR/enterprise/integrations/microsoft_sharepoint", + 
"pt-BR/enterprise/integrations/microsoft_teams", + "pt-BR/enterprise/integrations/microsoft_word", + "pt-BR/enterprise/integrations/notion", + "pt-BR/enterprise/integrations/salesforce", + "pt-BR/enterprise/integrations/shopify", + "pt-BR/enterprise/integrations/slack", + "pt-BR/enterprise/integrations/stripe", + "pt-BR/enterprise/integrations/zendesk" + ] + }, + { + "group": "Guias", + "pages": [ + "pt-BR/enterprise/guides/build-crew", + "pt-BR/enterprise/guides/prepare-for-deployment", + "pt-BR/enterprise/guides/deploy-to-amp", + "pt-BR/enterprise/guides/private-package-registry", + "pt-BR/enterprise/guides/kickoff-crew", + "pt-BR/enterprise/guides/update-crew", + "pt-BR/enterprise/guides/enable-crew-studio", + "pt-BR/enterprise/guides/azure-openai-setup", + "pt-BR/enterprise/guides/tool-repository", + "pt-BR/enterprise/guides/react-component-export", + "pt-BR/enterprise/guides/team-management", + "pt-BR/enterprise/guides/human-in-the-loop", + "pt-BR/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Triggers", + "pages": [ + "pt-BR/enterprise/guides/automation-triggers", + "pt-BR/enterprise/guides/gmail-trigger", + "pt-BR/enterprise/guides/google-calendar-trigger", + "pt-BR/enterprise/guides/google-drive-trigger", + "pt-BR/enterprise/guides/outlook-trigger", + "pt-BR/enterprise/guides/onedrive-trigger", + "pt-BR/enterprise/guides/microsoft-teams-trigger", + "pt-BR/enterprise/guides/slack-trigger", + "pt-BR/enterprise/guides/hubspot-trigger", + "pt-BR/enterprise/guides/salesforce-trigger", + "pt-BR/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "Recursos", + "pages": [ + "pt-BR/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "Aprenda", - "pages": [ - "pt-BR/learn/overview", - "pt-BR/learn/llm-selection-guide", - "pt-BR/learn/conditional-tasks", - "pt-BR/learn/coding-agents", - "pt-BR/learn/create-custom-tools", - "pt-BR/learn/custom-llm", - "pt-BR/learn/custom-manager-agent", - "pt-BR/learn/customizing-agents", - 
"pt-BR/learn/dalle-image-generation", - "pt-BR/learn/force-tool-output-as-result", - "pt-BR/learn/hierarchical-process", - "pt-BR/learn/human-input-on-execution", - "pt-BR/learn/human-in-the-loop", - "pt-BR/learn/human-feedback-in-flows", - "pt-BR/learn/kickoff-async", - "pt-BR/learn/kickoff-for-each", - "pt-BR/learn/llm-connections", - "pt-BR/learn/multimodal-agents", - "pt-BR/learn/replay-tasks-from-latest-crew-kickoff", - "pt-BR/learn/sequential-process", - "pt-BR/learn/using-annotations", - "pt-BR/learn/execution-hooks", - "pt-BR/learn/llm-hooks", - "pt-BR/learn/tool-hooks" + "tab": "Referência da API", + "icon": "magnifying-glass", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/api-reference/introduction", + "pt-BR/api-reference/inputs", + "pt-BR/api-reference/kickoff", + "pt-BR/api-reference/resume", + "pt-BR/api-reference/status" + ] + } ] }, { - "group": "Telemetria", - "pages": [ - "pt-BR/telemetry" + "tab": "Exemplos", + "icon": "code", + "groups": [ + { + "group": "Exemplos", + "pages": [ + "pt-BR/examples/example", + "pt-BR/examples/cookbooks" + ] + } + ] + }, + { + "tab": "Notas de Versão", + "icon": "clock", + "groups": [ + { + "group": "Notas de Versão", + "pages": [ + "pt-BR/changelog" + ] + } ] } ] }, { - "tab": "AMP", - "icon": "briefcase", - "groups": [ + "version": "v1.10.0", + "tabs": [ { - "group": "Começando", - "pages": [ - "pt-BR/enterprise/introduction" + "tab": "Início", + "icon": "house", + "groups": [ + { + "group": "Bem-vindo", + "pages": [ + "pt-BR/index" + ] + } ] }, { - "group": "Construir", - "pages": [ - "pt-BR/enterprise/features/automations", - "pt-BR/enterprise/features/crew-studio", - "pt-BR/enterprise/features/marketplace", - "pt-BR/enterprise/features/agent-repositories", - "pt-BR/enterprise/features/tools-and-integrations", - "pt-BR/enterprise/features/pii-trace-redactions" + "tab": "Documentação", + "icon": "book-open", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/introduction", + 
"pt-BR/installation", + "pt-BR/quickstart" + ] + }, + { + "group": "Guias", + "pages": [ + { + "group": "Estratégia", + "icon": "compass", + "pages": [ + "pt-BR/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "Agentes", + "icon": "user", + "pages": [ + "pt-BR/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "Crews", + "icon": "users", + "pages": [ + "pt-BR/guides/crews/first-crew" + ] + }, + { + "group": "Flows", + "icon": "code-branch", + "pages": [ + "pt-BR/guides/flows/first-flow", + "pt-BR/guides/flows/mastering-flow-state" + ] + }, + { + "group": "Avançado", + "icon": "gear", + "pages": [ + "pt-BR/guides/advanced/customizing-prompts", + "pt-BR/guides/advanced/fingerprinting" + ] + }, + { + "group": "Migração", + "icon": "shuffle", + "pages": [ + "pt-BR/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "Conceitos-Chave", + "pages": [ + "pt-BR/concepts/agents", + "pt-BR/concepts/tasks", + "pt-BR/concepts/crews", + "pt-BR/concepts/flows", + "pt-BR/concepts/production-architecture", + "pt-BR/concepts/knowledge", + "pt-BR/concepts/llms", + "pt-BR/concepts/files", + "pt-BR/concepts/processes", + "pt-BR/concepts/collaboration", + "pt-BR/concepts/training", + "pt-BR/concepts/memory", + "pt-BR/concepts/reasoning", + "pt-BR/concepts/planning", + "pt-BR/concepts/testing", + "pt-BR/concepts/cli", + "pt-BR/concepts/tools", + "pt-BR/concepts/event-listener" + ] + }, + { + "group": "Integração MCP", + "pages": [ + "pt-BR/mcp/overview", + "pt-BR/mcp/dsl-integration", + "pt-BR/mcp/stdio", + "pt-BR/mcp/sse", + "pt-BR/mcp/streamable-http", + "pt-BR/mcp/multiple-servers", + "pt-BR/mcp/security" + ] + }, + { + "group": "Ferramentas", + "pages": [ + "pt-BR/tools/overview", + { + "group": "Arquivo & Documento", + "icon": "folder-open", + "pages": [ + "pt-BR/tools/file-document/overview", + "pt-BR/tools/file-document/filereadtool", + "pt-BR/tools/file-document/filewritetool", + "pt-BR/tools/file-document/pdfsearchtool", + 
"pt-BR/tools/file-document/docxsearchtool", + "pt-BR/tools/file-document/mdxsearchtool", + "pt-BR/tools/file-document/xmlsearchtool", + "pt-BR/tools/file-document/txtsearchtool", + "pt-BR/tools/file-document/jsonsearchtool", + "pt-BR/tools/file-document/csvsearchtool", + "pt-BR/tools/file-document/directorysearchtool", + "pt-BR/tools/file-document/directoryreadtool" + ] + }, + { + "group": "Web Scraping & Navegação", + "icon": "globe", + "pages": [ + "pt-BR/tools/web-scraping/overview", + "pt-BR/tools/web-scraping/scrapewebsitetool", + "pt-BR/tools/web-scraping/scrapeelementfromwebsitetool", + "pt-BR/tools/web-scraping/scrapflyscrapetool", + "pt-BR/tools/web-scraping/seleniumscrapingtool", + "pt-BR/tools/web-scraping/scrapegraphscrapetool", + "pt-BR/tools/web-scraping/spidertool", + "pt-BR/tools/web-scraping/browserbaseloadtool", + "pt-BR/tools/web-scraping/hyperbrowserloadtool", + "pt-BR/tools/web-scraping/stagehandtool", + "pt-BR/tools/web-scraping/firecrawlcrawlwebsitetool", + "pt-BR/tools/web-scraping/firecrawlscrapewebsitetool", + "pt-BR/tools/web-scraping/oxylabsscraperstool" + ] + }, + { + "group": "Pesquisa", + "icon": "magnifying-glass", + "pages": [ + "pt-BR/tools/search-research/overview", + "pt-BR/tools/search-research/serperdevtool", + "pt-BR/tools/search-research/bravesearchtool", + "pt-BR/tools/search-research/exasearchtool", + "pt-BR/tools/search-research/linkupsearchtool", + "pt-BR/tools/search-research/githubsearchtool", + "pt-BR/tools/search-research/websitesearchtool", + "pt-BR/tools/search-research/codedocssearchtool", + "pt-BR/tools/search-research/youtubechannelsearchtool", + "pt-BR/tools/search-research/youtubevideosearchtool" + ] + }, + { + "group": "Dados", + "icon": "database", + "pages": [ + "pt-BR/tools/database-data/overview", + "pt-BR/tools/database-data/mysqltool", + "pt-BR/tools/database-data/pgsearchtool", + "pt-BR/tools/database-data/snowflakesearchtool", + "pt-BR/tools/database-data/nl2sqltool", + 
"pt-BR/tools/database-data/qdrantvectorsearchtool", + "pt-BR/tools/database-data/weaviatevectorsearchtool" + ] + }, + { + "group": "IA & Machine Learning", + "icon": "brain", + "pages": [ + "pt-BR/tools/ai-ml/overview", + "pt-BR/tools/ai-ml/dalletool", + "pt-BR/tools/ai-ml/visiontool", + "pt-BR/tools/ai-ml/aimindtool", + "pt-BR/tools/ai-ml/llamaindextool", + "pt-BR/tools/ai-ml/langchaintool", + "pt-BR/tools/ai-ml/ragtool", + "pt-BR/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "Cloud & Armazenamento", + "icon": "cloud", + "pages": [ + "pt-BR/tools/cloud-storage/overview", + "pt-BR/tools/cloud-storage/s3readertool", + "pt-BR/tools/cloud-storage/s3writertool", + "pt-BR/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "pt-BR/tools/integration/overview", + "pt-BR/tools/integration/bedrockinvokeagenttool", + "pt-BR/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "Automação", + "icon": "bolt", + "pages": [ + "pt-BR/tools/automation/overview", + "pt-BR/tools/automation/apifyactorstool", + "pt-BR/tools/automation/composiotool", + "pt-BR/tools/automation/multiontool" + ] + } + ] + }, + { + "group": "Observabilidade", + "pages": [ + "pt-BR/observability/tracing", + "pt-BR/observability/overview", + "pt-BR/observability/arize-phoenix", + "pt-BR/observability/braintrust", + "pt-BR/observability/datadog", + "pt-BR/observability/galileo", + "pt-BR/observability/langdb", + "pt-BR/observability/langfuse", + "pt-BR/observability/langtrace", + "pt-BR/observability/maxim", + "pt-BR/observability/mlflow", + "pt-BR/observability/openlit", + "pt-BR/observability/opik", + "pt-BR/observability/patronus-evaluation", + "pt-BR/observability/portkey", + "pt-BR/observability/weave", + "pt-BR/observability/truefoundry" + ] + }, + { + "group": "Aprenda", + "pages": [ + "pt-BR/learn/overview", + "pt-BR/learn/llm-selection-guide", + "pt-BR/learn/conditional-tasks", + "pt-BR/learn/coding-agents", + 
"pt-BR/learn/create-custom-tools", + "pt-BR/learn/custom-llm", + "pt-BR/learn/custom-manager-agent", + "pt-BR/learn/customizing-agents", + "pt-BR/learn/dalle-image-generation", + "pt-BR/learn/force-tool-output-as-result", + "pt-BR/learn/hierarchical-process", + "pt-BR/learn/human-input-on-execution", + "pt-BR/learn/human-in-the-loop", + "pt-BR/learn/human-feedback-in-flows", + "pt-BR/learn/kickoff-async", + "pt-BR/learn/kickoff-for-each", + "pt-BR/learn/llm-connections", + "pt-BR/learn/multimodal-agents", + "pt-BR/learn/replay-tasks-from-latest-crew-kickoff", + "pt-BR/learn/sequential-process", + "pt-BR/learn/using-annotations", + "pt-BR/learn/execution-hooks", + "pt-BR/learn/llm-hooks", + "pt-BR/learn/tool-hooks" + ] + }, + { + "group": "Telemetria", + "pages": [ + "pt-BR/telemetry" + ] + } ] }, { - "group": "Operar", - "pages": [ - "pt-BR/enterprise/features/traces", - "pt-BR/enterprise/features/webhook-streaming", - "pt-BR/enterprise/features/hallucination-guardrail", - "pt-BR/enterprise/features/flow-hitl-management" + "tab": "AMP", + "icon": "briefcase", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/enterprise/introduction" + ] + }, + { + "group": "Construir", + "pages": [ + "pt-BR/enterprise/features/automations", + "pt-BR/enterprise/features/crew-studio", + "pt-BR/enterprise/features/marketplace", + "pt-BR/enterprise/features/agent-repositories", + "pt-BR/enterprise/features/tools-and-integrations", + "pt-BR/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "Operar", + "pages": [ + "pt-BR/enterprise/features/traces", + "pt-BR/enterprise/features/webhook-streaming", + "pt-BR/enterprise/features/hallucination-guardrail", + "pt-BR/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "Gerenciar", + "pages": [ + "pt-BR/enterprise/features/rbac" + ] + }, + { + "group": "Documentação de Integração", + "pages": [ + "pt-BR/enterprise/integrations/asana", + "pt-BR/enterprise/integrations/box", + 
"pt-BR/enterprise/integrations/clickup", + "pt-BR/enterprise/integrations/github", + "pt-BR/enterprise/integrations/gmail", + "pt-BR/enterprise/integrations/google_calendar", + "pt-BR/enterprise/integrations/google_contacts", + "pt-BR/enterprise/integrations/google_docs", + "pt-BR/enterprise/integrations/google_drive", + "pt-BR/enterprise/integrations/google_sheets", + "pt-BR/enterprise/integrations/google_slides", + "pt-BR/enterprise/integrations/hubspot", + "pt-BR/enterprise/integrations/jira", + "pt-BR/enterprise/integrations/linear", + "pt-BR/enterprise/integrations/microsoft_excel", + "pt-BR/enterprise/integrations/microsoft_onedrive", + "pt-BR/enterprise/integrations/microsoft_outlook", + "pt-BR/enterprise/integrations/microsoft_sharepoint", + "pt-BR/enterprise/integrations/microsoft_teams", + "pt-BR/enterprise/integrations/microsoft_word", + "pt-BR/enterprise/integrations/notion", + "pt-BR/enterprise/integrations/salesforce", + "pt-BR/enterprise/integrations/shopify", + "pt-BR/enterprise/integrations/slack", + "pt-BR/enterprise/integrations/stripe", + "pt-BR/enterprise/integrations/zendesk" + ] + }, + { + "group": "Guias", + "pages": [ + "pt-BR/enterprise/guides/build-crew", + "pt-BR/enterprise/guides/prepare-for-deployment", + "pt-BR/enterprise/guides/deploy-to-amp", + "pt-BR/enterprise/guides/private-package-registry", + "pt-BR/enterprise/guides/kickoff-crew", + "pt-BR/enterprise/guides/update-crew", + "pt-BR/enterprise/guides/enable-crew-studio", + "pt-BR/enterprise/guides/azure-openai-setup", + "pt-BR/enterprise/guides/tool-repository", + "pt-BR/enterprise/guides/react-component-export", + "pt-BR/enterprise/guides/team-management", + "pt-BR/enterprise/guides/human-in-the-loop", + "pt-BR/enterprise/guides/webhook-automation" + ] + }, + { + "group": "Triggers", + "pages": [ + "pt-BR/enterprise/guides/automation-triggers", + "pt-BR/enterprise/guides/gmail-trigger", + "pt-BR/enterprise/guides/google-calendar-trigger", + 
"pt-BR/enterprise/guides/google-drive-trigger", + "pt-BR/enterprise/guides/outlook-trigger", + "pt-BR/enterprise/guides/onedrive-trigger", + "pt-BR/enterprise/guides/microsoft-teams-trigger", + "pt-BR/enterprise/guides/slack-trigger", + "pt-BR/enterprise/guides/hubspot-trigger", + "pt-BR/enterprise/guides/salesforce-trigger", + "pt-BR/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "Recursos", + "pages": [ + "pt-BR/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "Gerenciar", - "pages": [ - "pt-BR/enterprise/features/rbac" + "tab": "Referência da API", + "icon": "magnifying-glass", + "groups": [ + { + "group": "Começando", + "pages": [ + "pt-BR/api-reference/introduction", + "pt-BR/api-reference/inputs", + "pt-BR/api-reference/kickoff", + "pt-BR/api-reference/resume", + "pt-BR/api-reference/status" + ] + } ] }, { - "group": "Documentação de Integração", - "pages": [ - "pt-BR/enterprise/integrations/asana", - "pt-BR/enterprise/integrations/box", - "pt-BR/enterprise/integrations/clickup", - "pt-BR/enterprise/integrations/github", - "pt-BR/enterprise/integrations/gmail", - "pt-BR/enterprise/integrations/google_calendar", - "pt-BR/enterprise/integrations/google_contacts", - "pt-BR/enterprise/integrations/google_docs", - "pt-BR/enterprise/integrations/google_drive", - "pt-BR/enterprise/integrations/google_sheets", - "pt-BR/enterprise/integrations/google_slides", - "pt-BR/enterprise/integrations/hubspot", - "pt-BR/enterprise/integrations/jira", - "pt-BR/enterprise/integrations/linear", - "pt-BR/enterprise/integrations/microsoft_excel", - "pt-BR/enterprise/integrations/microsoft_onedrive", - "pt-BR/enterprise/integrations/microsoft_outlook", - "pt-BR/enterprise/integrations/microsoft_sharepoint", - "pt-BR/enterprise/integrations/microsoft_teams", - "pt-BR/enterprise/integrations/microsoft_word", - "pt-BR/enterprise/integrations/notion", - "pt-BR/enterprise/integrations/salesforce", - "pt-BR/enterprise/integrations/shopify", - 
"pt-BR/enterprise/integrations/slack", - "pt-BR/enterprise/integrations/stripe", - "pt-BR/enterprise/integrations/zendesk" + "tab": "Exemplos", + "icon": "code", + "groups": [ + { + "group": "Exemplos", + "pages": [ + "pt-BR/examples/example", + "pt-BR/examples/cookbooks" + ] + } ] }, { - "group": "Guias", - "pages": [ - "pt-BR/enterprise/guides/build-crew", - "pt-BR/enterprise/guides/prepare-for-deployment", - "pt-BR/enterprise/guides/deploy-to-amp", - "pt-BR/enterprise/guides/kickoff-crew", - "pt-BR/enterprise/guides/update-crew", - "pt-BR/enterprise/guides/enable-crew-studio", - "pt-BR/enterprise/guides/azure-openai-setup", - "pt-BR/enterprise/guides/tool-repository", - "pt-BR/enterprise/guides/react-component-export", - "pt-BR/enterprise/guides/team-management", - "pt-BR/enterprise/guides/human-in-the-loop", - "pt-BR/enterprise/guides/webhook-automation" - ] - }, - { - "group": "Triggers", - "pages": [ - "pt-BR/enterprise/guides/automation-triggers", - "pt-BR/enterprise/guides/gmail-trigger", - "pt-BR/enterprise/guides/google-calendar-trigger", - "pt-BR/enterprise/guides/google-drive-trigger", - "pt-BR/enterprise/guides/outlook-trigger", - "pt-BR/enterprise/guides/onedrive-trigger", - "pt-BR/enterprise/guides/microsoft-teams-trigger", - "pt-BR/enterprise/guides/slack-trigger", - "pt-BR/enterprise/guides/hubspot-trigger", - "pt-BR/enterprise/guides/salesforce-trigger", - "pt-BR/enterprise/guides/zapier-trigger" - ] - }, - { - "group": "Recursos", - "pages": [ - "pt-BR/enterprise/resources/frequently-asked-questions" - ] - } - ] - }, - { - "tab": "Referência da API", - "icon": "magnifying-glass", - "groups": [ - { - "group": "Começando", - "pages": [ - "pt-BR/api-reference/introduction", - "pt-BR/api-reference/inputs", - "pt-BR/api-reference/kickoff", - "pt-BR/api-reference/resume", - "pt-BR/api-reference/status" - ] - } - ] - }, - { - "tab": "Exemplos", - "icon": "code", - "groups": [ - { - "group": "Exemplos", - "pages": [ - "pt-BR/examples/example", - 
"pt-BR/examples/cookbooks" - ] - } - ] - }, - { - "tab": "Notas de Versão", - "icon": "clock", - "groups": [ - { - "group": "Notas de Versão", - "pages": [ - "pt-BR/changelog" + "tab": "Notas de Versão", + "icon": "clock", + "groups": [ + { + "group": "Notas de Versão", + "pages": [ + "pt-BR/changelog" + ] + } ] } ] @@ -982,439 +1906,902 @@ } ] }, - "tabs": [ + "versions": [ { - "tab": "홈", - "icon": "house", - "groups": [ + "version": "v1.10.1", + "default": true, + "tabs": [ { - "group": "환영합니다", - "pages": [ - "ko/index" - ] - } - ] - }, - { - "tab": "기술 문서", - "icon": "book-open", - "groups": [ - { - "group": "시작 안내", - "pages": [ - "ko/introduction", - "ko/installation", - "ko/quickstart" - ] - }, - { - "group": "가이드", - "pages": [ + "tab": "홈", + "icon": "house", + "groups": [ { - "group": "전략", - "icon": "compass", + "group": "환영합니다", "pages": [ - "ko/guides/concepts/evaluating-use-cases" - ] - }, - { - "group": "에이전트 (Agents)", - "icon": "user", - "pages": [ - "ko/guides/agents/crafting-effective-agents" - ] - }, - { - "group": "크루 (Crews)", - "icon": "users", - "pages": [ - "ko/guides/crews/first-crew" - ] - }, - { - "group": "플로우 (Flows)", - "icon": "code-branch", - "pages": [ - "ko/guides/flows/first-flow", - "ko/guides/flows/mastering-flow-state" - ] - }, - { - "group": "고급", - "icon": "gear", - "pages": [ - "ko/guides/advanced/customizing-prompts", - "ko/guides/advanced/fingerprinting" + "ko/index" ] } ] }, { - "group": "핵심 개념", - "pages": [ - "ko/concepts/agents", - "ko/concepts/tasks", - "ko/concepts/crews", - "ko/concepts/flows", - "ko/concepts/production-architecture", - "ko/concepts/knowledge", - "ko/concepts/llms", - "ko/concepts/files", - "ko/concepts/processes", - "ko/concepts/collaboration", - "ko/concepts/training", - "ko/concepts/memory", - "ko/concepts/reasoning", - "ko/concepts/planning", - "ko/concepts/testing", - "ko/concepts/cli", - "ko/concepts/tools", - "ko/concepts/event-listener" - ] - }, - { - "group": "MCP 통합", - "pages": [ - 
"ko/mcp/overview", - "ko/mcp/dsl-integration", - "ko/mcp/stdio", - "ko/mcp/sse", - "ko/mcp/streamable-http", - "ko/mcp/multiple-servers", - "ko/mcp/security" - ] - }, - { - "group": "도구 (Tools)", - "pages": [ - "ko/tools/overview", + "tab": "기술 문서", + "icon": "book-open", + "groups": [ { - "group": "파일 & 문서", - "icon": "folder-open", + "group": "시작 안내", "pages": [ - "ko/tools/file-document/overview", - "ko/tools/file-document/filereadtool", - "ko/tools/file-document/filewritetool", - "ko/tools/file-document/pdfsearchtool", - "ko/tools/file-document/docxsearchtool", - "ko/tools/file-document/mdxsearchtool", - "ko/tools/file-document/xmlsearchtool", - "ko/tools/file-document/txtsearchtool", - "ko/tools/file-document/jsonsearchtool", - "ko/tools/file-document/csvsearchtool", - "ko/tools/file-document/directorysearchtool", - "ko/tools/file-document/directoryreadtool", - "ko/tools/file-document/ocrtool", - "ko/tools/file-document/pdf-text-writing-tool" + "ko/introduction", + "ko/installation", + "ko/quickstart" ] }, { - "group": "웹 스크래핑 & 브라우징", - "icon": "globe", + "group": "가이드", "pages": [ - "ko/tools/web-scraping/overview", - "ko/tools/web-scraping/scrapewebsitetool", - "ko/tools/web-scraping/scrapeelementfromwebsitetool", - "ko/tools/web-scraping/scrapflyscrapetool", - "ko/tools/web-scraping/seleniumscrapingtool", - "ko/tools/web-scraping/scrapegraphscrapetool", - "ko/tools/web-scraping/spidertool", - "ko/tools/web-scraping/browserbaseloadtool", - "ko/tools/web-scraping/hyperbrowserloadtool", - "ko/tools/web-scraping/stagehandtool", - "ko/tools/web-scraping/firecrawlcrawlwebsitetool", - "ko/tools/web-scraping/firecrawlscrapewebsitetool", - "ko/tools/web-scraping/oxylabsscraperstool", - "ko/tools/web-scraping/brightdata-tools" + { + "group": "전략", + "icon": "compass", + "pages": [ + "ko/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "에이전트 (Agents)", + "icon": "user", + "pages": [ + "ko/guides/agents/crafting-effective-agents" + ] + }, + { + "group": 
"크루 (Crews)", + "icon": "users", + "pages": [ + "ko/guides/crews/first-crew" + ] + }, + { + "group": "플로우 (Flows)", + "icon": "code-branch", + "pages": [ + "ko/guides/flows/first-flow", + "ko/guides/flows/mastering-flow-state" + ] + }, + { + "group": "고급", + "icon": "gear", + "pages": [ + "ko/guides/advanced/customizing-prompts", + "ko/guides/advanced/fingerprinting" + ] + }, + { + "group": "마이그레이션", + "icon": "shuffle", + "pages": [ + "ko/guides/migration/migrating-from-langgraph" + ] + } ] }, { - "group": "검색 및 연구", - "icon": "magnifying-glass", + "group": "핵심 개념", "pages": [ - "ko/tools/search-research/overview", - "ko/tools/search-research/serperdevtool", - "ko/tools/search-research/bravesearchtool", - "ko/tools/search-research/exasearchtool", - "ko/tools/search-research/linkupsearchtool", - "ko/tools/search-research/githubsearchtool", - "ko/tools/search-research/websitesearchtool", - "ko/tools/search-research/codedocssearchtool", - "ko/tools/search-research/youtubechannelsearchtool", - "ko/tools/search-research/youtubevideosearchtool", - "ko/tools/search-research/tavilysearchtool", - "ko/tools/search-research/tavilyextractortool", - "ko/tools/search-research/arxivpapertool", - "ko/tools/search-research/serpapi-googlesearchtool", - "ko/tools/search-research/serpapi-googleshoppingtool", - "ko/tools/search-research/databricks-query-tool" + "ko/concepts/agents", + "ko/concepts/tasks", + "ko/concepts/crews", + "ko/concepts/flows", + "ko/concepts/production-architecture", + "ko/concepts/knowledge", + "ko/concepts/llms", + "ko/concepts/files", + "ko/concepts/processes", + "ko/concepts/collaboration", + "ko/concepts/training", + "ko/concepts/memory", + "ko/concepts/reasoning", + "ko/concepts/planning", + "ko/concepts/testing", + "ko/concepts/cli", + "ko/concepts/tools", + "ko/concepts/event-listener" ] }, { - "group": "데이터베이스 & 데이터", - "icon": "database", + "group": "MCP 통합", "pages": [ - "ko/tools/database-data/overview", - "ko/tools/database-data/mysqltool", - 
"ko/tools/database-data/pgsearchtool", - "ko/tools/database-data/snowflakesearchtool", - "ko/tools/database-data/nl2sqltool", - "ko/tools/database-data/qdrantvectorsearchtool", - "ko/tools/database-data/weaviatevectorsearchtool", - "ko/tools/database-data/mongodbvectorsearchtool", - "ko/tools/database-data/singlestoresearchtool" + "ko/mcp/overview", + "ko/mcp/dsl-integration", + "ko/mcp/stdio", + "ko/mcp/sse", + "ko/mcp/streamable-http", + "ko/mcp/multiple-servers", + "ko/mcp/security" ] }, { - "group": "인공지능 & 머신러닝", - "icon": "brain", + "group": "도구 (Tools)", "pages": [ - "ko/tools/ai-ml/overview", - "ko/tools/ai-ml/dalletool", - "ko/tools/ai-ml/visiontool", - "ko/tools/ai-ml/aimindtool", - "ko/tools/ai-ml/llamaindextool", - "ko/tools/ai-ml/langchaintool", - "ko/tools/ai-ml/ragtool", - "ko/tools/ai-ml/codeinterpretertool" + "ko/tools/overview", + { + "group": "파일 & 문서", + "icon": "folder-open", + "pages": [ + "ko/tools/file-document/overview", + "ko/tools/file-document/filereadtool", + "ko/tools/file-document/filewritetool", + "ko/tools/file-document/pdfsearchtool", + "ko/tools/file-document/docxsearchtool", + "ko/tools/file-document/mdxsearchtool", + "ko/tools/file-document/xmlsearchtool", + "ko/tools/file-document/txtsearchtool", + "ko/tools/file-document/jsonsearchtool", + "ko/tools/file-document/csvsearchtool", + "ko/tools/file-document/directorysearchtool", + "ko/tools/file-document/directoryreadtool", + "ko/tools/file-document/ocrtool", + "ko/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "웹 스크래핑 & 브라우징", + "icon": "globe", + "pages": [ + "ko/tools/web-scraping/overview", + "ko/tools/web-scraping/scrapewebsitetool", + "ko/tools/web-scraping/scrapeelementfromwebsitetool", + "ko/tools/web-scraping/scrapflyscrapetool", + "ko/tools/web-scraping/seleniumscrapingtool", + "ko/tools/web-scraping/scrapegraphscrapetool", + "ko/tools/web-scraping/spidertool", + "ko/tools/web-scraping/browserbaseloadtool", + 
"ko/tools/web-scraping/hyperbrowserloadtool", + "ko/tools/web-scraping/stagehandtool", + "ko/tools/web-scraping/firecrawlcrawlwebsitetool", + "ko/tools/web-scraping/firecrawlscrapewebsitetool", + "ko/tools/web-scraping/oxylabsscraperstool", + "ko/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "검색 및 연구", + "icon": "magnifying-glass", + "pages": [ + "ko/tools/search-research/overview", + "ko/tools/search-research/serperdevtool", + "ko/tools/search-research/bravesearchtool", + "ko/tools/search-research/exasearchtool", + "ko/tools/search-research/linkupsearchtool", + "ko/tools/search-research/githubsearchtool", + "ko/tools/search-research/websitesearchtool", + "ko/tools/search-research/codedocssearchtool", + "ko/tools/search-research/youtubechannelsearchtool", + "ko/tools/search-research/youtubevideosearchtool", + "ko/tools/search-research/tavilysearchtool", + "ko/tools/search-research/tavilyextractortool", + "ko/tools/search-research/arxivpapertool", + "ko/tools/search-research/serpapi-googlesearchtool", + "ko/tools/search-research/serpapi-googleshoppingtool", + "ko/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "데이터베이스 & 데이터", + "icon": "database", + "pages": [ + "ko/tools/database-data/overview", + "ko/tools/database-data/mysqltool", + "ko/tools/database-data/pgsearchtool", + "ko/tools/database-data/snowflakesearchtool", + "ko/tools/database-data/nl2sqltool", + "ko/tools/database-data/qdrantvectorsearchtool", + "ko/tools/database-data/weaviatevectorsearchtool", + "ko/tools/database-data/mongodbvectorsearchtool", + "ko/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "인공지능 & 머신러닝", + "icon": "brain", + "pages": [ + "ko/tools/ai-ml/overview", + "ko/tools/ai-ml/dalletool", + "ko/tools/ai-ml/visiontool", + "ko/tools/ai-ml/aimindtool", + "ko/tools/ai-ml/llamaindextool", + "ko/tools/ai-ml/langchaintool", + "ko/tools/ai-ml/ragtool", + "ko/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "클라우드 & 스토리지", + "icon": 
"cloud", + "pages": [ + "ko/tools/cloud-storage/overview", + "ko/tools/cloud-storage/s3readertool", + "ko/tools/cloud-storage/s3writertool", + "ko/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "ko/tools/integration/overview", + "ko/tools/integration/bedrockinvokeagenttool", + "ko/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "자동화", + "icon": "bolt", + "pages": [ + "ko/tools/automation/overview", + "ko/tools/automation/apifyactorstool", + "ko/tools/automation/composiotool", + "ko/tools/automation/multiontool", + "ko/tools/automation/zapieractionstool" + ] + } ] }, { - "group": "클라우드 & 스토리지", - "icon": "cloud", + "group": "Observability", "pages": [ - "ko/tools/cloud-storage/overview", - "ko/tools/cloud-storage/s3readertool", - "ko/tools/cloud-storage/s3writertool", - "ko/tools/cloud-storage/bedrockkbretriever" + "ko/observability/tracing", + "ko/observability/overview", + "ko/observability/arize-phoenix", + "ko/observability/braintrust", + "ko/observability/datadog", + "ko/observability/galileo", + "ko/observability/langdb", + "ko/observability/langfuse", + "ko/observability/langtrace", + "ko/observability/maxim", + "ko/observability/mlflow", + "ko/observability/neatlogs", + "ko/observability/openlit", + "ko/observability/opik", + "ko/observability/patronus-evaluation", + "ko/observability/portkey", + "ko/observability/weave" ] }, { - "group": "Integrations", - "icon": "plug", + "group": "학습", "pages": [ - "ko/tools/integration/overview", - "ko/tools/integration/bedrockinvokeagenttool", - "ko/tools/integration/crewaiautomationtool" + "ko/learn/overview", + "ko/learn/llm-selection-guide", + "ko/learn/conditional-tasks", + "ko/learn/coding-agents", + "ko/learn/create-custom-tools", + "ko/learn/custom-llm", + "ko/learn/custom-manager-agent", + "ko/learn/customizing-agents", + "ko/learn/dalle-image-generation", + "ko/learn/force-tool-output-as-result", + "ko/learn/hierarchical-process", 
+ "ko/learn/human-input-on-execution", + "ko/learn/human-in-the-loop", + "ko/learn/human-feedback-in-flows", + "ko/learn/kickoff-async", + "ko/learn/kickoff-for-each", + "ko/learn/llm-connections", + "ko/learn/multimodal-agents", + "ko/learn/replay-tasks-from-latest-crew-kickoff", + "ko/learn/sequential-process", + "ko/learn/using-annotations", + "ko/learn/execution-hooks", + "ko/learn/llm-hooks", + "ko/learn/tool-hooks" ] }, { - "group": "자동화", - "icon": "bolt", + "group": "Telemetry", "pages": [ - "ko/tools/automation/overview", - "ko/tools/automation/apifyactorstool", - "ko/tools/automation/composiotool", - "ko/tools/automation/multiontool", - "ko/tools/automation/zapieractionstool" + "ko/telemetry" ] } ] }, { - "group": "Observability", - "pages": [ - "ko/observability/tracing", - "ko/observability/overview", - "ko/observability/arize-phoenix", - "ko/observability/braintrust", - "ko/observability/datadog", - "ko/observability/galileo", - "ko/observability/langdb", - "ko/observability/langfuse", - "ko/observability/langtrace", - "ko/observability/maxim", - "ko/observability/mlflow", - "ko/observability/neatlogs", - "ko/observability/openlit", - "ko/observability/opik", - "ko/observability/patronus-evaluation", - "ko/observability/portkey", - "ko/observability/weave" + "tab": "엔터프라이즈", + "icon": "briefcase", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/enterprise/introduction" + ] + }, + { + "group": "빌드", + "pages": [ + "ko/enterprise/features/automations", + "ko/enterprise/features/crew-studio", + "ko/enterprise/features/marketplace", + "ko/enterprise/features/agent-repositories", + "ko/enterprise/features/tools-and-integrations", + "ko/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "운영", + "pages": [ + "ko/enterprise/features/traces", + "ko/enterprise/features/webhook-streaming", + "ko/enterprise/features/hallucination-guardrail", + "ko/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "관리", + "pages": [ + 
"ko/enterprise/features/rbac" + ] + }, + { + "group": "통합 문서", + "pages": [ + "ko/enterprise/integrations/asana", + "ko/enterprise/integrations/box", + "ko/enterprise/integrations/clickup", + "ko/enterprise/integrations/github", + "ko/enterprise/integrations/gmail", + "ko/enterprise/integrations/google_calendar", + "ko/enterprise/integrations/google_contacts", + "ko/enterprise/integrations/google_docs", + "ko/enterprise/integrations/google_drive", + "ko/enterprise/integrations/google_sheets", + "ko/enterprise/integrations/google_slides", + "ko/enterprise/integrations/hubspot", + "ko/enterprise/integrations/jira", + "ko/enterprise/integrations/linear", + "ko/enterprise/integrations/microsoft_excel", + "ko/enterprise/integrations/microsoft_onedrive", + "ko/enterprise/integrations/microsoft_outlook", + "ko/enterprise/integrations/microsoft_sharepoint", + "ko/enterprise/integrations/microsoft_teams", + "ko/enterprise/integrations/microsoft_word", + "ko/enterprise/integrations/notion", + "ko/enterprise/integrations/salesforce", + "ko/enterprise/integrations/shopify", + "ko/enterprise/integrations/slack", + "ko/enterprise/integrations/stripe", + "ko/enterprise/integrations/zendesk" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "ko/enterprise/guides/build-crew", + "ko/enterprise/guides/prepare-for-deployment", + "ko/enterprise/guides/deploy-to-amp", + "ko/enterprise/guides/private-package-registry", + "ko/enterprise/guides/kickoff-crew", + "ko/enterprise/guides/update-crew", + "ko/enterprise/guides/enable-crew-studio", + "ko/enterprise/guides/azure-openai-setup", + "ko/enterprise/guides/tool-repository", + "ko/enterprise/guides/react-component-export", + "ko/enterprise/guides/team-management", + "ko/enterprise/guides/human-in-the-loop", + "ko/enterprise/guides/webhook-automation" + ] + }, + { + "group": "트리거", + "pages": [ + "ko/enterprise/guides/automation-triggers", + "ko/enterprise/guides/gmail-trigger", + "ko/enterprise/guides/google-calendar-trigger", + 
"ko/enterprise/guides/google-drive-trigger", + "ko/enterprise/guides/outlook-trigger", + "ko/enterprise/guides/onedrive-trigger", + "ko/enterprise/guides/microsoft-teams-trigger", + "ko/enterprise/guides/slack-trigger", + "ko/enterprise/guides/hubspot-trigger", + "ko/enterprise/guides/salesforce-trigger", + "ko/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "학습 자원", + "pages": [ + "ko/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "학습", - "pages": [ - "ko/learn/overview", - "ko/learn/llm-selection-guide", - "ko/learn/conditional-tasks", - "ko/learn/coding-agents", - "ko/learn/create-custom-tools", - "ko/learn/custom-llm", - "ko/learn/custom-manager-agent", - "ko/learn/customizing-agents", - "ko/learn/dalle-image-generation", - "ko/learn/force-tool-output-as-result", - "ko/learn/hierarchical-process", - "ko/learn/human-input-on-execution", - "ko/learn/human-in-the-loop", - "ko/learn/human-feedback-in-flows", - "ko/learn/kickoff-async", - "ko/learn/kickoff-for-each", - "ko/learn/llm-connections", - "ko/learn/multimodal-agents", - "ko/learn/replay-tasks-from-latest-crew-kickoff", - "ko/learn/sequential-process", - "ko/learn/using-annotations", - "ko/learn/execution-hooks", - "ko/learn/llm-hooks", - "ko/learn/tool-hooks" + "tab": "API 레퍼런스", + "icon": "magnifying-glass", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/api-reference/introduction", + "ko/api-reference/inputs", + "ko/api-reference/kickoff", + "ko/api-reference/resume", + "ko/api-reference/status" + ] + } ] }, { - "group": "Telemetry", - "pages": [ - "ko/telemetry" + "tab": "예시", + "icon": "code", + "groups": [ + { + "group": "예시", + "pages": [ + "ko/examples/example", + "ko/examples/cookbooks" + ] + } + ] + }, + { + "tab": "변경 로그", + "icon": "clock", + "groups": [ + { + "group": "릴리스 노트", + "pages": [ + "ko/changelog" + ] + } ] } ] }, { - "tab": "엔터프라이즈", - "icon": "briefcase", - "groups": [ + "version": "v1.10.0", + "tabs": [ { - "group": "시작 안내", - 
"pages": [ - "ko/enterprise/introduction" + "tab": "홈", + "icon": "house", + "groups": [ + { + "group": "환영합니다", + "pages": [ + "ko/index" + ] + } ] }, { - "group": "빌드", - "pages": [ - "ko/enterprise/features/automations", - "ko/enterprise/features/crew-studio", - "ko/enterprise/features/marketplace", - "ko/enterprise/features/agent-repositories", - "ko/enterprise/features/tools-and-integrations", - "ko/enterprise/features/pii-trace-redactions" + "tab": "기술 문서", + "icon": "book-open", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/introduction", + "ko/installation", + "ko/quickstart" + ] + }, + { + "group": "가이드", + "pages": [ + { + "group": "전략", + "icon": "compass", + "pages": [ + "ko/guides/concepts/evaluating-use-cases" + ] + }, + { + "group": "에이전트 (Agents)", + "icon": "user", + "pages": [ + "ko/guides/agents/crafting-effective-agents" + ] + }, + { + "group": "크루 (Crews)", + "icon": "users", + "pages": [ + "ko/guides/crews/first-crew" + ] + }, + { + "group": "플로우 (Flows)", + "icon": "code-branch", + "pages": [ + "ko/guides/flows/first-flow", + "ko/guides/flows/mastering-flow-state" + ] + }, + { + "group": "고급", + "icon": "gear", + "pages": [ + "ko/guides/advanced/customizing-prompts", + "ko/guides/advanced/fingerprinting" + ] + }, + { + "group": "마이그레이션", + "icon": "shuffle", + "pages": [ + "ko/guides/migration/migrating-from-langgraph" + ] + } + ] + }, + { + "group": "핵심 개념", + "pages": [ + "ko/concepts/agents", + "ko/concepts/tasks", + "ko/concepts/crews", + "ko/concepts/flows", + "ko/concepts/production-architecture", + "ko/concepts/knowledge", + "ko/concepts/llms", + "ko/concepts/files", + "ko/concepts/processes", + "ko/concepts/collaboration", + "ko/concepts/training", + "ko/concepts/memory", + "ko/concepts/reasoning", + "ko/concepts/planning", + "ko/concepts/testing", + "ko/concepts/cli", + "ko/concepts/tools", + "ko/concepts/event-listener" + ] + }, + { + "group": "MCP 통합", + "pages": [ + "ko/mcp/overview", + "ko/mcp/dsl-integration", + 
"ko/mcp/stdio", + "ko/mcp/sse", + "ko/mcp/streamable-http", + "ko/mcp/multiple-servers", + "ko/mcp/security" + ] + }, + { + "group": "도구 (Tools)", + "pages": [ + "ko/tools/overview", + { + "group": "파일 & 문서", + "icon": "folder-open", + "pages": [ + "ko/tools/file-document/overview", + "ko/tools/file-document/filereadtool", + "ko/tools/file-document/filewritetool", + "ko/tools/file-document/pdfsearchtool", + "ko/tools/file-document/docxsearchtool", + "ko/tools/file-document/mdxsearchtool", + "ko/tools/file-document/xmlsearchtool", + "ko/tools/file-document/txtsearchtool", + "ko/tools/file-document/jsonsearchtool", + "ko/tools/file-document/csvsearchtool", + "ko/tools/file-document/directorysearchtool", + "ko/tools/file-document/directoryreadtool", + "ko/tools/file-document/ocrtool", + "ko/tools/file-document/pdf-text-writing-tool" + ] + }, + { + "group": "웹 스크래핑 & 브라우징", + "icon": "globe", + "pages": [ + "ko/tools/web-scraping/overview", + "ko/tools/web-scraping/scrapewebsitetool", + "ko/tools/web-scraping/scrapeelementfromwebsitetool", + "ko/tools/web-scraping/scrapflyscrapetool", + "ko/tools/web-scraping/seleniumscrapingtool", + "ko/tools/web-scraping/scrapegraphscrapetool", + "ko/tools/web-scraping/spidertool", + "ko/tools/web-scraping/browserbaseloadtool", + "ko/tools/web-scraping/hyperbrowserloadtool", + "ko/tools/web-scraping/stagehandtool", + "ko/tools/web-scraping/firecrawlcrawlwebsitetool", + "ko/tools/web-scraping/firecrawlscrapewebsitetool", + "ko/tools/web-scraping/oxylabsscraperstool", + "ko/tools/web-scraping/brightdata-tools" + ] + }, + { + "group": "검색 및 연구", + "icon": "magnifying-glass", + "pages": [ + "ko/tools/search-research/overview", + "ko/tools/search-research/serperdevtool", + "ko/tools/search-research/bravesearchtool", + "ko/tools/search-research/exasearchtool", + "ko/tools/search-research/linkupsearchtool", + "ko/tools/search-research/githubsearchtool", + "ko/tools/search-research/websitesearchtool", + 
"ko/tools/search-research/codedocssearchtool", + "ko/tools/search-research/youtubechannelsearchtool", + "ko/tools/search-research/youtubevideosearchtool", + "ko/tools/search-research/tavilysearchtool", + "ko/tools/search-research/tavilyextractortool", + "ko/tools/search-research/arxivpapertool", + "ko/tools/search-research/serpapi-googlesearchtool", + "ko/tools/search-research/serpapi-googleshoppingtool", + "ko/tools/search-research/databricks-query-tool" + ] + }, + { + "group": "데이터베이스 & 데이터", + "icon": "database", + "pages": [ + "ko/tools/database-data/overview", + "ko/tools/database-data/mysqltool", + "ko/tools/database-data/pgsearchtool", + "ko/tools/database-data/snowflakesearchtool", + "ko/tools/database-data/nl2sqltool", + "ko/tools/database-data/qdrantvectorsearchtool", + "ko/tools/database-data/weaviatevectorsearchtool", + "ko/tools/database-data/mongodbvectorsearchtool", + "ko/tools/database-data/singlestoresearchtool" + ] + }, + { + "group": "인공지능 & 머신러닝", + "icon": "brain", + "pages": [ + "ko/tools/ai-ml/overview", + "ko/tools/ai-ml/dalletool", + "ko/tools/ai-ml/visiontool", + "ko/tools/ai-ml/aimindtool", + "ko/tools/ai-ml/llamaindextool", + "ko/tools/ai-ml/langchaintool", + "ko/tools/ai-ml/ragtool", + "ko/tools/ai-ml/codeinterpretertool" + ] + }, + { + "group": "클라우드 & 스토리지", + "icon": "cloud", + "pages": [ + "ko/tools/cloud-storage/overview", + "ko/tools/cloud-storage/s3readertool", + "ko/tools/cloud-storage/s3writertool", + "ko/tools/cloud-storage/bedrockkbretriever" + ] + }, + { + "group": "Integrations", + "icon": "plug", + "pages": [ + "ko/tools/integration/overview", + "ko/tools/integration/bedrockinvokeagenttool", + "ko/tools/integration/crewaiautomationtool" + ] + }, + { + "group": "자동화", + "icon": "bolt", + "pages": [ + "ko/tools/automation/overview", + "ko/tools/automation/apifyactorstool", + "ko/tools/automation/composiotool", + "ko/tools/automation/multiontool", + "ko/tools/automation/zapieractionstool" + ] + } + ] + }, + { + "group": 
"Observability", + "pages": [ + "ko/observability/tracing", + "ko/observability/overview", + "ko/observability/arize-phoenix", + "ko/observability/braintrust", + "ko/observability/datadog", + "ko/observability/galileo", + "ko/observability/langdb", + "ko/observability/langfuse", + "ko/observability/langtrace", + "ko/observability/maxim", + "ko/observability/mlflow", + "ko/observability/neatlogs", + "ko/observability/openlit", + "ko/observability/opik", + "ko/observability/patronus-evaluation", + "ko/observability/portkey", + "ko/observability/weave" + ] + }, + { + "group": "학습", + "pages": [ + "ko/learn/overview", + "ko/learn/llm-selection-guide", + "ko/learn/conditional-tasks", + "ko/learn/coding-agents", + "ko/learn/create-custom-tools", + "ko/learn/custom-llm", + "ko/learn/custom-manager-agent", + "ko/learn/customizing-agents", + "ko/learn/dalle-image-generation", + "ko/learn/force-tool-output-as-result", + "ko/learn/hierarchical-process", + "ko/learn/human-input-on-execution", + "ko/learn/human-in-the-loop", + "ko/learn/human-feedback-in-flows", + "ko/learn/kickoff-async", + "ko/learn/kickoff-for-each", + "ko/learn/llm-connections", + "ko/learn/multimodal-agents", + "ko/learn/replay-tasks-from-latest-crew-kickoff", + "ko/learn/sequential-process", + "ko/learn/using-annotations", + "ko/learn/execution-hooks", + "ko/learn/llm-hooks", + "ko/learn/tool-hooks" + ] + }, + { + "group": "Telemetry", + "pages": [ + "ko/telemetry" + ] + } ] }, { - "group": "운영", - "pages": [ - "ko/enterprise/features/traces", - "ko/enterprise/features/webhook-streaming", - "ko/enterprise/features/hallucination-guardrail", - "ko/enterprise/features/flow-hitl-management" + "tab": "엔터프라이즈", + "icon": "briefcase", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/enterprise/introduction" + ] + }, + { + "group": "빌드", + "pages": [ + "ko/enterprise/features/automations", + "ko/enterprise/features/crew-studio", + "ko/enterprise/features/marketplace", + 
"ko/enterprise/features/agent-repositories", + "ko/enterprise/features/tools-and-integrations", + "ko/enterprise/features/pii-trace-redactions" + ] + }, + { + "group": "운영", + "pages": [ + "ko/enterprise/features/traces", + "ko/enterprise/features/webhook-streaming", + "ko/enterprise/features/hallucination-guardrail", + "ko/enterprise/features/flow-hitl-management" + ] + }, + { + "group": "관리", + "pages": [ + "ko/enterprise/features/rbac" + ] + }, + { + "group": "통합 문서", + "pages": [ + "ko/enterprise/integrations/asana", + "ko/enterprise/integrations/box", + "ko/enterprise/integrations/clickup", + "ko/enterprise/integrations/github", + "ko/enterprise/integrations/gmail", + "ko/enterprise/integrations/google_calendar", + "ko/enterprise/integrations/google_contacts", + "ko/enterprise/integrations/google_docs", + "ko/enterprise/integrations/google_drive", + "ko/enterprise/integrations/google_sheets", + "ko/enterprise/integrations/google_slides", + "ko/enterprise/integrations/hubspot", + "ko/enterprise/integrations/jira", + "ko/enterprise/integrations/linear", + "ko/enterprise/integrations/microsoft_excel", + "ko/enterprise/integrations/microsoft_onedrive", + "ko/enterprise/integrations/microsoft_outlook", + "ko/enterprise/integrations/microsoft_sharepoint", + "ko/enterprise/integrations/microsoft_teams", + "ko/enterprise/integrations/microsoft_word", + "ko/enterprise/integrations/notion", + "ko/enterprise/integrations/salesforce", + "ko/enterprise/integrations/shopify", + "ko/enterprise/integrations/slack", + "ko/enterprise/integrations/stripe", + "ko/enterprise/integrations/zendesk" + ] + }, + { + "group": "How-To Guides", + "pages": [ + "ko/enterprise/guides/build-crew", + "ko/enterprise/guides/prepare-for-deployment", + "ko/enterprise/guides/deploy-to-amp", + "ko/enterprise/guides/private-package-registry", + "ko/enterprise/guides/kickoff-crew", + "ko/enterprise/guides/update-crew", + "ko/enterprise/guides/enable-crew-studio", + 
"ko/enterprise/guides/azure-openai-setup", + "ko/enterprise/guides/tool-repository", + "ko/enterprise/guides/react-component-export", + "ko/enterprise/guides/team-management", + "ko/enterprise/guides/human-in-the-loop", + "ko/enterprise/guides/webhook-automation" + ] + }, + { + "group": "트리거", + "pages": [ + "ko/enterprise/guides/automation-triggers", + "ko/enterprise/guides/gmail-trigger", + "ko/enterprise/guides/google-calendar-trigger", + "ko/enterprise/guides/google-drive-trigger", + "ko/enterprise/guides/outlook-trigger", + "ko/enterprise/guides/onedrive-trigger", + "ko/enterprise/guides/microsoft-teams-trigger", + "ko/enterprise/guides/slack-trigger", + "ko/enterprise/guides/hubspot-trigger", + "ko/enterprise/guides/salesforce-trigger", + "ko/enterprise/guides/zapier-trigger" + ] + }, + { + "group": "학습 자원", + "pages": [ + "ko/enterprise/resources/frequently-asked-questions" + ] + } ] }, { - "group": "관리", - "pages": [ - "ko/enterprise/features/rbac" + "tab": "API 레퍼런스", + "icon": "magnifying-glass", + "groups": [ + { + "group": "시작 안내", + "pages": [ + "ko/api-reference/introduction", + "ko/api-reference/inputs", + "ko/api-reference/kickoff", + "ko/api-reference/resume", + "ko/api-reference/status" + ] + } ] }, { - "group": "통합 문서", - "pages": [ - "ko/enterprise/integrations/asana", - "ko/enterprise/integrations/box", - "ko/enterprise/integrations/clickup", - "ko/enterprise/integrations/github", - "ko/enterprise/integrations/gmail", - "ko/enterprise/integrations/google_calendar", - "ko/enterprise/integrations/google_contacts", - "ko/enterprise/integrations/google_docs", - "ko/enterprise/integrations/google_drive", - "ko/enterprise/integrations/google_sheets", - "ko/enterprise/integrations/google_slides", - "ko/enterprise/integrations/hubspot", - "ko/enterprise/integrations/jira", - "ko/enterprise/integrations/linear", - "ko/enterprise/integrations/microsoft_excel", - "ko/enterprise/integrations/microsoft_onedrive", - 
"ko/enterprise/integrations/microsoft_outlook", - "ko/enterprise/integrations/microsoft_sharepoint", - "ko/enterprise/integrations/microsoft_teams", - "ko/enterprise/integrations/microsoft_word", - "ko/enterprise/integrations/notion", - "ko/enterprise/integrations/salesforce", - "ko/enterprise/integrations/shopify", - "ko/enterprise/integrations/slack", - "ko/enterprise/integrations/stripe", - "ko/enterprise/integrations/zendesk" + "tab": "예시", + "icon": "code", + "groups": [ + { + "group": "예시", + "pages": [ + "ko/examples/example", + "ko/examples/cookbooks" + ] + } ] }, { - "group": "How-To Guides", - "pages": [ - "ko/enterprise/guides/build-crew", - "ko/enterprise/guides/prepare-for-deployment", - "ko/enterprise/guides/deploy-to-amp", - "ko/enterprise/guides/kickoff-crew", - "ko/enterprise/guides/update-crew", - "ko/enterprise/guides/enable-crew-studio", - "ko/enterprise/guides/azure-openai-setup", - "ko/enterprise/guides/tool-repository", - "ko/enterprise/guides/react-component-export", - "ko/enterprise/guides/team-management", - "ko/enterprise/guides/human-in-the-loop", - "ko/enterprise/guides/webhook-automation" - ] - }, - { - "group": "트리거", - "pages": [ - "ko/enterprise/guides/automation-triggers", - "ko/enterprise/guides/gmail-trigger", - "ko/enterprise/guides/google-calendar-trigger", - "ko/enterprise/guides/google-drive-trigger", - "ko/enterprise/guides/outlook-trigger", - "ko/enterprise/guides/onedrive-trigger", - "ko/enterprise/guides/microsoft-teams-trigger", - "ko/enterprise/guides/slack-trigger", - "ko/enterprise/guides/hubspot-trigger", - "ko/enterprise/guides/salesforce-trigger", - "ko/enterprise/guides/zapier-trigger" - ] - }, - { - "group": "학습 자원", - "pages": [ - "ko/enterprise/resources/frequently-asked-questions" - ] - } - ] - }, - { - "tab": "API 레퍼런스", - "icon": "magnifying-glass", - "groups": [ - { - "group": "시작 안내", - "pages": [ - "ko/api-reference/introduction", - "ko/api-reference/inputs", - "ko/api-reference/kickoff", - 
"ko/api-reference/resume", - "ko/api-reference/status" - ] - } - ] - }, - { - "tab": "예시", - "icon": "code", - "groups": [ - { - "group": "예시", - "pages": [ - "ko/examples/example", - "ko/examples/cookbooks" - ] - } - ] - }, - { - "tab": "변경 로그", - "icon": "clock", - "groups": [ - { - "group": "릴리스 노트", - "pages": [ - "ko/changelog" + "tab": "변경 로그", + "icon": "clock", + "groups": [ + { + "group": "릴리스 노트", + "pages": [ + "ko/changelog" + ] + } ] } ] @@ -1578,4 +2965,4 @@ "reddit": "https://www.reddit.com/r/crewAIInc/" } } -} \ No newline at end of file +} diff --git a/docs/en/changelog.mdx b/docs/en/changelog.mdx index ce074e466..b73204e73 100644 --- a/docs/en/changelog.mdx +++ b/docs/en/changelog.mdx @@ -4,6 +4,138 @@ description: "Product updates, improvements, and bug fixes for CrewAI" icon: "clock" mode: "wide" --- + + ## v1.10.1 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1) + + ## What's Changed + + ### Features + - Upgrade Gemini GenAI + + ### Bug Fixes + - Adjust executor listener value to avoid recursion + - Group parallel function response parts in a single Content object in Gemini + - Surface thought output from thinking models in Gemini + - Load MCP and platform tools when agent tools are None + - Support Jupyter environments with running event loops in A2A + - Use anonymous ID for ephemeral traces + - Conditionally pass plus header + - Skip signal handler registration in non-main threads for telemetry + - Inject tool errors as observations and resolve name collisions + - Upgrade pypdf from 4.x to 6.7.4 to resolve Dependabot alerts + - Resolve critical and high Dependabot security alerts + + ### Documentation + - Sync Composio tool documentation across locales + + ## Contributors + + @giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96 + + + + + ## v1.10.1a1 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## What's 
Changed + + ### Features + - Implement asynchronous invocation support in step callback methods + - Implement lazy loading for heavy dependencies in Memory module + + ### Documentation + - Update changelog and version for v1.10.0 + + ### Refactoring + - Refactor step callback methods to support asynchronous invocation + - Refactor to implement lazy loading for heavy dependencies in Memory module + + ### Bug Fixes + - Fix branch for release notes + + ## Contributors + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.1a1 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## What's Changed + + ### Refactoring + - Refactor step callback methods to support asynchronous invocation + - Implement lazy loading for heavy dependencies in Memory module + + ### Documentation + - Update changelog and version for v1.10.0 + + ### Bug Fixes + - Make branch for release notes + + ## Contributors + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.0 + + [View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.0) + + ## What's Changed + + ### Features + - Enhance MCP tool resolution and related events + - Update lancedb version and add lance-namespace packages + - Enhance JSON argument parsing and validation in CrewAgentExecutor and BaseTool + - Migrate CLI HTTP client from requests to httpx + - Add versioned documentation + - Add yanked detection for version notes + - Implement user input handling in Flows + - Enhance HITL self-loop functionality in human feedback integration tests + - Add started_event_id and set in eventbus + - Auto update tools.specs + + ### Bug Fixes + - Validate tool kwargs even when empty to prevent cryptic TypeError + - Preserve null types in tool parameter schemas for LLM + - Map output_pydantic/output_json to native structured output + - Ensure callbacks are ran/awaited if promise + - Capture method name in exception context + - Preserve enum type in router result; improve types + - Fix cyclic 
flows silently breaking when persistence ID is passed in inputs + - Correct CLI flag format from --skip-provider to --skip_provider + - Ensure OpenAI tool call stream is finalized + - Resolve complex schema $ref pointers in MCP tools + - Enforce additionalProperties=false in schemas + - Reject reserved script names for crew folders + - Resolve race condition in guardrail event emission test + + ### Documentation + - Add litellm dependency note for non-native LLM providers + - Clarify NL2SQL security model and hardening guidance + - Add 96 missing actions across 9 integrations + + ### Refactoring + - Refactor crew to provider + - Extract HITL to provider pattern + - Improve hook typing and registration + + ## Contributors + + @dependabot[bot], @github-actions[bot], @github-code-quality[bot], @greysonlalonde, @heitorado, @hobostay, @joaomdmoura, @johnvan7, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha, @mplachta, @nicoferdi96, @theCyberTech, @thiagomoretto, @vinibrsl + + + ## v1.9.0 diff --git a/docs/en/concepts/llms.mdx b/docs/en/concepts/llms.mdx index 88ac16e88..98bfbeb23 100644 --- a/docs/en/concepts/llms.mdx +++ b/docs/en/concepts/llms.mdx @@ -106,6 +106,15 @@ There are different places in CrewAI code where you can specify the model to use + + CrewAI provides native SDK integrations for OpenAI, Anthropic, Google (Gemini API), Azure, and AWS Bedrock — no extra install needed beyond the provider-specific extras (e.g. `uv add "crewai[openai]"`). + + All other providers are powered by **LiteLLM**. If you plan to use any of them, add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` + + ## Provider Configuration Examples CrewAI supports a multitude of LLM providers, each offering unique features, authentication methods, and model capabilities. 
@@ -275,6 +284,11 @@ In this section, you'll find detailed examples that help you select, configure, | `meta_llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | 128k | 4028 | Text, Image | Text | | `meta_llama/Llama-3.3-70B-Instruct` | 128k | 4028 | Text | Text | | `meta_llama/Llama-3.3-8B-Instruct` | 128k | 4028 | Text | Text | + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -470,7 +484,7 @@ In this section, you'll find detailed examples that help you select, configure, To get an Express mode API key: - New Google Cloud users: Get an [express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey) - Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys) - + For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey). @@ -571,6 +585,11 @@ In this section, you'll find detailed examples that help you select, configure, | gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks | | gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks | | gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration | + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -652,6 +671,7 @@ In this section, you'll find detailed examples that help you select, configure, # Optional AWS_SESSION_TOKEN= # For temporary credentials AWS_DEFAULT_REGION= # Defaults to us-east-1 + AWS_REGION_NAME= # Alternative configuration for backwards compatibility with LiteLLM. 
Defaults to us-east-1 ``` **Basic Usage:** @@ -695,6 +715,7 @@ In this section, you'll find detailed examples that help you select, configure, - `AWS_SECRET_ACCESS_KEY`: AWS secret key (required) - `AWS_SESSION_TOKEN`: AWS session token for temporary credentials (optional) - `AWS_DEFAULT_REGION`: AWS region (defaults to `us-east-1`) + - `AWS_REGION_NAME`: AWS region (defaults to `us-east-1`). Alternative configuration for backwards compatibility with LiteLLM **Features:** - Native tool calling support via Converse API @@ -764,6 +785,11 @@ In this section, you'll find detailed examples that help you select, configure, model="sagemaker/" ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -779,6 +805,11 @@ In this section, you'll find detailed examples that help you select, configure, temperature=0.7 ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -865,6 +896,11 @@ In this section, you'll find detailed examples that help you select, configure, | rakuten/rakutenai-7b-instruct | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. | | rakuten/rakutenai-7b-chat | 1,024 tokens | Advanced state-of-the-art LLM with language understanding, superior reasoning, and text generation. | | baichuan-inc/baichuan2-13b-chat | 4,096 tokens | Support Chinese and English chat, coding, math, instruction following, solving quizzes | + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -905,6 +941,11 @@ In this section, you'll find detailed examples that help you select, configure, # ... ``` + + **Note:** This provider uses LiteLLM. 
Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -926,6 +967,11 @@ In this section, you'll find detailed examples that help you select, configure, | Llama 3.1 70B/8B | 131,072 tokens | High-performance, large context tasks | | Llama 3.2 Series | 8,192 tokens | General-purpose tasks | | Mixtral 8x7B | 32,768 tokens | Balanced performance and context | + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -948,6 +994,11 @@ In this section, you'll find detailed examples that help you select, configure, base_url="https://api.watsonx.ai/v1" ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -961,6 +1012,11 @@ In this section, you'll find detailed examples that help you select, configure, base_url="http://localhost:11434" ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -976,6 +1032,11 @@ In this section, you'll find detailed examples that help you select, configure, temperature=0.7 ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -991,6 +1052,11 @@ In this section, you'll find detailed examples that help you select, configure, base_url="https://api.perplexity.ai/" ) ``` + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -1005,6 +1071,11 @@ In this section, you'll find detailed examples that help you select, configure, model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct" ) ``` + + **Note:** This provider uses LiteLLM. 
Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -1028,6 +1099,11 @@ In this section, you'll find detailed examples that help you select, configure, | Llama 3.2 Series | 8,192 tokens | General-purpose, multimodal tasks | | Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality | | Qwen2 familly | 8,192 tokens | High-performance and output quality | + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -1053,6 +1129,11 @@ In this section, you'll find detailed examples that help you select, configure, - Good balance of speed and quality - Support for long context windows + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -1075,6 +1156,11 @@ In this section, you'll find detailed examples that help you select, configure, - openrouter/deepseek/deepseek-r1 - openrouter/deepseek/deepseek-chat + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` @@ -1097,6 +1183,11 @@ In this section, you'll find detailed examples that help you select, configure, - Competitive pricing - Good balance of speed and quality + + **Note:** This provider uses LiteLLM. Add it as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` diff --git a/docs/en/enterprise/features/flow-hitl-management.mdx b/docs/en/enterprise/features/flow-hitl-management.mdx index c0b1fa957..36eb4325c 100644 --- a/docs/en/enterprise/features/flow-hitl-management.mdx +++ b/docs/en/enterprise/features/flow-hitl-management.mdx @@ -38,22 +38,21 @@ CrewAI Enterprise provides a comprehensive Human-in-the-Loop (HITL) management s Configure human review checkpoints within your Flows using the `@human_feedback` decorator. When execution reaches a review point, the system pauses, notifies the assignee via email, and waits for a response. 
```python -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult class ContentApprovalFlow(Flow): @start() def generate_content(self): - # AI generates content return "Generated marketing copy for Q1 campaign..." - @listen(generate_content) @human_feedback( message="Please review this content for brand compliance:", emit=["approved", "rejected", "needs_revision"], ) - def review_content(self, content): - return content + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "Marketing copy for review..." @listen("approved") def publish_content(self, result: HumanFeedbackResult): @@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow): @listen("rejected") def archive_content(self, result: HumanFeedbackResult): print(f"Content rejected. Reason: {result.feedback}") - - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - print(f"Revision requested: {result.feedback}") ``` For complete implementation details, see the [Human Feedback in Flows](/en/learn/human-feedback-in-flows) guide. diff --git a/docs/en/enterprise/guides/deploy-to-amp.mdx b/docs/en/enterprise/guides/deploy-to-amp.mdx index 31ff8ca20..c0309c0b6 100644 --- a/docs/en/enterprise/guides/deploy-to-amp.mdx +++ b/docs/en/enterprise/guides/deploy-to-amp.mdx @@ -177,6 +177,11 @@ You need to push your crew to a GitHub repository. If you haven't created a crew ![Set Environment Variables](/images/enterprise/set-env-variables.png) + + Using private Python packages? You'll need to add your registry credentials here too. + See [Private Package Registries](/en/enterprise/guides/private-package-registry) for the required variables. 
+ + diff --git a/docs/en/enterprise/guides/prepare-for-deployment.mdx b/docs/en/enterprise/guides/prepare-for-deployment.mdx index fe09e5319..3e472e4e6 100644 --- a/docs/en/enterprise/guides/prepare-for-deployment.mdx +++ b/docs/en/enterprise/guides/prepare-for-deployment.mdx @@ -256,6 +256,12 @@ Before deployment, ensure you have: 1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.) 2. **Tool API keys** if using external tools (Serper, etc.) + + If your project depends on packages from a **private PyPI registry**, you'll also need to configure + registry authentication credentials as environment variables. See the + [Private Package Registries](/en/enterprise/guides/private-package-registry) guide for details. + + Test your project locally with the same environment variables before deploying to catch configuration issues early. diff --git a/docs/en/enterprise/guides/private-package-registry.mdx b/docs/en/enterprise/guides/private-package-registry.mdx new file mode 100644 index 000000000..feb521436 --- /dev/null +++ b/docs/en/enterprise/guides/private-package-registry.mdx @@ -0,0 +1,263 @@ +--- +title: "Private Package Registries" +description: "Install private Python packages from authenticated PyPI registries in CrewAI AMP" +icon: "lock" +mode: "wide" +--- + + + This guide covers how to configure your CrewAI project to install Python packages + from private PyPI registries (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.) + when deploying to CrewAI AMP. + + +## When You Need This + +If your project depends on internal or proprietary Python packages hosted on a private registry +rather than the public PyPI, you'll need to: + +1. Tell UV **where** to find the package (an index URL) +2. Tell UV **which** packages come from that index (a source mapping) +3. Provide **credentials** so UV can authenticate during install + +CrewAI AMP uses [UV](https://docs.astral.sh/uv/) for dependency resolution and installation. 
+UV supports authenticated private registries through `pyproject.toml` configuration combined +with environment variables for credentials. + +## Step 1: Configure pyproject.toml + +Three pieces work together in your `pyproject.toml`: + +### 1a. Declare the dependency + +Add the private package to your `[project.dependencies]` like any other dependency: + +```toml +[project] +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] +``` + +### 1b. Define the index + +Register your private registry as a named index under `[[tool.uv.index]]`: + +```toml +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true +``` + + + The `name` field is important — UV uses it to construct the environment variable names + for authentication (see [Step 2](#step-2-set-authentication-credentials) below). + + Setting `explicit = true` means UV won't search this index for every package — only the + ones you explicitly map to it in `[tool.uv.sources]`. This avoids unnecessary queries + against your private registry and protects against dependency confusion attacks. + + +### 1c. 
Map the package to the index + +Tell UV which packages should be resolved from your private index using `[tool.uv.sources]`: + +```toml +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +### Complete example + +```toml +[project] +name = "my-crew-project" +version = "0.1.0" +requires-python = ">=3.10,<=3.13" +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] + +[tool.crewai] +type = "crew" + +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true + +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +After updating `pyproject.toml`, regenerate your lock file: + +```bash +uv lock +``` + + + Always commit the updated `uv.lock` along with your `pyproject.toml` changes. + The lock file is required for deployment — see [Prepare for Deployment](/en/enterprise/guides/prepare-for-deployment). + + +## Step 2: Set Authentication Credentials + +UV authenticates against private indexes using environment variables that follow a naming convention +based on the index name you defined in `pyproject.toml`: + +``` +UV_INDEX_{UPPER_NAME}_USERNAME +UV_INDEX_{UPPER_NAME}_PASSWORD +``` + +Where `{UPPER_NAME}` is your index name converted to **uppercase** with **hyphens replaced by underscores**. + +For example, an index named `my-private-registry` uses: + +| Variable | Value | +|----------|-------| +| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Your registry username or token name | +| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Your registry password or token/PAT | + + + These environment variables **must** be added via the CrewAI AMP **Environment Variables** settings — + either globally or at the deployment level. They cannot be set in `.env` files or hardcoded in your project. + + See [Setting Environment Variables in AMP](#setting-environment-variables-in-amp) below. 
+ + +## Registry Provider Reference + +The table below shows the index URL format and credential values for common registry providers. +Replace placeholder values with your actual organization and feed details. + +| Provider | Index URL | Username | Password | +|----------|-----------|----------|----------| +| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Any non-empty string (e.g. `token`) | Personal Access Token (PAT) with Packaging Read scope | +| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub username | Personal Access Token (classic) with `read:packages` scope | +| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project or Personal Access Token with `read_api` scope | +| **AWS CodeArtifact** | Use the URL from `aws codeartifact get-repository-endpoint` | `aws` | Token from `aws codeartifact get-authorization-token` | +| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64-encoded service account key | +| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Username or email | API key or identity token | +| **Self-hosted (devpi, Nexus, etc.)** | Your registry's simple API URL | Registry username | Registry password | + + + For **AWS CodeArtifact**, the authorization token expires periodically. + You'll need to refresh the `UV_INDEX_*_PASSWORD` value when it expires. + Consider automating this in your CI/CD pipeline. + + +## Setting Environment Variables in AMP + +Private registry credentials must be configured as environment variables in CrewAI AMP. +You have two options: + + + + 1. Log in to [CrewAI AMP](https://app.crewai.com) + 2. Navigate to your automation + 3. Open the **Environment Variables** tab + 4. 
Add each variable (`UV_INDEX_*_USERNAME` and `UV_INDEX_*_PASSWORD`) with its value + + See the [Deploy to AMP — Set Environment Variables](/en/enterprise/guides/deploy-to-amp#set-environment-variables) step for details. + + + Add the variables to your local `.env` file before running `crewai deploy create`. + The CLI will securely transfer them to the platform: + + ```bash + # .env + OPENAI_API_KEY=sk-... + UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token + UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here + ``` + + ```bash + crewai deploy create + ``` + + + + + **Never** commit credentials to your repository. Use AMP environment variables for all secrets. + The `.env` file should be listed in `.gitignore`. + + +To update credentials on an existing deployment, see [Update Your Crew — Environment Variables](/en/enterprise/guides/update-crew). + +## How It All Fits Together + +When CrewAI AMP builds your automation, the resolution flow works like this: + + + + AMP pulls your repository and reads `pyproject.toml` and `uv.lock`. + + + UV reads `[tool.uv.sources]` to determine which index each package should come from. + + + For each private index, UV looks up `UV_INDEX_{NAME}_USERNAME` and `UV_INDEX_{NAME}_PASSWORD` + from the environment variables you configured in AMP. + + + UV downloads and installs all packages — both public (from PyPI) and private (from your registry). + + + Your crew or flow starts with all dependencies available. + + + +## Troubleshooting + +### Authentication Errors During Build + +**Symptom**: Build fails with `401 Unauthorized` or `403 Forbidden` when resolving a private package. 
+ +**Check**: +- The `UV_INDEX_*` environment variable names match your index name exactly (uppercased, hyphens → underscores) +- Credentials are set in AMP environment variables, not just in a local `.env` +- Your token/PAT has the required read permissions for the package feed +- The token hasn't expired (especially relevant for AWS CodeArtifact) + +### Package Not Found + +**Symptom**: `No matching distribution found for my-private-package`. + +**Check**: +- The index URL in `pyproject.toml` ends with `/simple/` +- The `[tool.uv.sources]` entry maps the correct package name to the correct index name +- The package is actually published to your private registry +- Run `uv lock` locally with the same credentials to verify resolution works + +### Lock File Conflicts + +**Symptom**: `uv lock` fails or produces unexpected results after adding a private index. + +**Solution**: Set the credentials locally and regenerate: + +```bash +export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token +export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat +uv lock +``` + +Then commit the updated `uv.lock`. + +## Related Guides + + + + Verify project structure and dependencies before deploying. + + + Deploy your crew or flow and configure environment variables. + + + Update environment variables and push changes to a running deployment. + + diff --git a/docs/en/guides/migration/migrating-from-langgraph.mdx b/docs/en/guides/migration/migrating-from-langgraph.mdx new file mode 100644 index 000000000..192aa53e4 --- /dev/null +++ b/docs/en/guides/migration/migrating-from-langgraph.mdx @@ -0,0 +1,518 @@ +--- +title: "Moving from LangGraph to CrewAI: A Practical Guide for Engineers" +description: If you have already built with LangGraph, learn how to quickly port your projects to CrewAI +icon: switch +mode: "wide" +--- + +You've built agents with LangGraph. You've wrestled with `StateGraph`, wired up conditional edges, and debugged state dictionaries at 2 AM.
It works — but somewhere along the way, you started wondering if there's a better path to production. + +There is. **CrewAI Flows** gives you the same power — event-driven orchestration, conditional routing, shared state — with dramatically less boilerplate and a mental model that maps cleanly to how you actually think about multi-step AI workflows. + +This article walks through the core concepts side by side, shows real code comparisons, and demonstrates why CrewAI Flows is the framework you'll want to reach for next. + +--- + +## The Mental Model Shift + +LangGraph asks you to think in **graphs**: nodes, edges, and state dictionaries. Every workflow is a directed graph where you explicitly wire transitions between computation steps. It's powerful, but the abstraction carries overhead — especially when your workflow is fundamentally sequential with a few decision points. + +CrewAI Flows asks you to think in **events**: methods that start things, methods that listen for results, and methods that route execution. The topology of your workflow emerges from decorator annotations rather than explicit graph construction. This isn't just syntactic sugar — it changes how you design, read, and maintain your pipelines. + +Here's the core mapping: + +| LangGraph Concept | CrewAI Flows Equivalent | +| --- | --- | +| `StateGraph` class | `Flow` class | +| `add_node()` | Methods decorated with `@start`, `@listen` | +| `add_edge()` / `add_conditional_edges()` | `@listen()` / `@router()` decorators | +| `TypedDict` state | Pydantic `BaseModel` state | +| `START` / `END` constants | `@start()` decorator / natural method return | +| `graph.compile()` | `flow.kickoff()` | +| Checkpointer / persistence | Built-in memory (LanceDB-backed) | + +Let's see what this looks like in practice. + +--- + +## Demo 1: A Simple Sequential Pipeline + +Imagine you're building a pipeline that takes a topic, researches it, writes a summary, and formats the output. Here's how each framework handles it. 
+ +### LangGraph Approach + +```python +from typing import TypedDict +from langgraph.graph import StateGraph, START, END + +class ResearchState(TypedDict): + topic: str + raw_research: str + summary: str + formatted_output: str + +def research_topic(state: ResearchState) -> dict: + # Call an LLM or search API + result = llm.invoke(f"Research the topic: {state['topic']}") + return {"raw_research": result} + +def write_summary(state: ResearchState) -> dict: + result = llm.invoke( + f"Summarize this research:\n{state['raw_research']}" + ) + return {"summary": result} + +def format_output(state: ResearchState) -> dict: + result = llm.invoke( + f"Format this summary as a polished article section:\n{state['summary']}" + ) + return {"formatted_output": result} + +# Build the graph +graph = StateGraph(ResearchState) +graph.add_node("research", research_topic) +graph.add_node("summarize", write_summary) +graph.add_node("format", format_output) + +graph.add_edge(START, "research") +graph.add_edge("research", "summarize") +graph.add_edge("summarize", "format") +graph.add_edge("format", END) + +# Compile and run +app = graph.compile() +result = app.invoke({"topic": "quantum computing advances in 2026"}) +print(result["formatted_output"]) +``` + +You define functions, register them as nodes, and manually wire every transition. For a simple sequence like this, there's a lot of ceremony. 
+ +### CrewAI Flows Approach + +```python +from crewai import LLM, Agent, Crew, Process, Task +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ResearchState(BaseModel): + topic: str = "" + raw_research: str = "" + summary: str = "" + formatted_output: str = "" + +class ResearchFlow(Flow[ResearchState]): + @start() + def research_topic(self): + # Option 1: Direct LLM call + result = llm.call(f"Research the topic: {self.state.topic}") + self.state.raw_research = result + return result + + @listen(research_topic) + def write_summary(self, research_output): + # Option 2: A single agent + summarizer = Agent( + role="Research Summarizer", + goal="Produce concise, accurate summaries of research content", + backstory="You are an expert at distilling complex research into clear, " + "digestible summaries.", + llm=llm, + verbose=True, + ) + result = summarizer.kickoff( + f"Summarize this research:\n{self.state.raw_research}" + ) + self.state.summary = str(result) + return self.state.summary + + @listen(write_summary) + def format_output(self, summary_output): + # Option 3: a complete crew (with one or more agents) + formatter = Agent( + role="Content Formatter", + goal="Transform research summaries into polished, publication-ready article sections", + backstory="You are a skilled editor with expertise in structuring and " + "presenting technical content for a general audience.", + llm=llm, + verbose=True, + ) + format_task = Task( + description=f"Format this summary as a polished article section:\n{self.state.summary}", + expected_output="A well-structured, polished article section ready for publication.", + agent=formatter, + ) + crew = Crew( + agents=[formatter], + tasks=[format_task], + process=Process.sequential, + verbose=True, + ) + result = crew.kickoff() + self.state.formatted_output = str(result) + return self.state.formatted_output + +# Run the flow +flow = ResearchFlow() 
+flow.state.topic = "quantum computing advances in 2026" +result = flow.kickoff() +print(flow.state.formatted_output) + +``` + +Notice what's different: no graph construction, no edge wiring, no compile step. The execution order is declared right where the logic lives. `@start()` marks the entry point, and `@listen(method_name)` chains steps together. The state is a proper Pydantic model with type safety, validation, and IDE auto-completion. + +--- + +## Demo 2: Conditional Routing + +This is where things get interesting. Say you're building a content pipeline that routes to different processing paths based on the type of content detected. + +### LangGraph Approach + +```python +from typing import TypedDict, Literal +from langgraph.graph import StateGraph, START, END + +class ContentState(TypedDict): + input_text: str + content_type: str + result: str + +def classify_content(state: ContentState) -> dict: + content_type = llm.invoke( + f"Classify this content as 'technical', 'creative', or 'business':\n{state['input_text']}" + ) + return {"content_type": content_type.strip().lower()} + +def process_technical(state: ContentState) -> dict: + result = llm.invoke(f"Process as technical doc:\n{state['input_text']}") + return {"result": result} + +def process_creative(state: ContentState) -> dict: + result = llm.invoke(f"Process as creative writing:\n{state['input_text']}") + return {"result": result} + +def process_business(state: ContentState) -> dict: + result = llm.invoke(f"Process as business content:\n{state['input_text']}") + return {"result": result} + +# Routing function +def route_content(state: ContentState) -> Literal["technical", "creative", "business"]: + return state["content_type"] + +# Build the graph +graph = StateGraph(ContentState) +graph.add_node("classify", classify_content) +graph.add_node("technical", process_technical) +graph.add_node("creative", process_creative) +graph.add_node("business", process_business) + +graph.add_edge(START, "classify") 
+graph.add_conditional_edges( + "classify", + route_content, + { + "technical": "technical", + "creative": "creative", + "business": "business", + } +) +graph.add_edge("technical", END) +graph.add_edge("creative", END) +graph.add_edge("business", END) + +app = graph.compile() +result = app.invoke({"input_text": "Explain how TCP handshakes work"}) +``` + +You need a separate routing function, explicit conditional edge mapping, and termination edges for every branch. The routing logic is decoupled from the node that produces the routing decision. + +### CrewAI Flows Approach + +```python +from crewai import LLM, Agent +from crewai.flow.flow import Flow, listen, router, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ContentState(BaseModel): + input_text: str = "" + content_type: str = "" + result: str = "" + +class ContentFlow(Flow[ContentState]): + @start() + def classify_content(self): + self.state.content_type = ( + llm.call( + f"Classify this content as 'technical', 'creative', or 'business':\n" + f"{self.state.input_text}" + ) + .strip() + .lower() + ) + return self.state.content_type + + @router(classify_content) + def route_content(self, classification): + if classification == "technical": + return "process_technical" + elif classification == "creative": + return "process_creative" + else: + return "process_business" + + @listen("process_technical") + def handle_technical(self): + agent = Agent( + role="Technical Writer", + goal="Produce clear, accurate technical documentation", + backstory="You are an expert technical writer who specializes in " + "explaining complex technical concepts precisely.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as technical doc:\n{self.state.input_text}") + ) + + @listen("process_creative") + def handle_creative(self): + agent = Agent( + role="Creative Writer", + goal="Craft engaging and imaginative creative content", + backstory="You are a talented 
creative writer with a flair for " + "compelling storytelling and vivid expression.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as creative writing:\n{self.state.input_text}") + ) + + @listen("process_business") + def handle_business(self): + agent = Agent( + role="Business Writer", + goal="Produce professional, results-oriented business content", + backstory="You are an experienced business writer who communicates " + "strategy and value clearly to professional audiences.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as business content:\n{self.state.input_text}") + ) + +flow = ContentFlow() +flow.state.input_text = "Explain how TCP handshakes work" +flow.kickoff() +print(flow.state.result) + +``` + +The `@router()` decorator turns a method into a decision point. It returns a string that matches a listener — no mapping dictionaries, no separate routing functions. The branching logic reads like a Python `if` statement because it *is* one. + +--- + +## Demo 3: Integrating AI Agent Crews into Flows + +Here's where CrewAI's real power shines. Flows aren't just for chaining LLM calls — they orchestrate full **Crews** of autonomous agents. This is something LangGraph simply doesn't have a native equivalent for. 
+ +```python +from crewai import Agent, Task, Crew +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +class ArticleState(BaseModel): + topic: str = "" + research: str = "" + draft: str = "" + final_article: str = "" + +class ArticleFlow(Flow[ArticleState]): + + @start() + def run_research_crew(self): + """A full Crew of agents handles research.""" + researcher = Agent( + role="Senior Research Analyst", + goal=f"Produce comprehensive research on: {self.state.topic}", + backstory="You're a veteran analyst known for thorough, " + "well-sourced research reports.", + llm="gpt-4o" + ) + + research_task = Task( + description=f"Research '{self.state.topic}' thoroughly. " + "Cover key trends, data points, and expert opinions.", + expected_output="A detailed research brief with sources.", + agent=researcher + ) + + crew = Crew(agents=[researcher], tasks=[research_task]) + result = crew.kickoff() + self.state.research = result.raw + return result.raw + + @listen(run_research_crew) + def run_writing_crew(self, research_output): + """A different Crew handles writing.""" + writer = Agent( + role="Technical Writer", + goal="Write a compelling article based on provided research.", + backstory="You turn complex research into engaging, clear prose.", + llm="gpt-4o" + ) + + editor = Agent( + role="Senior Editor", + goal="Review and polish articles for publication quality.", + backstory="20 years of editorial experience at top tech publications.", + llm="gpt-4o" + ) + + write_task = Task( + description=f"Write an article based on this research:\n{self.state.research}", + expected_output="A well-structured draft article.", + agent=writer + ) + + edit_task = Task( + description="Review, fact-check, and polish the draft article.", + expected_output="A publication-ready article.", + agent=editor + ) + + crew = Crew(agents=[writer, editor], tasks=[write_task, edit_task]) + result = crew.kickoff() + self.state.final_article = result.raw + return result.raw 
+
+# Run the full pipeline
+flow = ArticleFlow()
+flow.state.topic = "The Future of Edge AI"
+flow.kickoff()
+print(flow.state.final_article)
+```
+
+This is the key insight: **Flows provide the orchestration layer, and Crews provide the intelligence layer.** Each step in a Flow can spin up a full team of collaborating agents, each with their own roles, goals, and tools. You get structured, predictable control flow *and* autonomous agent collaboration — the best of both worlds.
+
+In LangGraph, achieving something similar means manually implementing agent communication protocols, tool-calling loops, and delegation logic inside your node functions. It's possible, but it's plumbing you're building from scratch every time.
+
+---
+
+## Demo 4: Parallel Execution and Synchronization
+
+Real-world pipelines often need to fan out work and join the results. CrewAI Flows handles this elegantly with `and_` and `or_` operators.
+
+```python
+from crewai import LLM
+from crewai.flow.flow import Flow, and_, listen, start
+from pydantic import BaseModel
+
+llm = LLM(model="openai/gpt-5.2")
+
+class AnalysisState(BaseModel):
+    topic: str = ""
+    market_data: str = ""
+    tech_analysis: str = ""
+    competitor_intel: str = ""
+    final_report: str = ""
+
+class ParallelAnalysisFlow(Flow[AnalysisState]):
+    @start()
+    def start_method(self):
+        pass
+
+    @listen(start_method)
+    def gather_market_data(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(start_method)
+    def run_tech_analysis(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(start_method)
+    def gather_competitor_intel(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(and_(gather_market_data, run_tech_analysis, gather_competitor_intel))
+    def synthesize_report(self):
+        # Your agentic or deterministic code
+        pass
+
+flow = ParallelAnalysisFlow()
+flow.state.topic = "AI-powered developer tools"
+flow.kickoff()
+
+```
+
+The three methods listening to `start_method` all fire in parallel once it completes (multiple `@start()` methods would likewise run concurrently). 
The `and_()` combinator on the `@listen` decorator ensures `synthesize_report` only executes after *all three* upstream methods complete. There's also `or_()` for when you want to proceed as soon as *any* upstream task finishes.
+
+In LangGraph, you'd need to build a fan-out/fan-in pattern with parallel branches, a synchronization node, and careful state merging — all wired explicitly through edges.
+
+---
+
+## Why CrewAI Flows for Production
+
+Beyond cleaner syntax, Flows deliver several production-critical advantages:
+
+**Built-in state persistence.** Flow state can be persisted with the `@persist` decorator (backed by SQLite by default), meaning your workflows can survive crashes, be resumed, and accumulate state across runs. LangGraph requires you to configure a separate checkpointer.
+
+**Type-safe state management.** Pydantic models give you validation, serialization, and IDE support out of the box. LangGraph's `TypedDict` states don't validate at runtime.
+
+**First-class agent orchestration.** Crews are a native primitive. You define agents with roles, goals, backstories, and tools — and they collaborate autonomously within the structured envelope of a Flow. No need to reinvent multi-agent coordination.
+
+**Simpler mental model.** Decorators declare intent. `@start` means "begin here." `@listen(x)` means "run after x." `@router(x)` means "decide where to go after x." The code reads like the workflow it describes.
+
+**CLI integration.** Run flows with `crewai run`. No separate compilation step, no graph serialization. Your Flow is a Python class, and it runs like one.
+
+---
+
+## Migration Cheat Sheet
+
+If you're sitting on a LangGraph codebase and want to move to CrewAI Flows, here's a practical conversion guide:
+
+1. **Map your state.** Convert your `TypedDict` to a Pydantic `BaseModel`. Add default values for all fields.
+2. **Convert nodes to methods.** Each `add_node` function becomes a method on your `Flow` subclass. Replace `state["field"]` reads with `self.state.field`.
+3. 
**Replace edges with decorators.** Your `add_edge(START, "first_node")` becomes `@start()` on the first method. Sequential `add_edge("a", "b")` becomes `@listen(a)` on method `b`. +4. **Replace conditional edges with `@router`.** Your routing function and `add_conditional_edges()` mapping become a single `@router()` method that returns a route string. +5. **Replace compile + invoke with kickoff.** Drop `graph.compile()`. Call `flow.kickoff()` instead. +6. **Consider where Crews fit.** Any node where you have complex multi-step agent logic is a candidate for extraction into a Crew. This is where you'll see the biggest quality improvement. + +--- + +## Getting Started + +Install CrewAI and scaffold a new Flow project: + +```bash +pip install crewai +crewai create flow my_first_flow +cd my_first_flow +``` + +This generates a project structure with a ready-to-edit Flow class, configuration files, and a `pyproject.toml` with `type = "flow"` already set. Run it with: + +```bash +crewai run +``` + +From there, add your agents, wire up your listeners, and ship it. + +--- + +## Final Thoughts + +LangGraph taught the ecosystem that AI workflows need structure. That was an important lesson. But CrewAI Flows takes that lesson and delivers it in a form that's faster to write, easier to read, and more powerful in production — especially when your workflows involve multiple collaborating agents. + +If you're building anything beyond a single-agent chain, give Flows a serious look. The decorator-driven model, native Crew integration, and built-in state management mean you'll spend less time on plumbing and more time on the problems that matter. + +Start with `crewai create flow`. You won't look back. 
diff --git a/docs/en/learn/human-feedback-in-flows.mdx b/docs/en/learn/human-feedback-in-flows.mdx index 523c25fc5..0c3792bca 100644 --- a/docs/en/learn/human-feedback-in-flows.mdx +++ b/docs/en/learn/human-feedback-in-flows.mdx @@ -98,33 +98,43 @@ def handle_feedback(self, result): When you specify `emit`, the decorator becomes a router. The human's free-form feedback is interpreted by an LLM and collapsed into one of the specified outcomes: ```python Code -@start() -@human_feedback( - message="Do you approve this content for publication?", - emit=["approved", "rejected", "needs_revision"], - llm="gpt-4o-mini", - default_outcome="needs_revision", -) -def review_content(self): - return "Draft blog post content here..." +from crewai.flow.flow import Flow, start, listen, or_ +from crewai.flow.human_feedback import human_feedback -@listen("approved") -def publish(self, result): - print(f"Publishing! User said: {result.feedback}") +class ReviewFlow(Flow): + @start() + def generate_content(self): + return "Draft blog post content here..." -@listen("rejected") -def discard(self, result): - print(f"Discarding. Reason: {result.feedback}") + @human_feedback( + message="Do you approve this content for publication?", + emit=["approved", "rejected", "needs_revision"], + llm="gpt-4o-mini", + default_outcome="needs_revision", + ) + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "Draft blog post content here..." -@listen("needs_revision") -def revise(self, result): - print(f"Revising based on: {result.feedback}") + @listen("approved") + def publish(self, result): + print(f"Publishing! User said: {result.feedback}") + + @listen("rejected") + def discard(self, result): + print(f"Discarding. Reason: {result.feedback}") ``` +When the human says something like "needs more detail", the LLM collapses that to `"needs_revision"`, which triggers `review_content` again via `or_()` — creating a revision loop. 
The loop continues until the outcome is `"approved"` or `"rejected"`. + The LLM uses structured outputs (function calling) when available to guarantee the response is one of your specified outcomes. This makes routing reliable and predictable. + +A `@start()` method only runs once at the beginning of the flow. If you need a revision loop, separate the start method from the review method and use `@listen(or_("trigger", "revision_outcome"))` on the review method to enable the self-loop. + + ## HumanFeedbackResult The `HumanFeedbackResult` dataclass contains all information about a human feedback interaction: @@ -188,127 +198,183 @@ Each `HumanFeedbackResult` is appended to `human_feedback_history`, so multiple ## Complete Example: Content Approval Workflow -Here's a full example implementing a content review and approval workflow: +Here's a full example implementing a content review and approval workflow with a revision loop: ```python Code -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult from pydantic import BaseModel class ContentState(BaseModel): - topic: str = "" draft: str = "" - final_content: str = "" revision_count: int = 0 + status: str = "pending" class ContentApprovalFlow(Flow[ContentState]): - """A flow that generates content and gets human approval.""" + """A flow that generates content and loops until the human approves.""" @start() - def get_topic(self): - self.state.topic = input("What topic should I write about? ") - return self.state.topic - - @listen(get_topic) - def generate_draft(self, topic): - # In real use, this would call an LLM - self.state.draft = f"# {topic}\n\nThis is a draft about {topic}..." + def generate_draft(self): + self.state.draft = "# AI Safety\n\nThis is a draft about AI Safety..." return self.state.draft - @listen(generate_draft) @human_feedback( - message="Please review this draft. 
Reply 'approved', 'rejected', or provide revision feedback:", + message="Please review this draft. Approve, reject, or describe what needs changing:", emit=["approved", "rejected", "needs_revision"], llm="gpt-4o-mini", default_outcome="needs_revision", ) - def review_draft(self, draft): - return draft + @listen(or_("generate_draft", "needs_revision")) + def review_draft(self): + self.state.revision_count += 1 + return f"{self.state.draft} (v{self.state.revision_count})" @listen("approved") def publish_content(self, result: HumanFeedbackResult): - self.state.final_content = result.output - print("\n✅ Content approved and published!") - print(f"Reviewer comment: {result.feedback}") + self.state.status = "published" + print(f"Content approved and published! Reviewer said: {result.feedback}") return "published" @listen("rejected") def handle_rejection(self, result: HumanFeedbackResult): - print("\n❌ Content rejected") - print(f"Reason: {result.feedback}") + self.state.status = "rejected" + print(f"Content rejected. Reason: {result.feedback}") return "rejected" - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - self.state.revision_count += 1 - print(f"\n📝 Revision #{self.state.revision_count} requested") - print(f"Feedback: {result.feedback}") - # In a real flow, you might loop back to generate_draft - # For this example, we just acknowledge - return "revision_requested" - - -# Run the flow flow = ContentApprovalFlow() result = flow.kickoff() -print(f"\nFlow completed. Revisions requested: {flow.state.revision_count}") +print(f"\nFlow completed. Status: {flow.state.status}, Reviews: {flow.state.revision_count}") ``` ```text Output -What topic should I write about? AI Safety +================================================== +OUTPUT FOR REVIEW: +================================================== +# AI Safety + +This is a draft about AI Safety... (v1) +================================================== + +Please review this draft. 
Approve, reject, or describe what needs changing: +(Press Enter to skip, or type your feedback) + +Your feedback: Needs more detail on alignment research ================================================== OUTPUT FOR REVIEW: ================================================== # AI Safety -This is a draft about AI Safety... +This is a draft about AI Safety... (v2) ================================================== -Please review this draft. Reply 'approved', 'rejected', or provide revision feedback: +Please review this draft. Approve, reject, or describe what needs changing: (Press Enter to skip, or type your feedback) Your feedback: Looks good, approved! -✅ Content approved and published! -Reviewer comment: Looks good, approved! +Content approved and published! Reviewer said: Looks good, approved! -Flow completed. Revisions requested: 0 +Flow completed. Status: published, Reviews: 2 ``` +The key pattern is `@listen(or_("generate_draft", "needs_revision"))` — the review method listens to both the initial trigger and its own revision outcome, creating a self-loop that repeats until the human approves or rejects. + ## Combining with Other Decorators -The `@human_feedback` decorator works with other flow decorators. Place it as the innermost decorator (closest to the function): +The `@human_feedback` decorator works with `@start()`, `@listen()`, and `or_()`. 
Both decorator orderings work — the framework propagates attributes in both directions — but the recommended patterns are: ```python Code -# Correct: @human_feedback is innermost (closest to the function) +# One-shot review at the start of a flow (no self-loop) @start() -@human_feedback(message="Review this:") +@human_feedback(message="Review this:", emit=["approved", "rejected"], llm="gpt-4o-mini") def my_start_method(self): return "content" +# Linear review on a listener (no self-loop) @listen(other_method) -@human_feedback(message="Review this too:") +@human_feedback(message="Review this too:", emit=["good", "bad"], llm="gpt-4o-mini") def my_listener(self, data): return f"processed: {data}" + +# Self-loop: review that can loop back for revisions +@human_feedback(message="Approve or revise?", emit=["approved", "revise"], llm="gpt-4o-mini") +@listen(or_("upstream_method", "revise")) +def review_with_loop(self): + return "content for review" ``` - -Place `@human_feedback` as the innermost decorator (last/closest to the function) so it wraps the method directly and can capture the return value before passing to the flow system. - +### Self-loop pattern + +To create a revision loop, the review method must listen to **both** an upstream trigger and its own revision outcome using `or_()`: + +```python Code +@start() +def generate(self): + return "initial draft" + +@human_feedback( + message="Approve or request changes?", + emit=["revise", "approved"], + llm="gpt-4o-mini", + default_outcome="approved", +) +@listen(or_("generate", "revise")) +def review(self): + return "content" + +@listen("approved") +def publish(self): + return "published" +``` + +When the outcome is `"revise"`, the flow routes back to `review` (because it listens to `"revise"` via `or_()`). When the outcome is `"approved"`, the flow continues to `publish`. This works because the flow engine exempts routers from the "fire once" rule, allowing them to re-execute on each loop iteration. 
+ +### Chained routers + +A listener triggered by one router's outcome can itself be a router: + +```python Code +@start() +def generate(self): + return "draft content" + +@human_feedback(message="First review:", emit=["approved", "rejected"], llm="gpt-4o-mini") +@listen("generate") +def first_review(self): + return "draft content" + +@human_feedback(message="Final review:", emit=["publish", "hold"], llm="gpt-4o-mini") +@listen("approved") +def final_review(self, prev): + return "final content" + +@listen("publish") +def on_publish(self, prev): + return "published" + +@listen("hold") +def on_hold(self, prev): + return "held for later" +``` + +### Limitations + +- **`@start()` methods run once**: A `@start()` method cannot self-loop. If you need a revision cycle, use a separate `@start()` method as the entry point and put the `@human_feedback` on a `@listen()` method. +- **No `@start()` + `@listen()` on the same method**: This is a Flow framework constraint. A method is either a start point or a listener, not both. ## Best Practices ### 1. Write Clear Request Messages -The `request` parameter is what the human sees. Make it actionable: +The `message` parameter is what the human sees. Make it actionable: ```python Code # ✅ Good - clear and actionable @@ -516,9 +582,9 @@ class ContentPipeline(Flow): @start() @human_feedback( message="Approve this content for publication?", - emit=["approved", "rejected", "needs_revision"], + emit=["approved", "rejected"], llm="gpt-4o-mini", - default_outcome="needs_revision", + default_outcome="rejected", provider=SlackNotificationProvider("#content-reviews"), ) def generate_content(self): @@ -534,11 +600,6 @@ class ContentPipeline(Flow): print(f"Archived. 
Reason: {result.feedback}") return {"status": "archived"} - @listen("needs_revision") - def queue_revision(self, result): - print(f"Queued for revision: {result.feedback}") - return {"status": "revision_needed"} - # Starting the flow (will pause and wait for Slack response) def start_content_pipeline(): @@ -594,22 +655,22 @@ Over time, the human sees progressively better pre-reviewed output because each ```python Code class ArticleReviewFlow(Flow): @start() + def generate_article(self): + return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw + @human_feedback( message="Review this article draft:", emit=["approved", "needs_revision"], llm="gpt-4o-mini", learn=True, # enable HITL learning ) - def generate_article(self): - return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw + @listen(or_("generate_article", "needs_revision")) + def review_article(self): + return self.last_human_feedback.output if self.last_human_feedback else "article draft" @listen("approved") def publish(self): print(f"Publishing: {self.last_human_feedback.output}") - - @listen("needs_revision") - def revise(self): - print("Revising based on feedback...") ``` **First run**: The human sees the raw output and says "Always include citations for factual claims." The lesson is distilled and stored in memory. diff --git a/docs/en/learn/llm-connections.mdx b/docs/en/learn/llm-connections.mdx index daedc21a2..2b7a5d278 100644 --- a/docs/en/learn/llm-connections.mdx +++ b/docs/en/learn/llm-connections.mdx @@ -7,7 +7,7 @@ mode: "wide" ## Connect CrewAI to LLMs -CrewAI uses LiteLLM to connect to a wide variety of Language Models (LLMs). This integration provides extensive versatility, allowing you to use models from numerous providers with a simple, unified interface. +CrewAI connects to LLMs through native SDK integrations for the most popular providers (OpenAI, Anthropic, Google Gemini, Azure, and AWS Bedrock), and uses LiteLLM as a flexible fallback for all other providers. 
By default, CrewAI uses the `gpt-4o-mini` model. This is determined by the `OPENAI_MODEL_NAME` environment variable, which defaults to "gpt-4o-mini" if not set. @@ -41,6 +41,14 @@ LiteLLM supports a wide range of providers, including but not limited to: For a complete and up-to-date list of supported providers, please refer to the [LiteLLM Providers documentation](https://docs.litellm.ai/docs/providers). + + To use any provider not covered by a native integration, add LiteLLM as a dependency to your project: + ```bash + uv add 'crewai[litellm]' + ``` + Native providers (OpenAI, Anthropic, Google Gemini, Azure, AWS Bedrock) use their own SDK extras — see the [Provider Configuration Examples](/en/concepts/llms#provider-configuration-examples). + + ## Changing the LLM To use a different LLM with your CrewAI agents, you have several options: diff --git a/docs/en/observability/tracing.mdx b/docs/en/observability/tracing.mdx index 5663e22ba..ce620946a 100644 --- a/docs/en/observability/tracing.mdx +++ b/docs/en/observability/tracing.mdx @@ -35,7 +35,7 @@ Visit [app.crewai.com](https://app.crewai.com) and create your free account. Thi If you haven't already, install CrewAI with the CLI tools: ```bash -uv add crewai[tools] +uv add 'crewai[tools]' ``` Then authenticate your CLI with your CrewAI AMP account: diff --git a/docs/en/tools/automation/composiotool.mdx b/docs/en/tools/automation/composiotool.mdx index b8edbc253..9613aeb19 100644 --- a/docs/en/tools/automation/composiotool.mdx +++ b/docs/en/tools/automation/composiotool.mdx @@ -18,77 +18,46 @@ Composio is an integration platform that allows you to connect your AI agents to To incorporate Composio tools into your project, follow the instructions below: ```shell -pip install composio-crewai +pip install composio composio-crewai pip install crewai ``` -After the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`. 
Get your Composio API key from [here](https://app.composio.dev) +After the installation is complete, set your Composio API key as `COMPOSIO_API_KEY`. Get your Composio API key from [here](https://platform.composio.dev) ## Example The following example demonstrates how to initialize the tool and execute a github action: -1. Initialize Composio toolset +1. Initialize Composio with CrewAI Provider ```python Code -from composio_crewai import ComposioToolSet, App, Action +from composio_crewai import ComposioProvider +from composio import Composio from crewai import Agent, Task, Crew -toolset = ComposioToolSet() +composio = Composio(provider=ComposioProvider()) ``` -2. Connect your GitHub account +2. Create a new Composio Session and retrieve the tools -```shell CLI -composio add github -``` -```python Code -request = toolset.initiate_connection(app=App.GITHUB) -print(f"Open this URL to authenticate: {request.redirectUrl}") +```python +session = composio.create( + user_id="your-user-id", + toolkits=["gmail", "github"] # optional, default is all toolkits +) +tools = session.tools() ``` +Read more about sessions and user management [here](https://docs.composio.dev/docs/configuring-sessions) -3. Get Tools +3. Authenticating users manually -- Retrieving all the tools from an app (not recommended for production): +Composio automatically authenticates the users during the agent chat session. However, you can also authenticate the user manually by calling the `authorize` method. 
```python Code -tools = toolset.get_tools(apps=[App.GITHUB]) +connection_request = session.authorize("github") +print(f"Open this URL to authenticate: {connection_request.redirect_url}") ``` -- Filtering tools based on tags: -```python Code -tag = "users" - -filtered_action_enums = toolset.find_actions_by_tags( - App.GITHUB, - tags=[tag], -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` - -- Filtering tools based on use case: -```python Code -use_case = "Star a repository on GitHub" - -filtered_action_enums = toolset.find_actions_by_use_case( - App.GITHUB, use_case=use_case, advanced=False -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` -Set `advanced` to True to get actions for complex use cases - -- Using specific tools: - -In this demo, we will use the `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` action from the GitHub app. -```python Code -tools = toolset.get_tools( - actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER] -) -``` -Learn more about filtering actions [here](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions) - 4. Define agent ```python Code @@ -116,4 +85,4 @@ crew = Crew(agents=[crewai_agent], tasks=[task]) crew.kickoff() ``` -* More detailed list of tools can be found [here](https://app.composio.dev) +* More detailed list of tools can be found [here](https://docs.composio.dev/toolkits) diff --git a/docs/en/tools/search-research/bravesearchtool.mdx b/docs/en/tools/search-research/bravesearchtool.mdx index 844f98a75..e6e84fd25 100644 --- a/docs/en/tools/search-research/bravesearchtool.mdx +++ b/docs/en/tools/search-research/bravesearchtool.mdx @@ -1,97 +1,316 @@ --- -title: Brave Search -description: The `BraveSearchTool` is designed to search the internet using the Brave Search API. +title: Brave Search Tools +description: A suite of tools for querying the Brave Search API — covering web, news, image, and video search. 
icon: searchengin mode: "wide" --- -# `BraveSearchTool` +# Brave Search Tools ## Description -This tool is designed to perform web searches using the Brave Search API. It allows you to search the internet with a specified query and retrieve relevant results. The tool supports customizable result counts and country-specific searches. +CrewAI offers a family of Brave Search tools, each targeting a specific [Brave Search API](https://brave.com/search/api/) endpoint. +Rather than a single catch-all tool, you can pick exactly the tool that matches the kind of results your agent needs: + +| Tool | Endpoint | Use case | +| --- | --- | --- | +| `BraveWebSearchTool` | Web Search | General web results, snippets, and URLs | +| `BraveNewsSearchTool` | News Search | Recent news articles and headlines | +| `BraveImageSearchTool` | Image Search | Image results with dimensions and source URLs | +| `BraveVideoSearchTool` | Video Search | Video results from across the web | +| `BraveLocalPOIsTool` | Local POIs | Find points of interest (e.g., restaurants) | +| `BraveLocalPOIsDescriptionTool` | Local POIs | Retrieve AI-generated location descriptions | +| `BraveLLMContextTool` | LLM Context | Pre-extracted web content optimized for AI agents, LLM grounding, and RAG pipelines. | + +All tools share a common base class (`BraveSearchToolBase`) that provides consistent behavior — rate limiting, automatic retries on `429` responses, header and parameter validation, and optional file saving. + + + The older `BraveSearchTool` class is still available for backwards compatibility, but it is considered **legacy** and will not receive the same level of attention going forward. We recommend migrating to the specific tools listed above, which offer richer configuration and a more focused interface. 
+ + + + While many tools (e.g., _BraveWebSearchTool_, _BraveNewsSearchTool_, _BraveImageSearchTool_, and _BraveVideoSearchTool_) can be used with a free Brave Search API subscription/plan, some parameters (e.g., `enable_snippets`) and tools (e.g., _BraveLocalPOIsTool_ and _BraveLocalPOIsDescriptionTool_) require a paid plan. Consult your subscription plan's capabilities for clarification. + ## Installation -To incorporate this tool into your project, follow the installation instructions below: - ```shell pip install 'crewai[tools]' ``` -## Steps to Get Started +## Getting Started -To effectively use the `BraveSearchTool`, follow these steps: +1. **Install the package** — confirm that `crewai[tools]` is installed in your Python environment. +2. **Get an API key** — sign up at [api-dashboard.search.brave.com/login](https://api-dashboard.search.brave.com/login) to generate a key. +3. **Set the environment variable** — store your key as `BRAVE_API_KEY`, or pass it directly via the `api_key` parameter. -1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment. -2. **API Key Acquisition**: Acquire a Brave Search API key at https://api.search.brave.com/app/keys (sign in to generate a key). -3. **Environment Configuration**: Store your obtained API key in an environment variable named `BRAVE_API_KEY` to facilitate its use by the tool. 
+## Quick Examples -## Example - -The following example demonstrates how to initialize the tool and execute a search with a given query: +### Web Search ```python Code -from crewai_tools import BraveSearchTool +from crewai_tools import BraveWebSearchTool -# Initialize the tool for internet searching capabilities -tool = BraveSearchTool() - -# Execute a search -results = tool.run(search_query="CrewAI agent framework") +tool = BraveWebSearchTool() +results = tool.run(q="CrewAI agent framework") print(results) ``` -## Parameters - -The `BraveSearchTool` accepts the following parameters: - -- **search_query**: Mandatory. The search query you want to use to search the internet. -- **country**: Optional. Specify the country for the search results. Default is empty string. -- **n_results**: Optional. Number of search results to return. Default is `10`. -- **save_file**: Optional. Whether to save the search results to a file. Default is `False`. - -## Example with Parameters - -Here is an example demonstrating how to use the tool with additional parameters: +### News Search ```python Code -from crewai_tools import BraveSearchTool +from crewai_tools import BraveNewsSearchTool -# Initialize the tool with custom parameters -tool = BraveSearchTool( - country="US", - n_results=5, - save_file=True +tool = BraveNewsSearchTool() +results = tool.run(q="latest AI breakthroughs") +print(results) +``` + +### Image Search + +```python Code +from crewai_tools import BraveImageSearchTool + +tool = BraveImageSearchTool() +results = tool.run(q="northern lights photography") +print(results) +``` + +### Video Search + +```python Code +from crewai_tools import BraveVideoSearchTool + +tool = BraveVideoSearchTool() +results = tool.run(q="how to build AI agents") +print(results) +``` + +### Location POI Descriptions + +```python Code +from crewai_tools import ( + BraveWebSearchTool, + BraveLocalPOIsDescriptionTool, ) -# Execute a search -results = tool.run(search_query="Latest AI developments") 
-print(results) +web_search = BraveWebSearchTool(raw=True) +poi_details = BraveLocalPOIsDescriptionTool() + +results = web_search.run(q="italian restaurants in pensacola, florida") + +if "locations" in results: + location_ids = [ loc["id"] for loc in results["locations"]["results"] ] + if location_ids: + descriptions = poi_details.run(ids=location_ids) + print(descriptions) +``` + +## Common Constructor Parameters + +Every Brave Search tool accepts the following parameters at initialization: + +| Parameter | Type | Default | Description | +| --- | --- | --- | --- | +| `api_key` | `str \| None` | `None` | Brave API key. Falls back to the `BRAVE_API_KEY` environment variable. | +| `headers` | `dict \| None` | `None` | Additional HTTP headers to send with every request (e.g., `api-version`, geolocation headers). | +| `requests_per_second` | `float` | `1.0` | Maximum request rate. The tool will sleep between calls to stay within this limit. | +| `save_file` | `bool` | `False` | When `True`, each response is written to a timestamped `.txt` file. | +| `raw` | `bool` | `False` | When `True`, the full API JSON response is returned without any refinement. | +| `timeout` | `int` | `30` | HTTP request timeout in seconds. | +| `country` | `str \| None` | `None` | Legacy shorthand for geo-targeting (e.g., `"US"`). Prefer using the `country` query parameter directly. | +| `n_results` | `int` | `10` | Legacy shorthand for result count. Prefer using the `count` query parameter directly. | + + + The `country` and `n_results` constructor parameters exist for backwards compatibility. They are applied as defaults when the corresponding query parameters (`country`, `count`) are not provided at call time. For new code, we recommend passing `country` and `count` directly as query parameters instead. + + +## Query Parameters + +Each tool validates its query parameters against a Pydantic schema before sending the request. 
+The parameters vary slightly per endpoint — here is a summary of the most commonly used ones: + +### BraveWebSearchTool + +| Parameter | Description | +| --- | --- | +| `q` | **(required)** Search query string (max 400 chars). | +| `country` | Two-letter country code for geo-targeting (e.g., `"US"`). | +| `search_lang` | Two-letter language code for results (e.g., `"en"`). | +| `count` | Max number of results to return (1–20). | +| `offset` | Skip the first N pages of results (0–9). | +| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. | +| `freshness` | Recency filter: `"pd"` (past day), `"pw"` (past week), `"pm"` (past month), `"py"` (past year), or a date range like `"2025-01-01to2025-06-01"`. | +| `extra_snippets` | Include up to 5 additional text snippets per result. | +| `goggles` | Brave Goggles URL(s) and/or source for custom re-ranking. | + +For the complete parameter and header reference, see the [Brave Web Search API documentation](https://api-dashboard.search.brave.com/api-reference/web/search/get). + +### BraveNewsSearchTool + +| Parameter | Description | +| --- | --- | +| `q` | **(required)** Search query string (max 400 chars). | +| `country` | Two-letter country code for geo-targeting. | +| `search_lang` | Two-letter language code for results. | +| `count` | Max number of results to return (1–50). | +| `offset` | Skip the first N pages of results (0–9). | +| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. | +| `freshness` | Recency filter (same options as Web Search). | +| `goggles` | Brave Goggles URL(s) and/or source for custom re-ranking. | + +For the complete parameter and header reference, see the [Brave News Search API documentation](https://api-dashboard.search.brave.com/api-reference/news/news_search/get). + +### BraveImageSearchTool + +| Parameter | Description | +| --- | --- | +| `q` | **(required)** Search query string (max 400 chars). | +| `country` | Two-letter country code for geo-targeting. 
| +| `search_lang` | Two-letter language code for results. | +| `count` | Max number of results to return (1–200). | +| `safesearch` | Content filter: `"off"` or `"strict"`. | +| `spellcheck` | Attempt to correct spelling errors in the query. | + +For the complete parameter and header reference, see the [Brave Image Search API documentation](https://api-dashboard.search.brave.com/api-reference/images/image_search). + +### BraveVideoSearchTool + +| Parameter | Description | +| --- | --- | +| `q` | **(required)** Search query string (max 400 chars). | +| `country` | Two-letter country code for geo-targeting. | +| `search_lang` | Two-letter language code for results. | +| `count` | Max number of results to return (1–50). | +| `offset` | Skip the first N pages of results (0–9). | +| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. | +| `freshness` | Recency filter (same options as Web Search). | + +For the complete parameter and header reference, see the [Brave Video Search API documentation](https://api-dashboard.search.brave.com/api-reference/videos/video_search/get). + +### BraveLocalPOIsTool + +| Parameter | Description | +| --- | --- | +| `ids` | **(required)** A list of unique identifiers for the desired locations. | +| `search_lang` | Two-letter language code for results. | + +For the complete parameter and header reference, see [Brave Local POIs API documentation](https://api-dashboard.search.brave.com/api-reference/web/local_pois). + +### BraveLocalPOIsDescriptionTool + +| Parameter | Description | +| --- | --- | +| `ids` | **(required)** A list of unique identifiers for the desired locations. | + +For the complete parameter and header reference, see [Brave POI Descriptions API documentation](https://api-dashboard.search.brave.com/api-reference/web/poi_descriptions). + +## Custom Headers + +All tools support custom HTTP request headers. 
The Web Search tool, for example, accepts geolocation headers for location-aware results: + +```python Code +from crewai_tools import BraveWebSearchTool + +tool = BraveWebSearchTool( + headers={ + "x-loc-lat": "37.7749", + "x-loc-long": "-122.4194", + "x-loc-city": "San Francisco", + "x-loc-state": "CA", + "x-loc-country": "US", + } +) + +results = tool.run(q="best coffee shops nearby") +``` + +You can also update headers after initialization using the `set_headers()` method: + +```python Code +tool.set_headers({"api-version": "2025-01-01"}) +``` + +## Raw Mode + +By default, each tool refines the API response into a concise list of results. If you need the full, unprocessed API response, enable raw mode: + +```python Code +from crewai_tools import BraveWebSearchTool + +tool = BraveWebSearchTool(raw=True) +full_response = tool.run(q="Brave Search API") ``` ## Agent Integration Example -Here's how to integrate the `BraveSearchTool` with a CrewAI agent: +Here's how to equip a CrewAI agent with multiple Brave Search tools: ```python Code from crewai import Agent from crewai.project import agent -from crewai_tools import BraveSearchTool +from crewai_tools import BraveWebSearchTool, BraveNewsSearchTool -# Initialize the tool -brave_search_tool = BraveSearchTool() +web_search = BraveWebSearchTool() +news_search = BraveNewsSearchTool() -# Define an agent with the BraveSearchTool @agent def researcher(self) -> Agent: return Agent( config=self.agents_config["researcher"], - allow_delegation=False, - tools=[brave_search_tool] + tools=[web_search, news_search], ) ``` +## Advanced Example + +Combining multiple parameters for a targeted search: + +```python Code +from crewai_tools import BraveWebSearchTool + +tool = BraveWebSearchTool( + requests_per_second=0.5, # conservative rate limit + save_file=True, +) + +results = tool.run( + q="artificial intelligence news", + country="US", + search_lang="en", + count=5, + freshness="pm", # past month only + extra_snippets=True, +) 
+print(results) +``` + +## Migrating from `BraveSearchTool` (Legacy) + +If you are currently using `BraveSearchTool`, switching to the new tools is straightforward: + +```python Code +# Before (legacy) +from crewai_tools import BraveSearchTool + +tool = BraveSearchTool(country="US", n_results=5, save_file=True) +results = tool.run(search_query="AI agents") + +# After (recommended) +from crewai_tools import BraveWebSearchTool + +tool = BraveWebSearchTool(save_file=True) +results = tool.run(q="AI agents", country="US", count=5) +``` + +Key differences: +- **Import**: Use `BraveWebSearchTool` (or the news/image/video variant) instead of `BraveSearchTool`. +- **Query parameter**: Use `q` instead of `search_query`. (Both `search_query` and `query` are still accepted for convenience, but `q` is the preferred parameter.) +- **Result count**: Pass `count` as a query parameter instead of `n_results` at init time. +- **Country**: Pass `country` as a query parameter instead of at init time. +- **API key**: Can now be passed directly via `api_key=` in addition to the `BRAVE_API_KEY` environment variable. +- **Rate limiting**: Configurable via `requests_per_second` with automatic retry on `429` responses. + ## Conclusion -By integrating the `BraveSearchTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. The tool provides a simple interface to the powerful Brave Search API, making it easy to retrieve and process search results programmatically. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward. \ No newline at end of file +The Brave Search tool suite gives your CrewAI agents flexible, endpoint-specific access to the Brave Search API. Whether you need web pages, breaking news, images, or videos, there is a dedicated tool with validated parameters and built-in resilience. 
Pick the tool that fits your use case, and refer to the [Brave Search API documentation](https://brave.com/search/api/) for the full details on available parameters and response formats. diff --git a/docs/ko/changelog.mdx b/docs/ko/changelog.mdx index 8b3b4da28..10030ef7a 100644 --- a/docs/ko/changelog.mdx +++ b/docs/ko/changelog.mdx @@ -4,6 +4,138 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정" icon: "clock" mode: "wide" --- + + ## v1.10.1 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1) + + ## 변경 사항 + + ### 기능 + - Gemini GenAI 업그레이드 + + ### 버그 수정 + - 재귀를 피하기 위해 실행기 리스너 값을 조정 + - Gemini에서 병렬 함수 응답 부분을 단일 Content 객체로 그룹화 + - Gemini에서 사고 모델의 사고 출력을 표시 + - 에이전트 도구가 None일 때 MCP 및 플랫폼 도구 로드 + - A2A에서 실행 이벤트 루프가 있는 Jupyter 환경 지원 + - 일시적인 추적을 위해 익명 ID 사용 + - 조건부로 플러스 헤더 전달 + - 원격 측정을 위해 비주 스레드에서 신호 처리기 등록 건너뛰기 + - 도구 오류를 관찰로 주입하고 이름 충돌 해결 + - Dependabot 경고를 해결하기 위해 pypdf를 4.x에서 6.7.4로 업그레이드 + - 심각 및 높은 Dependabot 보안 경고 해결 + + ### 문서 + - Composio 도구 문서를 지역별로 동기화 + + ## 기여자 + + @giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96 + + + + + ## v1.10.1a1 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## 변경 사항 + + ### 기능 + - 단계 콜백 메서드에서 비동기 호출 지원 구현 + - 메모리 모듈의 무거운 의존성에 대한 지연 로딩 구현 + + ### 문서 + - v1.10.0에 대한 변경 로그 및 버전 업데이트 + + ### 리팩토링 + - 비동기 호출을 지원하기 위해 단계 콜백 메서드 리팩토링 + - 메모리 모듈의 무거운 의존성에 대한 지연 로딩을 구현하기 위해 리팩토링 + + ### 버그 수정 + - 릴리스 노트의 분기 수정 + + ## 기여자 + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.1a1 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## 변경 사항 + + ### 리팩토링 + - 비동기 호출을 지원하기 위해 단계 콜백 메서드 리팩토링 + - 메모리 모듈의 무거운 의존성에 대해 지연 로딩 구현 + + ### 문서화 + - v1.10.0에 대한 변경 로그 및 버전 업데이트 + + ### 버그 수정 + - 릴리스 노트를 위한 브랜치 생성 + + ## 기여자 + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.0 + + [GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.0) + + ## 변경 사항 + + ### 기능 + - MCP 도구 해상도 및 관련 이벤트 개선 + - lancedb 
버전 업데이트 및 lance-namespace 패키지 추가 + - CrewAgentExecutor 및 BaseTool에서 JSON 인수 파싱 및 검증 개선 + - CLI HTTP 클라이언트를 requests에서 httpx로 마이그레이션 + - 버전화된 문서 추가 + - 버전 노트에 대한 yanked 감지 추가 + - Flows에서 사용자 입력 처리 구현 + - 인간 피드백 통합 테스트에서 HITL 자기 루프 기능 개선 + - eventbus에 started_event_id 추가 및 설정 + - tools.specs 자동 업데이트 + + ### 버그 수정 + - 빈 경우에도 도구 kwargs를 검증하여 모호한 TypeError 방지 + - LLM을 위한 도구 매개변수 스키마에서 null 타입 유지 + - output_pydantic/output_json을 네이티브 구조화된 출력으로 매핑 + - 약속이 있는 경우 콜백이 실행/대기되도록 보장 + - 예외 컨텍스트에서 메서드 이름 캡처 + - 라우터 결과에서 enum 타입 유지; 타입 개선 + - 입력으로 지속성 ID가 전달될 때 조용히 깨지는 순환 흐름 수정 + - CLI 플래그 형식을 --skip-provider에서 --skip_provider로 수정 + - OpenAI 도구 호출 스트림이 완료되도록 보장 + - MCP 도구에서 복잡한 스키마 $ref 포인터 해결 + - 스키마에서 additionalProperties=false 강제 적용 + - 크루 폴더에 대해 예약된 스크립트 이름 거부 + - 가드레일 이벤트 방출 테스트에서 경쟁 조건 해결 + + ### 문서 + - 비네이티브 LLM 공급자를 위한 litellm 종속성 노트 추가 + - NL2SQL 보안 모델 및 강화 지침 명확화 + - 9개 통합에서 96개의 누락된 작업 추가 + + ### 리팩토링 + - crew를 provider로 리팩토링 + - HITL을 provider 패턴으로 추출 + - 훅 타이핑 및 등록 개선 + + ## 기여자 + + @dependabot[bot], @github-actions[bot], @github-code-quality[bot], @greysonlalonde, @heitorado, @hobostay, @joaomdmoura, @johnvan7, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha, @mplachta, @nicoferdi96, @theCyberTech, @thiagomoretto, @vinibrsl + + + ## v1.9.0 diff --git a/docs/ko/concepts/llms.mdx b/docs/ko/concepts/llms.mdx index 84b30f5a9..77e71d518 100644 --- a/docs/ko/concepts/llms.mdx +++ b/docs/ko/concepts/llms.mdx @@ -105,6 +105,15 @@ CrewAI 코드 내에는 사용할 모델을 지정할 수 있는 여러 위치 + + CrewAI는 OpenAI, Anthropic, Google (Gemini API), Azure, AWS Bedrock에 대해 네이티브 SDK 통합을 제공합니다 — 제공자별 extras(예: `uv add "crewai[openai]"`) 외에 추가 설치가 필요하지 않습니다. + + 그 외 모든 제공자는 **LiteLLM**을 통해 지원됩니다. 이를 사용하려면 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` + + ## 공급자 구성 예시 CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양한 LLM 공급자를 지원합니다. 
@@ -214,6 +223,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 | `meta_llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | 128k | 4028 | 텍스트, 이미지 | 텍스트 | | `meta_llama/Llama-3.3-70B-Instruct` | 128k | 4028 | 텍스트 | 텍스트 | | `meta_llama/Llama-3.3-8B-Instruct` | 128k | 4028 | 텍스트 | 텍스트 | + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -354,6 +368,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 | gemini-1.5-flash | 1M 토큰 | 밸런스 잡힌 멀티모달 모델, 대부분의 작업에 적합 | | gemini-1.5-flash-8B | 1M 토큰 | 가장 빠르고, 비용 효율적, 고빈도 작업에 적합 | | gemini-1.5-pro | 2M 토큰 | 최고의 성능, 논리적 추론, 코딩, 창의적 협업 등 다양한 추론 작업에 적합 | + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -439,6 +458,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 model="sagemaker/" ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -454,6 +478,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 temperature=0.7 ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -540,6 +569,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 | rakuten/rakutenai-7b-instruct | 1,024 토큰 | 언어 이해, 추론, 텍스트 생성이 탁월한 최첨단 LLM | | rakuten/rakutenai-7b-chat | 1,024 토큰 | 언어 이해, 추론, 텍스트 생성이 탁월한 최첨단 LLM | | baichuan-inc/baichuan2-13b-chat | 4,096 토큰 | 중국어 및 영어 대화, 코딩, 수학, 지시 따르기, 퀴즈 풀이 지원 | + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -580,6 +614,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 # ... ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -601,6 +640,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 | Llama 3.1 70B/8B| 131,072 토큰 | 고성능, 대용량 문맥 작업 | | Llama 3.2 Series| 8,192 토큰 | 범용 작업 | | Mixtral 8x7B | 32,768 토큰 | 성능과 문맥의 균형 | + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 
프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -623,6 +667,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 base_url="https://api.watsonx.ai/v1" ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -636,6 +685,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 base_url="http://localhost:11434" ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -651,6 +705,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 temperature=0.7 ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -666,6 +725,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 base_url="https://api.perplexity.ai/" ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -680,6 +744,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct" ) ``` + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -703,6 +772,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 | Llama 3.2 Series| 8,192 토큰 | 범용, 멀티모달 작업 | | Llama 3.3 70B | 최대 131,072 토큰 | 고성능, 높은 출력 품질 | | Qwen2 familly | 8,192 토큰 | 고성능, 높은 출력 품질 | + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -728,6 +802,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 - 속도와 품질의 우수한 밸런스 - 긴 컨텍스트 윈도우 지원 + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -750,6 +829,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 - openrouter/deepseek/deepseek-r1 - openrouter/deepseek/deepseek-chat + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` @@ -772,6 +856,11 @@ CrewAI는 고유한 기능, 인증 방법, 모델 역량을 제공하는 다양 - 경쟁력 있는 가격 - 속도와 품질의 우수한 밸런스 + + **참고:** 이 제공자는 LiteLLM을 사용합니다. 
프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` diff --git a/docs/ko/enterprise/features/flow-hitl-management.mdx b/docs/ko/enterprise/features/flow-hitl-management.mdx index a760a4c44..adb8ee492 100644 --- a/docs/ko/enterprise/features/flow-hitl-management.mdx +++ b/docs/ko/enterprise/features/flow-hitl-management.mdx @@ -38,22 +38,21 @@ CrewAI Enterprise는 AI 워크플로우를 협업적인 인간-AI 프로세스 `@human_feedback` 데코레이터를 사용하여 Flow 내에 인간 검토 체크포인트를 구성합니다. 실행이 검토 포인트에 도달하면 시스템이 일시 중지되고, 담당자에게 이메일로 알리며, 응답을 기다립니다. ```python -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult class ContentApprovalFlow(Flow): @start() def generate_content(self): - # AI가 콘텐츠 생성 return "Q1 캠페인용 마케팅 카피 생성..." - @listen(generate_content) @human_feedback( message="브랜드 준수를 위해 이 콘텐츠를 검토해 주세요:", emit=["approved", "rejected", "needs_revision"], ) - def review_content(self, content): - return content + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "검토용 마케팅 카피..." @listen("approved") def publish_content(self, result: HumanFeedbackResult): @@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow): @listen("rejected") def archive_content(self, result: HumanFeedbackResult): print(f"콘텐츠 거부됨. 사유: {result.feedback}") - - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - print(f"수정 요청: {result.feedback}") ``` 완전한 구현 세부 사항은 [Flow에서 인간 피드백](/ko/learn/human-feedback-in-flows) 가이드를 참조하세요. diff --git a/docs/ko/enterprise/guides/deploy-to-amp.mdx b/docs/ko/enterprise/guides/deploy-to-amp.mdx index 5262701ee..66954c840 100644 --- a/docs/ko/enterprise/guides/deploy-to-amp.mdx +++ b/docs/ko/enterprise/guides/deploy-to-amp.mdx @@ -176,6 +176,11 @@ Crew를 GitHub 저장소에 푸시해야 합니다. 아직 Crew를 만들지 않 ![Set Environment Variables](/images/enterprise/set-env-variables.png) + + 프라이빗 Python 패키지를 사용하시나요? 여기에 레지스트리 자격 증명도 추가해야 합니다. 
+ 필요한 변수는 [프라이빗 패키지 레지스트리](/ko/enterprise/guides/private-package-registry)를 참조하세요. + + diff --git a/docs/ko/enterprise/guides/prepare-for-deployment.mdx b/docs/ko/enterprise/guides/prepare-for-deployment.mdx index 9778dde4d..fa4d40109 100644 --- a/docs/ko/enterprise/guides/prepare-for-deployment.mdx +++ b/docs/ko/enterprise/guides/prepare-for-deployment.mdx @@ -256,6 +256,12 @@ Crews와 Flows 모두 `src/project_name/main.py`에 진입점이 있습니다: 1. **LLM API 키** (OpenAI, Anthropic, Google 등) 2. **도구 API 키** - 외부 도구를 사용하는 경우 (Serper 등) + + 프로젝트가 **프라이빗 PyPI 레지스트리**의 패키지에 의존하는 경우, 레지스트리 인증 자격 증명도 + 환경 변수로 구성해야 합니다. 자세한 내용은 + [프라이빗 패키지 레지스트리](/ko/enterprise/guides/private-package-registry) 가이드를 참조하세요. + + 구성 문제를 조기에 발견하기 위해 배포 전에 동일한 환경 변수로 로컬에서 프로젝트를 테스트하세요. diff --git a/docs/ko/enterprise/guides/private-package-registry.mdx b/docs/ko/enterprise/guides/private-package-registry.mdx new file mode 100644 index 000000000..41b07731f --- /dev/null +++ b/docs/ko/enterprise/guides/private-package-registry.mdx @@ -0,0 +1,261 @@ +--- +title: "프라이빗 패키지 레지스트리" +description: "CrewAI AMP에서 인증된 PyPI 레지스트리의 프라이빗 Python 패키지 설치하기" +icon: "lock" +mode: "wide" +--- + + + 이 가이드는 CrewAI AMP에 배포할 때 프라이빗 PyPI 레지스트리(Azure DevOps Artifacts, GitHub Packages, + GitLab, AWS CodeArtifact 등)에서 Python 패키지를 설치하도록 CrewAI 프로젝트를 구성하는 방법을 다룹니다. + + +## 이 가이드가 필요한 경우 + +프로젝트가 공개 PyPI가 아닌 프라이빗 레지스트리에 호스팅된 내부 또는 독점 Python 패키지에 +의존하는 경우, 다음을 수행해야 합니다: + +1. UV에 패키지를 **어디서** 찾을지 알려줍니다 (index URL) +2. UV에 **어떤** 패키지가 해당 index에서 오는지 알려줍니다 (source 매핑) +3. UV가 설치 중에 인증할 수 있도록 **자격 증명**을 제공합니다 + +CrewAI AMP는 의존성 해결 및 설치에 [UV](https://docs.astral.sh/uv/)를 사용합니다. +UV는 `pyproject.toml` 구성과 자격 증명용 환경 변수를 결합하여 인증된 프라이빗 레지스트리를 지원합니다. + +## 1단계: pyproject.toml 구성 + +`pyproject.toml`에서 세 가지 요소가 함께 작동합니다: + +### 1a. 의존성 선언 + +프라이빗 패키지를 다른 의존성과 마찬가지로 `[project.dependencies]`에 추가합니다: + +```toml +[project] +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] +``` + +### 1b. 
index 정의 + +프라이빗 레지스트리를 `[[tool.uv.index]]` 아래에 명명된 index로 등록합니다: + +```toml +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true +``` + + + `name` 필드는 중요합니다 — UV는 이를 사용하여 인증을 위한 환경 변수 이름을 + 구성합니다 (아래 [2단계](#2단계-인증-자격-증명-설정)를 참조하세요). + + `explicit = true`를 설정하면 UV가 모든 패키지에 대해 이 index를 검색하지 않습니다 — + `[tool.uv.sources]`에서 명시적으로 매핑한 패키지만 검색합니다. 이렇게 하면 프라이빗 + 레지스트리에 대한 불필요한 쿼리를 방지하고 의존성 혼동 공격을 차단할 수 있습니다. + + +### 1c. 패키지를 index에 매핑 + +`[tool.uv.sources]`를 사용하여 프라이빗 index에서 해결해야 할 패키지를 UV에 알려줍니다: + +```toml +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +### 전체 예시 + +```toml +[project] +name = "my-crew-project" +version = "0.1.0" +requires-python = ">=3.10,<=3.13" +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] + +[tool.crewai] +type = "crew" + +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true + +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +`pyproject.toml`을 업데이트한 후 lock 파일을 다시 생성합니다: + +```bash +uv lock +``` + + + 업데이트된 `uv.lock`을 항상 `pyproject.toml` 변경 사항과 함께 커밋하세요. + lock 파일은 배포에 필수입니다 — [배포 준비하기](/ko/enterprise/guides/prepare-for-deployment)를 참조하세요. + + +## 2단계: 인증 자격 증명 설정 + +UV는 `pyproject.toml`에서 정의한 index 이름을 기반으로 한 명명 규칙을 따르는 +환경 변수를 사용하여 프라이빗 index에 인증합니다: + +``` +UV_INDEX_{UPPER_NAME}_USERNAME +UV_INDEX_{UPPER_NAME}_PASSWORD +``` + +여기서 `{UPPER_NAME}`은 index 이름을 **대문자**로 변환하고 **하이픈을 언더스코어로 대체**한 것입니다. + +예를 들어, `my-private-registry`라는 이름의 index는 다음을 사용합니다: + +| 변수 | 값 | +|------|-----| +| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | 레지스트리 사용자 이름 또는 토큰 이름 | +| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | 레지스트리 비밀번호 또는 토큰/PAT | + + + 이 환경 변수는 CrewAI AMP **환경 변수** 설정을 통해 **반드시** 추가해야 합니다 — + 전역적으로 또는 배포 수준에서. `.env` 파일에 설정하거나 프로젝트에 하드코딩할 수 없습니다. 
+ + 아래 [AMP에서 환경 변수 설정](#amp에서-환경-변수-설정)을 참조하세요. + + +## 레지스트리 제공업체 참조 + +아래 표는 일반적인 레지스트리 제공업체의 index URL 형식과 자격 증명 값을 보여줍니다. +자리 표시자 값을 실제 조직 및 피드 세부 정보로 대체하세요. + +| 제공업체 | Index URL | 사용자 이름 | 비밀번호 | +|---------|-----------|-----------|---------| +| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | 비어 있지 않은 임의의 문자열 (예: `token`) | Packaging Read 범위의 Personal Access Token (PAT) | +| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub 사용자 이름 | `read:packages` 범위의 Personal Access Token (classic) | +| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | `read_api` 범위의 Project 또는 Personal Access Token | +| **AWS CodeArtifact** | `aws codeartifact get-repository-endpoint`의 URL 사용 | `aws` | `aws codeartifact get-authorization-token`의 토큰 | +| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64로 인코딩된 서비스 계정 키 | +| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | 사용자 이름 또는 이메일 | API 키 또는 ID 토큰 | +| **자체 호스팅 (devpi, Nexus 등)** | 레지스트리의 simple API URL | 레지스트리 사용자 이름 | 레지스트리 비밀번호 | + + + **AWS CodeArtifact**의 경우 인증 토큰이 주기적으로 만료됩니다. + 만료되면 `UV_INDEX_*_PASSWORD` 값을 갱신해야 합니다. + CI/CD 파이프라인에서 이를 자동화하는 것을 고려하세요. + + +## AMP에서 환경 변수 설정 + +프라이빗 레지스트리 자격 증명은 CrewAI AMP에서 환경 변수로 구성해야 합니다. +두 가지 옵션이 있습니다: + + + + 1. [CrewAI AMP](https://app.crewai.com)에 로그인합니다 + 2. 자동화로 이동합니다 + 3. **Environment Variables** 탭을 엽니다 + 4. 각 변수 (`UV_INDEX_*_USERNAME` 및 `UV_INDEX_*_PASSWORD`)에 값을 추가합니다 + + 자세한 내용은 [AMP에 배포하기 — 환경 변수 설정하기](/ko/enterprise/guides/deploy-to-amp#환경-변수-설정하기) 단계를 참조하세요. + + + `crewai deploy create`를 실행하기 전에 로컬 `.env` 파일에 변수를 추가합니다. + CLI가 이를 안전하게 플랫폼으로 전송합니다: + + ```bash + # .env + OPENAI_API_KEY=sk-... 
+ UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token + UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here + ``` + + ```bash + crewai deploy create + ``` + + + + + 자격 증명을 저장소에 **절대** 커밋하지 마세요. 모든 비밀 정보에는 AMP 환경 변수를 사용하세요. + `.env` 파일은 `.gitignore`에 포함되어야 합니다. + + +기존 배포의 자격 증명을 업데이트하려면 [Crew 업데이트하기 — 환경 변수](/ko/enterprise/guides/update-crew)를 참조하세요. + +## 전체 동작 흐름 + +CrewAI AMP가 자동화를 빌드할 때, 해결 흐름은 다음과 같이 작동합니다: + + + + AMP가 저장소를 가져오고 `pyproject.toml`과 `uv.lock`을 읽습니다. + + + UV가 `[tool.uv.sources]`를 읽어 각 패키지가 어떤 index에서 와야 하는지 결정합니다. + + + 각 프라이빗 index에 대해 UV가 AMP에서 구성한 환경 변수에서 + `UV_INDEX_{NAME}_USERNAME`과 `UV_INDEX_{NAME}_PASSWORD`를 조회합니다. + + + UV가 공개(PyPI) 및 프라이빗(레지스트리) 패키지를 모두 다운로드하고 설치합니다. + + + 모든 의존성이 사용 가능한 상태에서 crew 또는 flow가 시작됩니다. + + + +## 문제 해결 + +### 빌드 중 인증 오류 + +**증상**: 프라이빗 패키지를 해결할 때 `401 Unauthorized` 또는 `403 Forbidden`으로 빌드가 실패합니다. + +**확인사항**: +- `UV_INDEX_*` 환경 변수 이름이 index 이름과 정확히 일치하는지 확인합니다 (대문자, 하이픈 -> 언더스코어) +- 자격 증명이 로컬 `.env`뿐만 아니라 AMP 환경 변수에 설정되어 있는지 확인합니다 +- 토큰/PAT에 패키지 피드에 필요한 읽기 권한이 있는지 확인합니다 +- 토큰이 만료되지 않았는지 확인합니다 (특히 AWS CodeArtifact의 경우) + +### 패키지를 찾을 수 없음 + +**증상**: `No matching distribution found for my-private-package`. + +**확인사항**: +- `pyproject.toml`의 index URL이 `/simple/`로 끝나는지 확인합니다 +- `[tool.uv.sources]` 항목이 올바른 패키지 이름을 올바른 index 이름에 매핑하는지 확인합니다 +- 패키지가 실제로 프라이빗 레지스트리에 게시되어 있는지 확인합니다 +- 동일한 자격 증명으로 로컬에서 `uv lock`을 실행하여 해결이 작동하는지 확인합니다 + +### Lock 파일 충돌 + +**증상**: 프라이빗 index를 추가한 후 `uv lock`이 실패하거나 예상치 못한 결과를 생성합니다. + +**해결책**: 로컬에서 자격 증명을 설정하고 다시 생성합니다: + +```bash +export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token +export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat +uv lock +``` + +그런 다음 업데이트된 `uv.lock`을 커밋합니다. + +## 관련 가이드 + + + + 배포 전에 프로젝트 구조와 의존성을 확인합니다. + + + crew 또는 flow를 배포하고 환경 변수를 구성합니다. + + + 환경 변수를 업데이트하고 실행 중인 배포에 변경 사항을 푸시합니다. 
+ + diff --git a/docs/ko/guides/migration/migrating-from-langgraph.mdx b/docs/ko/guides/migration/migrating-from-langgraph.mdx new file mode 100644 index 000000000..fe708602d --- /dev/null +++ b/docs/ko/guides/migration/migrating-from-langgraph.mdx @@ -0,0 +1,518 @@ +--- +title: "LangGraph에서 CrewAI로 옮기기: 엔지니어를 위한 실전 가이드" +description: LangGraph로 이미 구축했다면, 프로젝트를 CrewAI로 빠르게 옮기는 방법을 알아보세요 +icon: switch +mode: "wide" +--- + +LangGraph로 에이전트를 구축해 왔습니다. `StateGraph`와 씨름하고, 조건부 에지를 연결하고, 새벽 2시에 상태 딕셔너리를 디버깅해 본 적도 있죠. 동작은 하지만 — 어느 순간부터 프로덕션으로 가는 더 나은 길이 없을까 고민하게 됩니다. + +있습니다. **CrewAI Flows**는 이벤트 기반 오케스트레이션, 조건부 라우팅, 공유 상태라는 동일한 힘을 훨씬 적은 보일러플레이트와 실제로 다단계 AI 워크플로우를 생각하는 방식에 잘 맞는 정신적 모델로 제공합니다. + +이 글은 핵심 개념을 나란히 비교하고 실제 코드 비교를 보여주며, 다음으로 손이 갈 프레임워크가 왜 CrewAI Flows인지 설명합니다. + +--- + +## 정신적 모델의 전환 + +LangGraph는 **그래프**로 생각하라고 요구합니다: 노드, 에지, 그리고 상태 딕셔너리. 모든 워크플로우는 계산 단계 사이의 전이를 명시적으로 연결하는 방향 그래프입니다. 강력하지만, 특히 워크플로우가 몇 개의 결정 지점이 있는 순차적 흐름일 때 이 추상화는 오버헤드를 가져옵니다. + +CrewAI Flows는 **이벤트**로 생각하라고 요구합니다: 시작하는 메서드, 결과를 듣는 메서드, 실행을 라우팅하는 메서드. 워크플로우의 토폴로지는 명시적 그래프 구성 대신 데코레이터 어노테이션에서 드러납니다. 이것은 단순한 문법 설탕이 아니라 — 파이프라인을 설계하고 읽고 유지하는 방식을 바꿉니다. + +핵심 매핑은 다음과 같습니다: + +| LangGraph 개념 | CrewAI Flows 대응 | +| --- | --- | +| `StateGraph` class | `Flow` class | +| `add_node()` | Methods decorated with `@start`, `@listen` | +| `add_edge()` / `add_conditional_edges()` | `@listen()` / `@router()` decorators | +| `TypedDict` state | Pydantic `BaseModel` state | +| `START` / `END` constants | `@start()` decorator / natural method return | +| `graph.compile()` | `flow.kickoff()` | +| Checkpointer / persistence | Built-in memory (LanceDB-backed) | + +실제로 어떻게 보이는지 살펴보겠습니다. + +--- + +## 데모 1: 간단한 순차 파이프라인 + +주제를 받아 조사하고, 요약을 작성한 뒤, 결과를 포맷팅하는 파이프라인을 만든다고 해봅시다. 각 프레임워크는 이렇게 처리합니다. 
+ +### LangGraph 방식 + +```python +from typing import TypedDict +from langgraph.graph import StateGraph, START, END + +class ResearchState(TypedDict): + topic: str + raw_research: str + summary: str + formatted_output: str + +def research_topic(state: ResearchState) -> dict: + # Call an LLM or search API + result = llm.invoke(f"Research the topic: {state['topic']}") + return {"raw_research": result} + +def write_summary(state: ResearchState) -> dict: + result = llm.invoke( + f"Summarize this research:\n{state['raw_research']}" + ) + return {"summary": result} + +def format_output(state: ResearchState) -> dict: + result = llm.invoke( + f"Format this summary as a polished article section:\n{state['summary']}" + ) + return {"formatted_output": result} + +# Build the graph +graph = StateGraph(ResearchState) +graph.add_node("research", research_topic) +graph.add_node("summarize", write_summary) +graph.add_node("format", format_output) + +graph.add_edge(START, "research") +graph.add_edge("research", "summarize") +graph.add_edge("summarize", "format") +graph.add_edge("format", END) + +# Compile and run +app = graph.compile() +result = app.invoke({"topic": "quantum computing advances in 2026"}) +print(result["formatted_output"]) +``` + +함수를 정의하고 노드로 등록한 다음, 모든 전이를 수동으로 연결합니다. 이렇게 단순한 순서인데도 의례처럼 해야 할 작업이 많습니다. 
+ +### CrewAI Flows 방식 + +```python +from crewai import LLM, Agent, Crew, Process, Task +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ResearchState(BaseModel): + topic: str = "" + raw_research: str = "" + summary: str = "" + formatted_output: str = "" + +class ResearchFlow(Flow[ResearchState]): + @start() + def research_topic(self): + # Option 1: Direct LLM call + result = llm.call(f"Research the topic: {self.state.topic}") + self.state.raw_research = result + return result + + @listen(research_topic) + def write_summary(self, research_output): + # Option 2: A single agent + summarizer = Agent( + role="Research Summarizer", + goal="Produce concise, accurate summaries of research content", + backstory="You are an expert at distilling complex research into clear, " + "digestible summaries.", + llm=llm, + verbose=True, + ) + result = summarizer.kickoff( + f"Summarize this research:\n{self.state.raw_research}" + ) + self.state.summary = str(result) + return self.state.summary + + @listen(write_summary) + def format_output(self, summary_output): + # Option 3: a complete crew (with one or more agents) + formatter = Agent( + role="Content Formatter", + goal="Transform research summaries into polished, publication-ready article sections", + backstory="You are a skilled editor with expertise in structuring and " + "presenting technical content for a general audience.", + llm=llm, + verbose=True, + ) + format_task = Task( + description=f"Format this summary as a polished article section:\n{self.state.summary}", + expected_output="A well-structured, polished article section ready for publication.", + agent=formatter, + ) + crew = Crew( + agents=[formatter], + tasks=[format_task], + process=Process.sequential, + verbose=True, + ) + result = crew.kickoff() + self.state.formatted_output = str(result) + return self.state.formatted_output + +# Run the flow +flow = ResearchFlow() +flow.state.topic = 
"quantum computing advances in 2026" +result = flow.kickoff() +print(flow.state.formatted_output) + +``` + +눈에 띄는 차이점이 있습니다: 그래프 구성 없음, 에지 연결 없음, 컴파일 단계 없음. 실행 순서는 로직이 있는 곳에서 바로 선언됩니다. `@start()`는 진입점을 표시하고, `@listen(method_name)`은 단계들을 연결합니다. 상태는 타입 안전성, 검증, IDE 자동 완성까지 제공하는 제대로 된 Pydantic 모델입니다. + +--- + +## 데모 2: 조건부 라우팅 + +여기서 흥미로워집니다. 콘텐츠 유형에 따라 서로 다른 처리 경로로 라우팅하는 파이프라인을 만든다고 해봅시다. + +### LangGraph 방식 + +```python +from typing import TypedDict, Literal +from langgraph.graph import StateGraph, START, END + +class ContentState(TypedDict): + input_text: str + content_type: str + result: str + +def classify_content(state: ContentState) -> dict: + content_type = llm.invoke( + f"Classify this content as 'technical', 'creative', or 'business':\n{state['input_text']}" + ) + return {"content_type": content_type.strip().lower()} + +def process_technical(state: ContentState) -> dict: + result = llm.invoke(f"Process as technical doc:\n{state['input_text']}") + return {"result": result} + +def process_creative(state: ContentState) -> dict: + result = llm.invoke(f"Process as creative writing:\n{state['input_text']}") + return {"result": result} + +def process_business(state: ContentState) -> dict: + result = llm.invoke(f"Process as business content:\n{state['input_text']}") + return {"result": result} + +# Routing function +def route_content(state: ContentState) -> Literal["technical", "creative", "business"]: + return state["content_type"] + +# Build the graph +graph = StateGraph(ContentState) +graph.add_node("classify", classify_content) +graph.add_node("technical", process_technical) +graph.add_node("creative", process_creative) +graph.add_node("business", process_business) + +graph.add_edge(START, "classify") +graph.add_conditional_edges( + "classify", + route_content, + { + "technical": "technical", + "creative": "creative", + "business": "business", + } +) +graph.add_edge("technical", END) +graph.add_edge("creative", END) +graph.add_edge("business", END) + +app = 
graph.compile() +result = app.invoke({"input_text": "Explain how TCP handshakes work"}) +``` + +별도의 라우팅 함수, 명시적 조건부 에지 매핑, 그리고 모든 분기에 대한 종료 에지가 필요합니다. 라우팅 결정 로직이 그 결정을 만들어 내는 노드와 분리됩니다. + +### CrewAI Flows 방식 + +```python +from crewai import LLM, Agent +from crewai.flow.flow import Flow, listen, router, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ContentState(BaseModel): + input_text: str = "" + content_type: str = "" + result: str = "" + +class ContentFlow(Flow[ContentState]): + @start() + def classify_content(self): + self.state.content_type = ( + llm.call( + f"Classify this content as 'technical', 'creative', or 'business':\n" + f"{self.state.input_text}" + ) + .strip() + .lower() + ) + return self.state.content_type + + @router(classify_content) + def route_content(self, classification): + if classification == "technical": + return "process_technical" + elif classification == "creative": + return "process_creative" + else: + return "process_business" + + @listen("process_technical") + def handle_technical(self): + agent = Agent( + role="Technical Writer", + goal="Produce clear, accurate technical documentation", + backstory="You are an expert technical writer who specializes in " + "explaining complex technical concepts precisely.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as technical doc:\n{self.state.input_text}") + ) + + @listen("process_creative") + def handle_creative(self): + agent = Agent( + role="Creative Writer", + goal="Craft engaging and imaginative creative content", + backstory="You are a talented creative writer with a flair for " + "compelling storytelling and vivid expression.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as creative writing:\n{self.state.input_text}") + ) + + @listen("process_business") + def handle_business(self): + agent = Agent( + role="Business Writer", + goal="Produce professional, results-oriented 
business content", + backstory="You are an experienced business writer who communicates " + "strategy and value clearly to professional audiences.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as business content:\n{self.state.input_text}") + ) + +flow = ContentFlow() +flow.state.input_text = "Explain how TCP handshakes work" +flow.kickoff() +print(flow.state.result) + +``` + +`@router()` 데코레이터는 메서드를 결정 지점으로 만듭니다. 리스너와 매칭되는 문자열을 반환하므로, 매핑 딕셔너리도, 별도의 라우팅 함수도 필요 없습니다. 분기 로직이 Python `if` 문처럼 읽히는 이유는, 실제로 `if` 문이기 때문입니다. + +--- + +## 데모 3: AI 에이전트 Crew를 Flow에 통합하기 + +여기서 CrewAI의 진짜 힘이 드러납니다. Flows는 LLM 호출을 연결하는 것에 그치지 않고 자율적인 에이전트 **Crew** 전체를 오케스트레이션합니다. 이는 LangGraph에 기본으로 대응되는 개념이 없습니다. + +```python +from crewai import Agent, Task, Crew +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +class ArticleState(BaseModel): + topic: str = "" + research: str = "" + draft: str = "" + final_article: str = "" + +class ArticleFlow(Flow[ArticleState]): + + @start() + def run_research_crew(self): + """A full Crew of agents handles research.""" + researcher = Agent( + role="Senior Research Analyst", + goal=f"Produce comprehensive research on: {self.state.topic}", + backstory="You're a veteran analyst known for thorough, " + "well-sourced research reports.", + llm="gpt-4o" + ) + + research_task = Task( + description=f"Research '{self.state.topic}' thoroughly. 
" + "Cover key trends, data points, and expert opinions.", + expected_output="A detailed research brief with sources.", + agent=researcher + ) + + crew = Crew(agents=[researcher], tasks=[research_task]) + result = crew.kickoff() + self.state.research = result.raw + return result.raw + + @listen(run_research_crew) + def run_writing_crew(self, research_output): + """A different Crew handles writing.""" + writer = Agent( + role="Technical Writer", + goal="Write a compelling article based on provided research.", + backstory="You turn complex research into engaging, clear prose.", + llm="gpt-4o" + ) + + editor = Agent( + role="Senior Editor", + goal="Review and polish articles for publication quality.", + backstory="20 years of editorial experience at top tech publications.", + llm="gpt-4o" + ) + + write_task = Task( + description=f"Write an article based on this research:\n{self.state.research}", + expected_output="A well-structured draft article.", + agent=writer + ) + + edit_task = Task( + description="Review, fact-check, and polish the draft article.", + expected_output="A publication-ready article.", + agent=editor + ) + + crew = Crew(agents=[writer, editor], tasks=[write_task, edit_task]) + result = crew.kickoff() + self.state.final_article = result.raw + return result.raw + +# Run the full pipeline +flow = ArticleFlow() +flow.state.topic = "The Future of Edge AI" +flow.kickoff() +print(flow.state.final_article) +``` + +핵심 인사이트는 다음과 같습니다: **Flows는 오케스트레이션 레이어를, Crews는 지능 레이어를 제공합니다.** Flow의 각 단계는 각자의 역할, 목표, 도구를 가진 협업 에이전트 팀을 띄울 수 있습니다. 구조화되고 예측 가능한 제어 흐름 *그리고* 자율적 에이전트 협업 — 두 세계의 장점을 모두 얻습니다. + +LangGraph에서 비슷한 것을 하려면 노드 함수 안에 에이전트 통신 프로토콜, 도구 호출 루프, 위임 로직을 직접 구현해야 합니다. 가능하긴 하지만, 매번 처음부터 배관을 만드는 셈입니다. + +--- + +## 데모 4: 병렬 실행과 동기화 + +실제 파이프라인은 종종 작업을 병렬로 분기하고 결과를 합쳐야 합니다. CrewAI Flows는 `and_`와 `or_` 연산자로 이를 우아하게 처리합니다. 
+
+```python
+from crewai import LLM
+from crewai.flow.flow import Flow, and_, listen, start
+from pydantic import BaseModel
+
+llm = LLM(model="openai/gpt-5.2")
+
+class AnalysisState(BaseModel):
+    topic: str = ""
+    market_data: str = ""
+    tech_analysis: str = ""
+    competitor_intel: str = ""
+    final_report: str = ""
+
+class ParallelAnalysisFlow(Flow[AnalysisState]):
+    @start()
+    def start_method(self):
+        pass
+
+    @listen(start_method)
+    def gather_market_data(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(start_method)
+    def run_tech_analysis(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(start_method)
+    def gather_competitor_intel(self):
+        # Your agentic or deterministic code
+        pass
+
+    @listen(and_(gather_market_data, run_tech_analysis, gather_competitor_intel))
+    def synthesize_report(self):
+        # Your agentic or deterministic code
+        pass
+
+flow = ParallelAnalysisFlow()
+flow.state.topic = "AI-powered developer tools"
+flow.kickoff()
+
+```
+
+동일한 상위 메서드(`start_method`)를 리스닝하는 세 개의 `@listen` 메서드는 병렬로 실행됩니다. `@listen` 데코레이터의 `and_()` 결합자는 `synthesize_report`가 *세 가지* 상위 메서드가 모두 완료된 뒤에만 실행되도록 보장합니다. *어떤* 상위 작업이든 끝나는 즉시 진행하고 싶다면 `or_()`도 사용할 수 있습니다.
+
+LangGraph에서는 병렬 분기, 동기화 노드, 신중한 상태 병합이 포함된 fan-out/fan-in 패턴을 만들어야 하며 — 모든 것을 에지로 명시적으로 연결해야 합니다.
+
+---
+
+## 프로덕션에서 CrewAI Flows를 쓰는 이유
+
+깔끔한 문법을 넘어, Flows는 여러 프로덕션 핵심 이점을 제공합니다:
+
+**내장 상태 지속성.** Flow 상태는 LanceDB에 의해 백업되므로 워크플로우가 크래시에서 살아남고, 재개될 수 있으며, 실행 간에 지식을 축적할 수 있습니다. LangGraph는 별도의 체크포인터를 구성해야 합니다.
+
+**타입 안전한 상태 관리.** Pydantic 모델은 즉시 검증, 직렬화, IDE 지원을 제공합니다. LangGraph의 `TypedDict` 상태는 런타임 검증을 하지 않습니다.
+
+**일급 에이전트 오케스트레이션.** Crews는 기본 프리미티브입니다. 역할, 목표, 배경, 도구를 가진 에이전트를 정의하고, Flow의 구조적 틀 안에서 자율적으로 협업하게 합니다. 다중 에이전트 조율을 다시 만들 필요가 없습니다.
+
+**더 단순한 정신적 모델.** 데코레이터는 의도를 선언합니다. `@start`는 "여기서 시작", `@listen(x)`는 "x 이후 실행", `@router(x)`는 "x 이후 어디로 갈지 결정"을 의미합니다. 코드는 자신이 설명하는 워크플로우처럼 읽힙니다.
+
+**CLI 통합.** `crewai run`으로 Flows를 실행합니다. 별도의 컴파일 단계나 그래프 직렬화가 없습니다. Flow는 Python 클래스이며, 그대로 실행됩니다.
+ +--- + +## 마이그레이션 치트 시트 + +LangGraph 코드베이스를 CrewAI Flows로 옮기고 싶다면, 다음의 실전 변환 가이드를 참고하세요: + +1. **상태를 매핑하세요.** `TypedDict`를 Pydantic `BaseModel`로 변환하고 모든 필드에 기본값을 추가하세요. +2. **노드를 메서드로 변환하세요.** 각 `add_node` 함수는 `Flow` 서브클래스의 메서드가 됩니다. `state["field"]` 읽기는 `self.state.field`로 바꾸세요. +3. **에지를 데코레이터로 교체하세요.** `add_edge(START, "first_node")`는 첫 메서드의 `@start()`가 됩니다. 순차적인 `add_edge("a", "b")`는 `b` 메서드의 `@listen(a)`가 됩니다. +4. **조건부 에지는 `@router`로 교체하세요.** 라우팅 함수와 `add_conditional_edges()` 매핑은 하나의 `@router()` 메서드로 통합하고, 라우트 문자열을 반환하세요. +5. **compile + invoke를 kickoff으로 교체하세요.** `graph.compile()`를 제거하고 `flow.kickoff()`를 호출하세요. +6. **Crew가 들어갈 지점을 고려하세요.** 복잡한 다단계 에이전트 로직이 있는 노드는 Crew로 분리할 후보입니다. 이 부분에서 가장 큰 품질 향상을 체감할 수 있습니다. + +--- + +## 시작하기 + +CrewAI를 설치하고 새 Flow 프로젝트를 스캐폴딩하세요: + +```bash +pip install crewai +crewai create flow my_first_flow +cd my_first_flow +``` + +이렇게 하면 바로 편집 가능한 Flow 클래스, 설정 파일, 그리고 `type = "flow"`가 이미 설정된 `pyproject.toml`이 포함된 프로젝트 구조가 생성됩니다. 다음으로 실행하세요: + +```bash +crewai run +``` + +그 다음부터는 에이전트를 추가하고 리스너를 연결한 뒤, 배포하면 됩니다. + +--- + +## 마무리 + +LangGraph는 AI 워크플로우에 구조가 필요하다는 사실을 생태계에 일깨워 주었습니다. 중요한 교훈이었습니다. 하지만 CrewAI Flows는 그 교훈을 더 빠르게 쓰고, 더 쉽게 읽으며, 프로덕션에서 더 강력한 형태로 제공합니다 — 특히 워크플로우에 여러 에이전트의 협업이 포함될 때 그렇습니다. + +단일 에이전트 체인을 넘는 무엇인가를 만들고 있다면, Flows를 진지하게 검토해 보세요. 데코레이터 기반 모델, Crews의 네이티브 통합, 내장 상태 관리를 통해 배관 작업에 쓰는 시간을 줄이고, 중요한 문제에 더 많은 시간을 쓸 수 있습니다. + +`crewai create flow`로 시작하세요. 후회하지 않을 겁니다. diff --git a/docs/ko/learn/human-feedback-in-flows.mdx b/docs/ko/learn/human-feedback-in-flows.mdx index 23877007e..a6305ca8a 100644 --- a/docs/ko/learn/human-feedback-in-flows.mdx +++ b/docs/ko/learn/human-feedback-in-flows.mdx @@ -98,33 +98,43 @@ def handle_feedback(self, result): `emit`을 지정하면, 데코레이터는 라우터가 됩니다. 
인간의 자유 형식 피드백이 LLM에 의해 해석되어 지정된 outcome 중 하나로 매핑됩니다: ```python Code -@start() -@human_feedback( - message="이 콘텐츠의 출판을 승인하시겠습니까?", - emit=["approved", "rejected", "needs_revision"], - llm="gpt-4o-mini", - default_outcome="needs_revision", -) -def review_content(self): - return "블로그 게시물 초안 내용..." +from crewai.flow.flow import Flow, start, listen, or_ +from crewai.flow.human_feedback import human_feedback -@listen("approved") -def publish(self, result): - print(f"출판 중! 사용자 의견: {result.feedback}") +class ReviewFlow(Flow): + @start() + def generate_content(self): + return "블로그 게시물 초안 내용..." -@listen("rejected") -def discard(self, result): - print(f"폐기됨. 이유: {result.feedback}") + @human_feedback( + message="이 콘텐츠의 출판을 승인하시겠습니까?", + emit=["approved", "rejected", "needs_revision"], + llm="gpt-4o-mini", + default_outcome="needs_revision", + ) + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "블로그 게시물 초안 내용..." -@listen("needs_revision") -def revise(self, result): - print(f"다음을 기반으로 수정 중: {result.feedback}") + @listen("approved") + def publish(self, result): + print(f"출판 중! 사용자 의견: {result.feedback}") + + @listen("rejected") + def discard(self, result): + print(f"폐기됨. 이유: {result.feedback}") ``` +사용자가 "더 자세한 내용이 필요합니다"와 같이 말하면, LLM이 이를 `"needs_revision"`으로 매핑하고, `or_()`를 통해 `review_content`가 다시 트리거됩니다 — 수정 루프가 생성됩니다. outcome이 `"approved"` 또는 `"rejected"`가 될 때까지 루프가 계속됩니다. + LLM은 가능한 경우 구조화된 출력(function calling)을 사용하여 응답이 지정된 outcome 중 하나임을 보장합니다. 이로 인해 라우팅이 신뢰할 수 있고 예측 가능해집니다. + +`@start()` 메서드는 flow 시작 시 한 번만 실행됩니다. 수정 루프가 필요한 경우, start 메서드를 review 메서드와 분리하고 review 메서드에 `@listen(or_("trigger", "revision_outcome"))`를 사용하여 self-loop을 활성화하세요. 
+ + ## HumanFeedbackResult `HumanFeedbackResult` 데이터클래스는 인간 피드백 상호작용에 대한 모든 정보를 포함합니다: @@ -193,116 +203,162 @@ def summarize(self): ```python Code -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult from pydantic import BaseModel class ContentState(BaseModel): - topic: str = "" draft: str = "" - final_content: str = "" revision_count: int = 0 + status: str = "pending" class ContentApprovalFlow(Flow[ContentState]): - """콘텐츠를 생성하고 인간의 승인을 받는 Flow입니다.""" + """콘텐츠를 생성하고 승인될 때까지 반복하는 Flow.""" @start() - def get_topic(self): - self.state.topic = input("어떤 주제에 대해 글을 쓸까요? ") - return self.state.topic - - @listen(get_topic) - def generate_draft(self, topic): - # 실제 사용에서는 LLM을 호출합니다 - self.state.draft = f"# {topic}\n\n{topic}에 대한 초안입니다..." + def generate_draft(self): + self.state.draft = "# AI 안전\n\nAI 안전에 대한 초안..." return self.state.draft - @listen(generate_draft) @human_feedback( - message="이 초안을 검토해 주세요. 'approved', 'rejected'로 답하거나 수정 피드백을 제공해 주세요:", + message="이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요:", emit=["approved", "rejected", "needs_revision"], llm="gpt-4o-mini", default_outcome="needs_revision", ) - def review_draft(self, draft): - return draft + @listen(or_("generate_draft", "needs_revision")) + def review_draft(self): + self.state.revision_count += 1 + return f"{self.state.draft} (v{self.state.revision_count})" @listen("approved") def publish_content(self, result: HumanFeedbackResult): - self.state.final_content = result.output - print("\n✅ 콘텐츠가 승인되어 출판되었습니다!") - print(f"검토자 코멘트: {result.feedback}") + self.state.status = "published" + print(f"콘텐츠 승인 및 게시! 리뷰어 의견: {result.feedback}") return "published" @listen("rejected") def handle_rejection(self, result: HumanFeedbackResult): - print("\n❌ 콘텐츠가 거부되었습니다") - print(f"이유: {result.feedback}") + self.state.status = "rejected" + print(f"콘텐츠 거부됨. 
이유: {result.feedback}") return "rejected" - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - self.state.revision_count += 1 - print(f"\n📝 수정 #{self.state.revision_count} 요청됨") - print(f"피드백: {result.feedback}") - # 실제 Flow에서는 generate_draft로 돌아갈 수 있습니다 - # 이 예제에서는 단순히 확인합니다 - return "revision_requested" - - -# Flow 실행 flow = ContentApprovalFlow() result = flow.kickoff() -print(f"\nFlow 완료. 요청된 수정: {flow.state.revision_count}") +print(f"\nFlow 완료. 상태: {flow.state.status}, 검토 횟수: {flow.state.revision_count}") ``` ```text Output -어떤 주제에 대해 글을 쓸까요? AI 안전 +================================================== +OUTPUT FOR REVIEW: +================================================== +# AI 안전 + +AI 안전에 대한 초안... (v1) +================================================== + +이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요: +(Press Enter to skip, or type your feedback) + +Your feedback: 더 자세한 내용이 필요합니다 ================================================== OUTPUT FOR REVIEW: ================================================== # AI 안전 -AI 안전에 대한 초안입니다... +AI 안전에 대한 초안... (v2) ================================================== -이 초안을 검토해 주세요. 'approved', 'rejected'로 답하거나 수정 피드백을 제공해 주세요: +이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요: (Press Enter to skip, or type your feedback) Your feedback: 좋아 보입니다, 승인! -✅ 콘텐츠가 승인되어 출판되었습니다! -검토자 코멘트: 좋아 보입니다, 승인! +콘텐츠 승인 및 게시! 리뷰어 의견: 좋아 보입니다, 승인! -Flow 완료. 요청된 수정: 0 +Flow 완료. 상태: published, 검토 횟수: 2 ``` ## 다른 데코레이터와 결합하기 -`@human_feedback` 데코레이터는 다른 Flow 데코레이터와 함께 작동합니다. 가장 안쪽 데코레이터(함수에 가장 가까운)로 배치하세요: +`@human_feedback` 데코레이터는 `@start()`, `@listen()`, `or_()`와 함께 작동합니다. 
데코레이터 순서는 두 가지 모두 동작합니다—프레임워크가 양방향으로 속성을 전파합니다—하지만 권장 패턴은 다음과 같습니다: ```python Code -# 올바름: @human_feedback이 가장 안쪽(함수에 가장 가까움) +# Flow 시작 시 일회성 검토 (self-loop 없음) @start() -@human_feedback(message="이것을 검토해 주세요:") +@human_feedback(message="이것을 검토해 주세요:", emit=["approved", "rejected"], llm="gpt-4o-mini") def my_start_method(self): return "content" +# 리스너에서 선형 검토 (self-loop 없음) @listen(other_method) -@human_feedback(message="이것도 검토해 주세요:") +@human_feedback(message="이것도 검토해 주세요:", emit=["good", "bad"], llm="gpt-4o-mini") def my_listener(self, data): return f"processed: {data}" + +# Self-loop: 수정을 위해 반복할 수 있는 검토 +@human_feedback(message="승인 또는 수정 요청?", emit=["approved", "revise"], llm="gpt-4o-mini") +@listen(or_("upstream_method", "revise")) +def review_with_loop(self): + return "content for review" ``` - -`@human_feedback`를 가장 안쪽 데코레이터(마지막/함수에 가장 가까움)로 배치하여 메서드를 직접 래핑하고 Flow 시스템에 전달하기 전에 반환 값을 캡처할 수 있도록 하세요. - +### Self-loop 패턴 + +수정 루프를 만들려면 `or_()`를 사용하여 검토 메서드가 **상위 트리거**와 **자체 수정 outcome**을 모두 리스닝해야 합니다: + +```python Code +@start() +def generate(self): + return "initial draft" + +@human_feedback( + message="승인하시겠습니까, 아니면 변경을 요청하시겠습니까?", + emit=["revise", "approved"], + llm="gpt-4o-mini", + default_outcome="approved", +) +@listen(or_("generate", "revise")) +def review(self): + return "content" + +@listen("approved") +def publish(self): + return "published" +``` + +outcome이 `"revise"`이면 flow가 `review`로 다시 라우팅됩니다 (`or_()`를 통해 `"revise"`를 리스닝하기 때문). outcome이 `"approved"`이면 flow가 `publish`로 계속됩니다. flow 엔진이 라우터를 "한 번만 실행" 규칙에서 제외하여 각 루프 반복마다 재실행할 수 있기 때문에 이 패턴이 동작합니다. 
+ +### 체인된 라우터 + +한 라우터의 outcome으로 트리거된 리스너가 그 자체로 라우터가 될 수 있습니다: + +```python Code +@start() +@human_feedback(message="첫 번째 검토:", emit=["approved", "rejected"], llm="gpt-4o-mini") +def draft(self): + return "draft content" + +@listen("approved") +@human_feedback(message="최종 검토:", emit=["publish", "revise"], llm="gpt-4o-mini") +def final_review(self, prev): + return "final content" + +@listen("publish") +def on_publish(self, prev): + return "published" +``` + +### 제한 사항 + +- **`@start()` 메서드는 한 번만 실행**: `@start()` 메서드는 self-loop할 수 없습니다. 수정 주기가 필요하면 별도의 `@start()` 메서드를 진입점으로 사용하고 `@listen()` 메서드에 `@human_feedback`를 배치하세요. +- **동일 메서드에 `@start()` + `@listen()` 불가**: 이는 Flow 프레임워크 제약입니다. 메서드는 시작점이거나 리스너여야 하며, 둘 다일 수 없습니다. ## 모범 사례 @@ -516,9 +572,9 @@ class ContentPipeline(Flow): @start() @human_feedback( message="이 콘텐츠의 출판을 승인하시겠습니까?", - emit=["approved", "rejected", "needs_revision"], + emit=["approved", "rejected"], llm="gpt-4o-mini", - default_outcome="needs_revision", + default_outcome="rejected", provider=SlackNotificationProvider("#content-reviews"), ) def generate_content(self): @@ -534,11 +590,6 @@ class ContentPipeline(Flow): print(f"보관됨. 
이유: {result.feedback}") return {"status": "archived"} - @listen("needs_revision") - def queue_revision(self, result): - print(f"수정 대기열에 추가됨: {result.feedback}") - return {"status": "revision_needed"} - # Flow 시작 (Slack 응답을 기다리며 일시 중지) def start_content_pipeline(): @@ -594,22 +645,22 @@ async def on_slack_feedback_async(flow_id: str, slack_message: str): ```python Code class ArticleReviewFlow(Flow): @start() - @human_feedback( - message="Review this article draft:", - emit=["approved", "needs_revision"], - llm="gpt-4o-mini", - learn=True, # HITL 학습 활성화 - ) def generate_article(self): return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw + @human_feedback( + message="이 글 초안을 검토해 주세요:", + emit=["approved", "needs_revision"], + llm="gpt-4o-mini", + learn=True, + ) + @listen(or_("generate_article", "needs_revision")) + def review_article(self): + return self.last_human_feedback.output if self.last_human_feedback else "article draft" + @listen("approved") def publish(self): print(f"Publishing: {self.last_human_feedback.output}") - - @listen("needs_revision") - def revise(self): - print("Revising based on feedback...") ``` **첫 번째 실행**: 인간이 원시 출력을 보고 "사실에 대한 주장에는 항상 인용을 포함하세요."라고 말합니다. 교훈이 추출되어 메모리에 저장됩니다. diff --git a/docs/ko/learn/llm-connections.mdx b/docs/ko/learn/llm-connections.mdx index f373d8a89..6976ab8e0 100644 --- a/docs/ko/learn/llm-connections.mdx +++ b/docs/ko/learn/llm-connections.mdx @@ -7,7 +7,7 @@ mode: "wide" ## CrewAI를 LLM에 연결하기 -CrewAI는 LiteLLM을 사용하여 다양한 언어 모델(LLM)에 연결합니다. 이 통합은 높은 다양성을 제공하여, 여러 공급자의 모델을 간단하고 통합된 인터페이스로 사용할 수 있게 해줍니다. +CrewAI는 가장 인기 있는 제공자(OpenAI, Anthropic, Google Gemini, Azure, AWS Bedrock)에 대해 네이티브 SDK 통합을 통해 LLM에 연결하며, 그 외 모든 제공자에 대해서는 LiteLLM을 유연한 폴백으로 사용합니다. 기본적으로 CrewAI는 `gpt-4o-mini` 모델을 사용합니다. 이는 `OPENAI_MODEL_NAME` 환경 변수에 의해 결정되며, 설정되지 않은 경우 기본값은 "gpt-4o-mini"입니다. @@ -41,6 +41,14 @@ LiteLLM은 다음을 포함하되 이에 국한되지 않는 다양한 프로바 지원되는 프로바이더의 전체 및 최신 목록은 [LiteLLM 프로바이더 문서](https://docs.litellm.ai/docs/providers)를 참조하세요. 
+ + 네이티브 통합에서 지원하지 않는 제공자를 사용하려면 LiteLLM을 프로젝트에 의존성으로 추가하세요: + ```bash + uv add 'crewai[litellm]' + ``` + 네이티브 제공자(OpenAI, Anthropic, Google Gemini, Azure, AWS Bedrock)는 자체 SDK extras를 사용합니다 — [공급자 구성 예시](/ko/concepts/llms#공급자-구성-예시)를 참조하세요. + + ## LLM 변경하기 CrewAI agent에서 다른 LLM을 사용하려면 여러 가지 방법이 있습니다: diff --git a/docs/ko/observability/tracing.mdx b/docs/ko/observability/tracing.mdx index 1691f01ae..eae6188f6 100644 --- a/docs/ko/observability/tracing.mdx +++ b/docs/ko/observability/tracing.mdx @@ -35,7 +35,7 @@ crewai login 아직 설치하지 않았다면 CLI 도구와 함께 CrewAI를 설치하세요: ```bash -uv add crewai[tools] +uv add 'crewai[tools]' ``` 그런 다음 CrewAI AMP 계정으로 CLI를 인증하세요: diff --git a/docs/ko/tools/automation/composiotool.mdx b/docs/ko/tools/automation/composiotool.mdx index 15c477e34..890360425 100644 --- a/docs/ko/tools/automation/composiotool.mdx +++ b/docs/ko/tools/automation/composiotool.mdx @@ -18,77 +18,46 @@ Composio는 AI 에이전트를 250개 이상의 도구와 연결할 수 있는 Composio 도구를 프로젝트에 통합하려면 아래 지침을 따르세요: ```shell -pip install composio-crewai +pip install composio composio-crewai pip install crewai ``` -설치가 완료된 후, `composio login`을 실행하거나 Composio API 키를 `COMPOSIO_API_KEY`로 export하세요. Composio API 키는 [여기](https://app.composio.dev)에서 받을 수 있습니다. +설치가 완료되면 Composio API 키를 `COMPOSIO_API_KEY`로 설정하세요. Composio API 키는 [여기](https://platform.composio.dev)에서 받을 수 있습니다. ## 예시 -다음 예시는 도구를 초기화하고 github action을 실행하는 방법을 보여줍니다: +다음 예시는 도구를 초기화하고 GitHub 액션을 실행하는 방법을 보여줍니다: -1. Composio 도구 세트 초기화 +1. CrewAI Provider와 함께 Composio 초기화 ```python Code -from composio_crewai import ComposioToolSet, App, Action +from composio_crewai import ComposioProvider +from composio import Composio from crewai import Agent, Task, Crew -toolset = ComposioToolSet() +composio = Composio(provider=ComposioProvider()) ``` -2. GitHub 계정 연결 +2. 
새 Composio 세션을 만들고 도구 가져오기 -```shell CLI -composio add github -``` -```python Code -request = toolset.initiate_connection(app=App.GITHUB) -print(f"Open this URL to authenticate: {request.redirectUrl}") +```python +session = composio.create( + user_id="your-user-id", + toolkits=["gmail", "github"] # optional, default is all toolkits +) +tools = session.tools() ``` +세션 및 사용자 관리에 대한 자세한 내용은 [여기](https://docs.composio.dev/docs/configuring-sessions)를 참고하세요. -3. 도구 가져오기 +3. 사용자 수동 인증하기 -- 앱에서 모든 도구를 가져오기 (프로덕션 환경에서는 권장하지 않음): +Composio는 에이전트 채팅 세션 중에 사용자를 자동으로 인증합니다. 하지만 `authorize` 메서드를 호출해 사용자를 수동으로 인증할 수도 있습니다. ```python Code -tools = toolset.get_tools(apps=[App.GITHUB]) +connection_request = session.authorize("github") +print(f"Open this URL to authenticate: {connection_request.redirect_url}") ``` -- 태그를 기반으로 도구 필터링: -```python Code -tag = "users" - -filtered_action_enums = toolset.find_actions_by_tags( - App.GITHUB, - tags=[tag], -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` - -- 사용 사례를 기반으로 도구 필터링: -```python Code -use_case = "Star a repository on GitHub" - -filtered_action_enums = toolset.find_actions_by_use_case( - App.GITHUB, use_case=use_case, advanced=False -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` -`advanced`를 True로 설정하면 복잡한 사용 사례를 위한 액션을 가져올 수 있습니다 - -- 특정 도구 사용하기: - -이 데모에서는 GitHub 앱의 `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` 액션을 사용합니다. -```python Code -tools = toolset.get_tools( - actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER] -) -``` -액션 필터링에 대해 더 자세한 내용을 보려면 [여기](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions)를 참고하세요. - 4. 에이전트 정의 ```python Code @@ -116,4 +85,4 @@ crew = Crew(agents=[crewai_agent], tasks=[task]) crew.kickoff() ``` -* 더욱 자세한 도구 리스트는 [여기](https://app.composio.dev)에서 확인하실 수 있습니다. \ No newline at end of file +* 더욱 자세한 도구 목록은 [여기](https://docs.composio.dev/toolkits)에서 확인할 수 있습니다. 
\ No newline at end of file diff --git a/docs/pt-BR/changelog.mdx b/docs/pt-BR/changelog.mdx index 8db611923..6c789abc2 100644 --- a/docs/pt-BR/changelog.mdx +++ b/docs/pt-BR/changelog.mdx @@ -4,6 +4,138 @@ description: "Atualizações de produto, melhorias e correções do CrewAI" icon: "clock" mode: "wide" --- + + ## v1.10.1 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1) + + ## O que mudou + + ### Recursos + - Atualizar Gemini GenAI + + ### Correções de Bugs + - Ajustar o valor do listener do executor para evitar recursão + - Agrupar partes da resposta da função paralela em um único objeto Content no Gemini + - Exibir a saída de pensamento dos modelos de pensamento no Gemini + - Carregar ferramentas MCP e da plataforma quando as ferramentas do agente forem None + - Suportar ambientes Jupyter com loops de eventos em A2A + - Usar ID anônimo para rastreamentos efêmeros + - Passar condicionalmente o cabeçalho plus + - Ignorar o registro do manipulador de sinal em threads não principais para telemetria + - Injetar erros de ferramentas como observações e resolver colisões de nomes + - Atualizar pypdf de 4.x para 6.7.4 para resolver alertas do Dependabot + - Resolver alertas de segurança críticos e altos do Dependabot + + ### Documentação + - Sincronizar a documentação da ferramenta Composio entre locais + + ## Contribuidores + + @giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96 + + + + + ## v1.10.1a1 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## O que Mudou + + ### Funcionalidades + - Implementar suporte a invocação assíncrona em métodos de callback de etapas + - Implementar carregamento sob demanda para dependências pesadas no módulo de Memória + + ### Documentação + - Atualizar changelog e versão para v1.10.0 + + ### Refatoração + - Refatorar métodos de callback de etapas para suportar invocação assíncrona + - Refatorar para 
implementar carregamento sob demanda para dependências pesadas no módulo de Memória + + ### Correções de Bugs + - Corrigir branch para notas de lançamento + + ## Contribuidores + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.1a1 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1a1) + + ## O que Mudou + + ### Refatoração + - Refatorar métodos de callback de etapas para suportar invocação assíncrona + - Implementar carregamento sob demanda para dependências pesadas no módulo de Memória + + ### Documentação + - Atualizar changelog e versão para v1.10.0 + + ### Correções de Bugs + - Criar branch para notas de lançamento + + ## Contribuidores + + @greysonlalonde, @joaomdmoura + + + + + ## v1.10.0 + + [Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.0) + + ## O que Mudou + + ### Recursos + - Aprimorar a resolução da ferramenta MCP e eventos relacionados + - Atualizar a versão do lancedb e adicionar pacotes lance-namespace + - Aprimorar a análise e validação de argumentos JSON no CrewAgentExecutor e BaseTool + - Migrar o cliente HTTP da CLI de requests para httpx + - Adicionar documentação versionada + - Adicionar detecção de versões removidas para notas de versão + - Implementar tratamento de entrada do usuário em Flows + - Aprimorar a funcionalidade de auto-loop HITL nos testes de integração de feedback humano + - Adicionar started_event_id e definir no eventbus + - Atualizar automaticamente tools.specs + + ### Correções de Bugs + - Validar kwargs da ferramenta mesmo quando vazios para evitar TypeError crípticos + - Preservar tipos nulos nos esquemas de parâmetros da ferramenta para LLM + - Mapear output_pydantic/output_json para saída estruturada nativa + - Garantir que callbacks sejam executados/aguardados se forem promessas + - Capturar o nome do método no contexto da exceção + - Preservar tipo enum no resultado do roteador; melhorar tipos + - Corrigir fluxos cíclicos que quebram silenciosamente 
quando o ID de persistência é passado nas entradas + - Corrigir o formato da flag da CLI de --skip-provider para --skip_provider + - Garantir que o fluxo de chamada da ferramenta OpenAI seja finalizado + - Resolver ponteiros $ref de esquema complexos nas ferramentas MCP + - Impor additionalProperties=false nos esquemas + - Rejeitar nomes de scripts reservados para pastas de equipe + - Resolver condição de corrida no teste de emissão de eventos de guardrail + + ### Documentação + - Adicionar nota de dependência litellm para provedores de LLM não nativos + - Esclarecer o modelo de segurança NL2SQL e orientações de fortalecimento + - Adicionar 96 ações ausentes em 9 integrações + + ### Refatoração + - Refatorar crew para provider + - Extrair HITL para padrão de provider + - Melhorar tipagem e registro de hooks + + ## Contribuidores + + @dependabot[bot], @github-actions[bot], @github-code-quality[bot], @greysonlalonde, @heitorado, @hobostay, @joaomdmoura, @johnvan7, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha, @mplachta, @nicoferdi96, @theCyberTech, @thiagomoretto, @vinibrsl + + + ## v1.9.0 diff --git a/docs/pt-BR/concepts/llms.mdx b/docs/pt-BR/concepts/llms.mdx index 3343660ab..22f267c93 100644 --- a/docs/pt-BR/concepts/llms.mdx +++ b/docs/pt-BR/concepts/llms.mdx @@ -105,6 +105,15 @@ Existem diferentes locais no código do CrewAI onde você pode especificar o mod + + O CrewAI oferece integrações nativas via SDK para OpenAI, Anthropic, Google (Gemini API), Azure e AWS Bedrock — sem necessidade de instalação extra além dos extras específicos do provedor (ex.: `uv add "crewai[openai]"`). + + Todos os outros provedores são alimentados pelo **LiteLLM**. Se você planeja usar algum deles, adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` + + ## Exemplos de Configuração de Provedores O CrewAI suporta uma grande variedade de provedores de LLM, cada um com recursos, métodos de autenticação e capacidades de modelo únicos. 
@@ -214,6 +223,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co | `meta_llama/Llama-4-Maverick-17B-128E-Instruct-FP8` | 128k | 4028 | Texto, Imagem | Texto | | `meta_llama/Llama-3.3-70B-Instruct` | 128k | 4028 | Texto | Texto | | `meta_llama/Llama-3.3-8B-Instruct` | 128k | 4028 | Texto | Texto | + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -354,6 +368,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co | gemini-1.5-flash | 1M tokens | Modelo multimodal equilibrado, bom para maioria das tarefas | | gemini-1.5-flash-8B | 1M tokens | Mais rápido, mais eficiente em custo, adequado para tarefas de alta frequência | | gemini-1.5-pro | 2M tokens | Melhor desempenho para uma ampla variedade de tarefas de raciocínio, incluindo lógica, codificação e colaboração criativa | + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -438,6 +457,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co model="sagemaker/" ) ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -453,6 +477,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co temperature=0.7 ) ``` + + **Nota:** Este provedor usa o LiteLLM. 
Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -539,6 +568,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co | rakuten/rakutenai-7b-instruct | 1.024 tokens | LLM topo de linha, compreensão, raciocínio e geração textual.| | rakuten/rakutenai-7b-chat | 1.024 tokens | LLM topo de linha, compreensão, raciocínio e geração textual.| | baichuan-inc/baichuan2-13b-chat | 4.096 tokens | Suporte a chat em chinês/inglês, programação, matemática, seguir instruções, resolver quizzes.| + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -579,6 +613,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co # ... ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -600,6 +639,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co | Llama 3.1 70B/8B | 131.072 tokens | Alta performance e tarefas de contexto grande| | Llama 3.2 Série | 8.192 tokens | Tarefas gerais | | Mixtral 8x7B | 32.768 tokens | Equilíbrio entre performance e contexto | + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -622,6 +666,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co base_url="https://api.watsonx.ai/v1" ) ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -635,6 +684,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co base_url="http://localhost:11434" ) ``` + + **Nota:** Este provedor usa o LiteLLM. 
Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -650,6 +704,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co temperature=0.7 ) ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -665,6 +724,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co base_url="https://api.perplexity.ai/" ) ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -679,6 +743,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct" ) ``` + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -702,6 +771,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co | Llama 3.2 Série | 8.192 tokens | Tarefas gerais e multimodais | | Llama 3.3 70B | Até 131.072 tokens | Desempenho e qualidade de saída elevada | | Família Qwen2 | 8.192 tokens | Desempenho e qualidade de saída elevada | + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -727,6 +801,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co - Equilíbrio entre velocidade e qualidade - Suporte a longas janelas de contexto + + **Nota:** Este provedor usa o LiteLLM. Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` @@ -749,6 +828,11 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co - openrouter/deepseek/deepseek-r1 - openrouter/deepseek/deepseek-chat + + **Nota:** Este provedor usa o LiteLLM. 
Adicione-o como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` diff --git a/docs/pt-BR/enterprise/features/flow-hitl-management.mdx b/docs/pt-BR/enterprise/features/flow-hitl-management.mdx index 1a6651203..d1f05e55f 100644 --- a/docs/pt-BR/enterprise/features/flow-hitl-management.mdx +++ b/docs/pt-BR/enterprise/features/flow-hitl-management.mdx @@ -38,22 +38,21 @@ O CrewAI Enterprise oferece um sistema abrangente de gerenciamento Human-in-the- Configure checkpoints de revisão humana em seus Flows usando o decorador `@human_feedback`. Quando a execução atinge um ponto de revisão, o sistema pausa, notifica o responsável via email e aguarda uma resposta. ```python -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult class ContentApprovalFlow(Flow): @start() def generate_content(self): - # IA gera conteúdo return "Texto de marketing gerado para campanha Q1..." - @listen(generate_content) @human_feedback( message="Por favor, revise este conteúdo para conformidade com a marca:", emit=["approved", "rejected", "needs_revision"], ) - def review_content(self, content): - return content + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "Texto de marketing para revisão..." @listen("approved") def publish_content(self, result: HumanFeedbackResult): @@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow): @listen("rejected") def archive_content(self, result: HumanFeedbackResult): print(f"Conteúdo rejeitado. Motivo: {result.feedback}") - - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - print(f"Revisão solicitada: {result.feedback}") ``` Para detalhes completos de implementação, consulte o guia [Feedback Humano em Flows](/pt-BR/learn/human-feedback-in-flows). 
diff --git a/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx b/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx index c6dc35018..7d469b993 100644 --- a/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx +++ b/docs/pt-BR/enterprise/guides/deploy-to-amp.mdx @@ -176,6 +176,11 @@ Você precisa enviar seu crew para um repositório do GitHub. Caso ainda não te ![Definir Variáveis de Ambiente](/images/enterprise/set-env-variables.png) + + Usando pacotes Python privados? Você também precisará adicionar suas credenciais de registro aqui. + Consulte [Registros de Pacotes Privados](/pt-BR/enterprise/guides/private-package-registry) para as variáveis necessárias. + + diff --git a/docs/pt-BR/enterprise/guides/prepare-for-deployment.mdx b/docs/pt-BR/enterprise/guides/prepare-for-deployment.mdx index bf81b8f7a..f22679759 100644 --- a/docs/pt-BR/enterprise/guides/prepare-for-deployment.mdx +++ b/docs/pt-BR/enterprise/guides/prepare-for-deployment.mdx @@ -256,6 +256,12 @@ Antes da implantação, certifique-se de ter: 1. **Chaves de API de LLM** prontas (OpenAI, Anthropic, Google, etc.) 2. **Chaves de API de ferramentas** se estiver usando ferramentas externas (Serper, etc.) + + Se seu projeto depende de pacotes de um **registro PyPI privado**, você também precisará configurar + credenciais de autenticação do registro como variáveis de ambiente. Consulte o guia + [Registros de Pacotes Privados](/pt-BR/enterprise/guides/private-package-registry) para mais detalhes. + + Teste seu projeto localmente com as mesmas variáveis de ambiente antes de implantar para detectar problemas de configuração antecipadamente. 
diff --git a/docs/pt-BR/enterprise/guides/private-package-registry.mdx b/docs/pt-BR/enterprise/guides/private-package-registry.mdx new file mode 100644 index 000000000..3950ead8d --- /dev/null +++ b/docs/pt-BR/enterprise/guides/private-package-registry.mdx @@ -0,0 +1,263 @@ +--- +title: "Registros de Pacotes Privados" +description: "Instale pacotes Python privados de registros PyPI autenticados no CrewAI AMP" +icon: "lock" +mode: "wide" +--- + + + Este guia aborda como configurar seu projeto CrewAI para instalar pacotes Python + de registros PyPI privados (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.) + ao implantar no CrewAI AMP. + + +## Quando Você Precisa Disso + +Se seu projeto depende de pacotes Python internos ou proprietários hospedados em um registro privado +em vez do PyPI público, você precisará: + +1. Informar ao UV **onde** encontrar o pacote (uma URL de index) +2. Informar ao UV **quais** pacotes vêm desse index (um mapeamento de source) +3. Fornecer **credenciais** para que o UV possa autenticar durante a instalação + +O CrewAI AMP usa [UV](https://docs.astral.sh/uv/) para resolução e instalação de dependências. +O UV suporta registros privados autenticados por meio da configuração do `pyproject.toml` combinada +com variáveis de ambiente para credenciais. + +## Passo 1: Configurar o pyproject.toml + +Três elementos trabalham juntos no seu `pyproject.toml`: + +### 1a. Declarar a dependência + +Adicione o pacote privado ao seu `[project.dependencies]` como qualquer outra dependência: + +```toml +[project] +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] +``` + +### 1b. 
Definir o index + +Registre seu registro privado como um index nomeado em `[[tool.uv.index]]`: + +```toml +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true +``` + + + O campo `name` é importante — o UV o utiliza para construir os nomes das variáveis de ambiente + para autenticação (veja o [Passo 2](#passo-2-configurar-credenciais-de-autenticação) abaixo). + + Definir `explicit = true` significa que o UV não consultará esse index para todos os pacotes — apenas + os que você mapear explicitamente em `[tool.uv.sources]`. Isso evita consultas desnecessárias + ao seu registro privado e protege contra ataques de confusão de dependências. + + +### 1c. Mapear o pacote para o index + +Informe ao UV quais pacotes devem ser resolvidos a partir do seu index privado usando `[tool.uv.sources]`: + +```toml +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +### Exemplo completo + +```toml +[project] +name = "my-crew-project" +version = "0.1.0" +requires-python = ">=3.10,<=3.13" +dependencies = [ + "crewai[tools]>=0.100.1,<1.0.0", + "my-private-package>=1.2.0", +] + +[tool.crewai] +type = "crew" + +[[tool.uv.index]] +name = "my-private-registry" +url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/" +explicit = true + +[tool.uv.sources] +my-private-package = { index = "my-private-registry" } +``` + +Após atualizar o `pyproject.toml`, regenere seu arquivo lock: + +```bash +uv lock +``` + + + Sempre faça commit do `uv.lock` atualizado junto com as alterações no `pyproject.toml`. + O arquivo lock é obrigatório para implantação — veja [Preparar para Implantação](/pt-BR/enterprise/guides/prepare-for-deployment). 
+ + +## Passo 2: Configurar Credenciais de Autenticação + +O UV autentica em indexes privados usando variáveis de ambiente que seguem uma convenção de nomenclatura +baseada no nome do index que você definiu no `pyproject.toml`: + +``` +UV_INDEX_{UPPER_NAME}_USERNAME +UV_INDEX_{UPPER_NAME}_PASSWORD +``` + +Onde `{UPPER_NAME}` é o nome do seu index convertido para **maiúsculas** com **hifens substituídos por underscores**. + +Por exemplo, um index chamado `my-private-registry` usa: + +| Variável | Valor | +|----------|-------| +| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Seu nome de usuário ou nome do token do registro | +| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Sua senha ou token/PAT do registro | + + + Essas variáveis de ambiente **devem** ser adicionadas pelas configurações de **Variáveis de Ambiente** do CrewAI AMP — + globalmente ou no nível da implantação. Elas não podem ser definidas em arquivos `.env` ou codificadas no seu projeto. + + Veja [Configurar Variáveis de Ambiente no AMP](#configurar-variáveis-de-ambiente-no-amp) abaixo. + + +## Referência de Provedores de Registro + +A tabela abaixo mostra o formato da URL de index e os valores de credenciais para provedores de registro comuns. +Substitua os valores de exemplo pelos detalhes reais da sua organização e feed. 
+ +| Provedor | URL do Index | Usuário | Senha | +|----------|-------------|---------|-------| +| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Qualquer string não vazia (ex: `token`) | Personal Access Token (PAT) com escopo Packaging Read | +| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | Nome de usuário do GitHub | Personal Access Token (classic) com escopo `read:packages` | +| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project ou Personal Access Token com escopo `read_api` | +| **AWS CodeArtifact** | Use a URL de `aws codeartifact get-repository-endpoint` | `aws` | Token de `aws codeartifact get-authorization-token` | +| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Chave de conta de serviço codificada em Base64 | +| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Nome de usuário ou email | Chave API ou token de identidade | +| **Auto-hospedado (devpi, Nexus, etc.)** | URL da API simple do seu registro | Nome de usuário do registro | Senha do registro | + + + Para **AWS CodeArtifact**, o token de autorização expira periodicamente. + Você precisará atualizar o valor de `UV_INDEX_*_PASSWORD` quando ele expirar. + Considere automatizar isso no seu pipeline de CI/CD. + + +## Configurar Variáveis de Ambiente no AMP + +As credenciais do registro privado devem ser configuradas como variáveis de ambiente no CrewAI AMP. +Você tem duas opções: + + + + 1. Faça login no [CrewAI AMP](https://app.crewai.com) + 2. Navegue até sua automação + 3. Abra a aba **Environment Variables** + 4. Adicione cada variável (`UV_INDEX_*_USERNAME` e `UV_INDEX_*_PASSWORD`) com seu valor + + Veja o passo [Deploy para AMP — Definir Variáveis de Ambiente](/pt-BR/enterprise/guides/deploy-to-amp#definir-as-variáveis-de-ambiente) para detalhes. 
+ + + Adicione as variáveis ao seu arquivo `.env` local antes de executar `crewai deploy create`. + A CLI as transferirá com segurança para a plataforma: + + ```bash + # .env + OPENAI_API_KEY=sk-... + UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token + UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here + ``` + + ```bash + crewai deploy create + ``` + + + + + **Nunca** faça commit de credenciais no seu repositório. Use variáveis de ambiente do AMP para todos os segredos. + O arquivo `.env` deve estar listado no `.gitignore`. + + +Para atualizar credenciais em uma implantação existente, veja [Atualizar Seu Crew — Variáveis de Ambiente](/pt-BR/enterprise/guides/update-crew). + +## Como Tudo se Conecta + +Quando o CrewAI AMP faz o build da sua automação, o fluxo de resolução funciona assim: + + + + O AMP busca seu repositório e lê o `pyproject.toml` e o `uv.lock`. + + + O UV lê `[tool.uv.sources]` para determinar de qual index cada pacote deve vir. + + + Para cada index privado, o UV busca `UV_INDEX_{NAME}_USERNAME` e `UV_INDEX_{NAME}_PASSWORD` + nas variáveis de ambiente que você configurou no AMP. + + + O UV baixa e instala todos os pacotes — tanto públicos (do PyPI) quanto privados (do seu registro). + + + Seu crew ou flow inicia com todas as dependências disponíveis. + + + +## Solução de Problemas + +### Erros de Autenticação Durante o Build + +**Sintoma**: Build falha com `401 Unauthorized` ou `403 Forbidden` ao resolver um pacote privado. + +**Verifique**: +- Os nomes das variáveis de ambiente `UV_INDEX_*` correspondem exatamente ao nome do seu index (maiúsculas, hifens -> underscores) +- As credenciais estão definidas nas variáveis de ambiente do AMP, não apenas em um `.env` local +- Seu token/PAT tem as permissões de leitura necessárias para o feed de pacotes +- O token não expirou (especialmente relevante para AWS CodeArtifact) + +### Pacote Não Encontrado + +**Sintoma**: `No matching distribution found for my-private-package`. 
+ +**Verifique**: +- A URL do index no `pyproject.toml` termina com `/simple/` +- A entrada `[tool.uv.sources]` mapeia o nome correto do pacote para o nome correto do index +- O pacote está realmente publicado no seu registro privado +- Execute `uv lock` localmente com as mesmas credenciais para verificar se a resolução funciona + +### Conflitos no Arquivo Lock + +**Sintoma**: `uv lock` falha ou produz resultados inesperados após adicionar um index privado. + +**Solução**: Defina as credenciais localmente e regenere: + +```bash +export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token +export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat +uv lock +``` + +Em seguida, faça commit do `uv.lock` atualizado. + +## Guias Relacionados + + + + Verifique a estrutura do projeto e as dependências antes de implantar. + + + Implante seu crew ou flow e configure variáveis de ambiente. + + + Atualize variáveis de ambiente e envie alterações para uma implantação em execução. + + diff --git a/docs/pt-BR/guides/migration/migrating-from-langgraph.mdx b/docs/pt-BR/guides/migration/migrating-from-langgraph.mdx new file mode 100644 index 000000000..4889c91f6 --- /dev/null +++ b/docs/pt-BR/guides/migration/migrating-from-langgraph.mdx @@ -0,0 +1,518 @@ +--- +title: "Migrando do LangGraph para o CrewAI: um guia prático para engenheiros" +description: Se você já construiu com LangGraph, saiba como portar rapidamente seus projetos para o CrewAI +icon: switch +mode: "wide" +--- + +Você construiu agentes com LangGraph. Já lutou com o `StateGraph`, ligou arestas condicionais e depurou dicionários de estado às 2 da manhã. Funciona — mas, em algum momento, você começou a se perguntar se existe um caminho melhor para produção. + +Existe. **CrewAI Flows** entrega o mesmo poder — orquestração orientada a eventos, roteamento condicional, estado compartilhado — com muito menos boilerplate e um modelo mental que se alinha a como você realmente pensa sobre fluxos de trabalho de IA em múltiplas etapas. 
+ +Este artigo apresenta os conceitos principais lado a lado, mostra comparações reais de código e demonstra por que o CrewAI Flows é o framework que você vai querer usar a seguir. + +--- + +## A Mudança de Modelo Mental + +LangGraph pede que você pense em **grafos**: nós, arestas e dicionários de estado. Todo workflow é um grafo direcionado em que você conecta explicitamente as transições entre as etapas de computação. É poderoso, mas a abstração traz overhead — especialmente quando o seu fluxo é fundamentalmente sequencial com alguns pontos de decisão. + +CrewAI Flows pede que você pense em **eventos**: métodos que iniciam, métodos que escutam resultados e métodos que roteiam a execução. A topologia do workflow emerge de anotações com decorators, em vez de construção explícita do grafo. Isso não é apenas açúcar sintático — muda como você projeta, lê e mantém seus pipelines. + +Veja o mapeamento principal: + +| Conceito no LangGraph | Equivalente no CrewAI Flows | +| --- | --- | +| Classe `StateGraph` | Classe `Flow` | +| `add_node()` | Métodos decorados com `@start`, `@listen` | +| `add_edge()` / `add_conditional_edges()` | Decorators `@listen()` / `@router()` | +| Estado `TypedDict` | Estado `BaseModel` do Pydantic | +| Constantes `START` / `END` | Decorator `@start()` / retorno natural do método | +| `graph.compile()` | `flow.kickoff()` | +| Checkpointer / persistência | Memória integrada (respaldada pelo LanceDB) | + +Vamos ver como isso fica na prática. + +--- + +## Demo 1: Um Pipeline Sequencial Simples + +Imagine que você está construindo um pipeline que recebe um tema, pesquisa, escreve um resumo e formata a saída. Veja como cada framework lida com isso. 
+ +### Abordagem com LangGraph + +```python +from typing import TypedDict +from langgraph.graph import StateGraph, START, END + +class ResearchState(TypedDict): + topic: str + raw_research: str + summary: str + formatted_output: str + +def research_topic(state: ResearchState) -> dict: + # Call an LLM or search API + result = llm.invoke(f"Research the topic: {state['topic']}") + return {"raw_research": result} + +def write_summary(state: ResearchState) -> dict: + result = llm.invoke( + f"Summarize this research:\n{state['raw_research']}" + ) + return {"summary": result} + +def format_output(state: ResearchState) -> dict: + result = llm.invoke( + f"Format this summary as a polished article section:\n{state['summary']}" + ) + return {"formatted_output": result} + +# Build the graph +graph = StateGraph(ResearchState) +graph.add_node("research", research_topic) +graph.add_node("summarize", write_summary) +graph.add_node("format", format_output) + +graph.add_edge(START, "research") +graph.add_edge("research", "summarize") +graph.add_edge("summarize", "format") +graph.add_edge("format", END) + +# Compile and run +app = graph.compile() +result = app.invoke({"topic": "quantum computing advances in 2026"}) +print(result["formatted_output"]) +``` + +Você define funções, registra-as como nós e conecta manualmente cada transição. Para uma sequência simples como essa, há muita cerimônia. 
+ +### Abordagem com CrewAI Flows + +```python +from crewai import LLM, Agent, Crew, Process, Task +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ResearchState(BaseModel): + topic: str = "" + raw_research: str = "" + summary: str = "" + formatted_output: str = "" + +class ResearchFlow(Flow[ResearchState]): + @start() + def research_topic(self): + # Option 1: Direct LLM call + result = llm.call(f"Research the topic: {self.state.topic}") + self.state.raw_research = result + return result + + @listen(research_topic) + def write_summary(self, research_output): + # Option 2: A single agent + summarizer = Agent( + role="Research Summarizer", + goal="Produce concise, accurate summaries of research content", + backstory="You are an expert at distilling complex research into clear, " + "digestible summaries.", + llm=llm, + verbose=True, + ) + result = summarizer.kickoff( + f"Summarize this research:\n{self.state.raw_research}" + ) + self.state.summary = str(result) + return self.state.summary + + @listen(write_summary) + def format_output(self, summary_output): + # Option 3: a complete crew (with one or more agents) + formatter = Agent( + role="Content Formatter", + goal="Transform research summaries into polished, publication-ready article sections", + backstory="You are a skilled editor with expertise in structuring and " + "presenting technical content for a general audience.", + llm=llm, + verbose=True, + ) + format_task = Task( + description=f"Format this summary as a polished article section:\n{self.state.summary}", + expected_output="A well-structured, polished article section ready for publication.", + agent=formatter, + ) + crew = Crew( + agents=[formatter], + tasks=[format_task], + process=Process.sequential, + verbose=True, + ) + result = crew.kickoff() + self.state.formatted_output = str(result) + return self.state.formatted_output + +# Run the flow +flow = ResearchFlow() 
+flow.state.topic = "quantum computing advances in 2026" +result = flow.kickoff() +print(flow.state.formatted_output) + +``` + +Repare a diferença: nada de construção de grafo, de ligação de arestas, nem de etapa de compilação. A ordem de execução é declarada exatamente onde a lógica vive. `@start()` marca o ponto de entrada, e `@listen(method_name)` encadeia as etapas. O estado é um modelo Pydantic de verdade, com segurança de tipos, validação e auto-complete na IDE. + +--- + +## Demo 2: Roteamento Condicional + +Aqui é que fica interessante. Digamos que você está construindo um pipeline de conteúdo que roteia para diferentes caminhos de processamento com base no tipo de conteúdo detectado. + +### Abordagem com LangGraph + +```python +from typing import TypedDict, Literal +from langgraph.graph import StateGraph, START, END + +class ContentState(TypedDict): + input_text: str + content_type: str + result: str + +def classify_content(state: ContentState) -> dict: + content_type = llm.invoke( + f"Classify this content as 'technical', 'creative', or 'business':\n{state['input_text']}" + ) + return {"content_type": content_type.strip().lower()} + +def process_technical(state: ContentState) -> dict: + result = llm.invoke(f"Process as technical doc:\n{state['input_text']}") + return {"result": result} + +def process_creative(state: ContentState) -> dict: + result = llm.invoke(f"Process as creative writing:\n{state['input_text']}") + return {"result": result} + +def process_business(state: ContentState) -> dict: + result = llm.invoke(f"Process as business content:\n{state['input_text']}") + return {"result": result} + +# Routing function +def route_content(state: ContentState) -> Literal["technical", "creative", "business"]: + return state["content_type"] + +# Build the graph +graph = StateGraph(ContentState) +graph.add_node("classify", classify_content) +graph.add_node("technical", process_technical) +graph.add_node("creative", process_creative) 
+graph.add_node("business", process_business) + +graph.add_edge(START, "classify") +graph.add_conditional_edges( + "classify", + route_content, + { + "technical": "technical", + "creative": "creative", + "business": "business", + } +) +graph.add_edge("technical", END) +graph.add_edge("creative", END) +graph.add_edge("business", END) + +app = graph.compile() +result = app.invoke({"input_text": "Explain how TCP handshakes work"}) +``` + +Você precisa de uma função de roteamento separada, de um mapeamento explícito de arestas condicionais e de arestas de término para cada ramificação. A lógica de roteamento fica desacoplada do nó que produz a decisão. + +### Abordagem com CrewAI Flows + +```python +from crewai import LLM, Agent +from crewai.flow.flow import Flow, listen, router, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class ContentState(BaseModel): + input_text: str = "" + content_type: str = "" + result: str = "" + +class ContentFlow(Flow[ContentState]): + @start() + def classify_content(self): + self.state.content_type = ( + llm.call( + f"Classify this content as 'technical', 'creative', or 'business':\n" + f"{self.state.input_text}" + ) + .strip() + .lower() + ) + return self.state.content_type + + @router(classify_content) + def route_content(self, classification): + if classification == "technical": + return "process_technical" + elif classification == "creative": + return "process_creative" + else: + return "process_business" + + @listen("process_technical") + def handle_technical(self): + agent = Agent( + role="Technical Writer", + goal="Produce clear, accurate technical documentation", + backstory="You are an expert technical writer who specializes in " + "explaining complex technical concepts precisely.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as technical doc:\n{self.state.input_text}") + ) + + @listen("process_creative") + def handle_creative(self): + agent = Agent( + 
role="Creative Writer", + goal="Craft engaging and imaginative creative content", + backstory="You are a talented creative writer with a flair for " + "compelling storytelling and vivid expression.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as creative writing:\n{self.state.input_text}") + ) + + @listen("process_business") + def handle_business(self): + agent = Agent( + role="Business Writer", + goal="Produce professional, results-oriented business content", + backstory="You are an experienced business writer who communicates " + "strategy and value clearly to professional audiences.", + llm=llm, + verbose=True, + ) + self.state.result = str( + agent.kickoff(f"Process as business content:\n{self.state.input_text}") + ) + +flow = ContentFlow() +flow.state.input_text = "Explain how TCP handshakes work" +flow.kickoff() +print(flow.state.result) + +``` + +O decorator `@router()` transforma um método em um ponto de decisão. Ele retorna uma string que corresponde a um listener — sem dicionários de mapeamento, sem funções de roteamento separadas. A lógica de ramificação parece um `if` em Python porque *é* um. + +--- + +## Demo 3: Integrando Crews de Agentes de IA em Flows + +É aqui que o verdadeiro poder do CrewAI aparece. Flows não servem apenas para encadear chamadas de LLM — elas orquestram **Crews** completas de agentes autônomos. Isso é algo para o qual o LangGraph simplesmente não tem um equivalente nativo. 
+ +```python +from crewai import Agent, Task, Crew +from crewai.flow.flow import Flow, listen, start +from pydantic import BaseModel + +class ArticleState(BaseModel): + topic: str = "" + research: str = "" + draft: str = "" + final_article: str = "" + +class ArticleFlow(Flow[ArticleState]): + + @start() + def run_research_crew(self): + """A full Crew of agents handles research.""" + researcher = Agent( + role="Senior Research Analyst", + goal=f"Produce comprehensive research on: {self.state.topic}", + backstory="You're a veteran analyst known for thorough, " + "well-sourced research reports.", + llm="gpt-4o" + ) + + research_task = Task( + description=f"Research '{self.state.topic}' thoroughly. " + "Cover key trends, data points, and expert opinions.", + expected_output="A detailed research brief with sources.", + agent=researcher + ) + + crew = Crew(agents=[researcher], tasks=[research_task]) + result = crew.kickoff() + self.state.research = result.raw + return result.raw + + @listen(run_research_crew) + def run_writing_crew(self, research_output): + """A different Crew handles writing.""" + writer = Agent( + role="Technical Writer", + goal="Write a compelling article based on provided research.", + backstory="You turn complex research into engaging, clear prose.", + llm="gpt-4o" + ) + + editor = Agent( + role="Senior Editor", + goal="Review and polish articles for publication quality.", + backstory="20 years of editorial experience at top tech publications.", + llm="gpt-4o" + ) + + write_task = Task( + description=f"Write an article based on this research:\n{self.state.research}", + expected_output="A well-structured draft article.", + agent=writer + ) + + edit_task = Task( + description="Review, fact-check, and polish the draft article.", + expected_output="A publication-ready article.", + agent=editor + ) + + crew = Crew(agents=[writer, editor], tasks=[write_task, edit_task]) + result = crew.kickoff() + self.state.final_article = result.raw + return result.raw 
+ +# Run the full pipeline +flow = ArticleFlow() +flow.state.topic = "The Future of Edge AI" +flow.kickoff() +print(flow.state.final_article) +``` + +Este é o insight-chave: **Flows fornecem a camada de orquestração, e Crews fornecem a camada de inteligência.** Cada etapa em um Flow pode subir uma equipe completa de agentes colaborativos, cada um com seus próprios papéis, objetivos e ferramentas. Você obtém fluxo de controle estruturado e previsível *e* colaboração autônoma de agentes — o melhor dos dois mundos. + +No LangGraph, alcançar algo similar significa implementar manualmente protocolos de comunicação entre agentes, loops de chamada de ferramentas e lógica de delegação dentro das funções dos nós. É possível, mas é encanamento que você constrói do zero todas as vezes. + +--- + +## Demo 4: Execução Paralela e Sincronização + +Pipelines do mundo real frequentemente precisam dividir o trabalho e juntar os resultados. O CrewAI Flows lida com isso de forma elegante com os operadores `and_` e `or_`. 
+ +```python +from crewai import LLM +from crewai.flow.flow import Flow, and_, listen, start +from pydantic import BaseModel + +llm = LLM(model="openai/gpt-5.2") + +class AnalysisState(BaseModel): + topic: str = "" + market_data: str = "" + tech_analysis: str = "" + competitor_intel: str = "" + final_report: str = "" + +class ParallelAnalysisFlow(Flow[AnalysisState]): + @start() + def start_method(self): + pass + + @listen(start_method) + def gather_market_data(self): + # Your agentic or deterministic code + pass + + @listen(start_method) + def run_tech_analysis(self): + # Your agentic or deterministic code + pass + + @listen(start_method) + def gather_competitor_intel(self): + # Your agentic or deterministic code + pass + + @listen(and_(gather_market_data, run_tech_analysis, gather_competitor_intel)) + def synthesize_report(self): + # Your agentic or deterministic code + pass + +flow = ParallelAnalysisFlow() +flow.state.topic = "AI-powered developer tools" +flow.kickoff() + +``` + +Vários decorators `@start()` disparam em paralelo. O combinador `and_()` no decorator `@listen` garante que `synthesize_report` só execute depois que *todos os três* métodos upstream forem concluídos. Também existe `or_()` para quando você quer prosseguir assim que *qualquer* tarefa upstream terminar. + +No LangGraph, você precisaria construir um padrão fan-out/fan-in com ramificações paralelas, um nó de sincronização e uma mesclagem de estado cuidadosa — tudo conectado explicitamente por arestas. + +--- + +## Por que CrewAI Flows em Produção + +Além de uma sintaxe mais limpa, Flows entrega várias vantagens críticas para produção: + +**Persistência de estado integrada.** O estado do Flow é respaldado pelo LanceDB, o que significa que seus workflows podem sobreviver a falhas, ser retomados e acumular conhecimento entre execuções. No LangGraph, você precisa configurar um checkpointer separado. 
+ +**Gerenciamento de estado com segurança de tipos.** Modelos Pydantic oferecem validação, serialização e suporte de IDE prontos para uso. Estados `TypedDict` do LangGraph não validam em runtime. + +**Orquestração de agentes de primeira classe.** Crews são um primitivo nativo. Você define agentes com papéis, objetivos, histórias e ferramentas — e eles colaboram de forma autônoma dentro do envelope estruturado de um Flow. Não é preciso reinventar a coordenação multiagente. + +**Modelo mental mais simples.** Decorators declaram intenção. `@start` significa "comece aqui". `@listen(x)` significa "execute depois de x". `@router(x)` significa "decida para onde ir depois de x". O código lê como o workflow que ele descreve. + +**Integração com CLI.** Execute flows com `crewai run`. Sem etapa de compilação separada, sem serialização de grafo. Seu Flow é uma classe Python, e ele roda como tal. + +--- + +## Cheat Sheet de Migração + +Se você está com uma base de código LangGraph e quer migrar para o CrewAI Flows, aqui vai um guia prático de conversão: + +1. **Mapeie seu estado.** Converta seu `TypedDict` para um `BaseModel` do Pydantic. Adicione valores padrão para todos os campos. +2. **Converta nós em métodos.** Cada função de `add_node` vira um método na sua subclasse de `Flow`. Substitua leituras `state["field"]` por `self.state.field`. +3. **Substitua arestas por decorators.** `add_edge(START, "first_node")` vira `@start()` no primeiro método. A sequência `add_edge("a", "b")` vira `@listen(a)` no método `b`. +4. **Substitua arestas condicionais por `@router`.** A função de roteamento e o mapeamento do `add_conditional_edges()` viram um único método `@router()` que retorna a string de rota. +5. **Troque compile + invoke por kickoff.** Remova `graph.compile()`. Chame `flow.kickoff()`. +6. **Considere onde as Crews se encaixam.** Qualquer nó com lógica complexa de agentes em múltiplas etapas é um candidato a extração para uma Crew. 
É aqui que você verá a maior melhoria de qualidade. + +--- + +## Primeiros Passos + +Instale o CrewAI e crie o scaffold de um novo projeto Flow: + +```bash +pip install crewai +crewai create flow my_first_flow +cd my_first_flow +``` + +Isso gera uma estrutura de projeto com uma classe Flow pronta para edição, arquivos de configuração e um `pyproject.toml` com `type = "flow"` já definido. Execute com: + +```bash +crewai run +``` + +A partir daí, adicione seus agentes, conecte seus listeners e publique. + +--- + +## Considerações Finais + +O LangGraph ensinou ao ecossistema que workflows de IA precisam de estrutura. Essa foi uma lição importante. Mas o CrewAI Flows pega essa lição e a entrega de um jeito mais rápido de escrever, mais fácil de ler e mais poderoso em produção — especialmente quando seus workflows envolvem múltiplos agentes colaborando. + +Se você está construindo algo além de uma cadeia de agente único, dê uma olhada séria no Flows. O modelo baseado em decorators, a integração nativa com Crews e o gerenciamento de estado embutido significam menos tempo com encanamento e mais tempo nos problemas que importam. + +Comece com `crewai create flow`. Você não vai olhar para trás. diff --git a/docs/pt-BR/learn/human-feedback-in-flows.mdx b/docs/pt-BR/learn/human-feedback-in-flows.mdx index b25af542b..ad4d068cd 100644 --- a/docs/pt-BR/learn/human-feedback-in-flows.mdx +++ b/docs/pt-BR/learn/human-feedback-in-flows.mdx @@ -98,33 +98,43 @@ def handle_feedback(self, result): Quando você especifica `emit`, o decorador se torna um roteador. O feedback livre do humano é interpretado por um LLM e mapeado para um dos outcomes especificados: ```python Code -@start() -@human_feedback( - message="Você aprova este conteúdo para publicação?", - emit=["approved", "rejected", "needs_revision"], - llm="gpt-4o-mini", - default_outcome="needs_revision", -) -def review_content(self): - return "Rascunho do post do blog aqui..." 
+from crewai.flow.flow import Flow, start, listen, or_ +from crewai.flow.human_feedback import human_feedback -@listen("approved") -def publish(self, result): - print(f"Publicando! Usuário disse: {result.feedback}") +class ReviewFlow(Flow): + @start() + def generate_content(self): + return "Rascunho do post do blog aqui..." -@listen("rejected") -def discard(self, result): - print(f"Descartando. Motivo: {result.feedback}") + @human_feedback( + message="Você aprova este conteúdo para publicação?", + emit=["approved", "rejected", "needs_revision"], + llm="gpt-4o-mini", + default_outcome="needs_revision", + ) + @listen(or_("generate_content", "needs_revision")) + def review_content(self): + return "Rascunho do post do blog aqui..." -@listen("needs_revision") -def revise(self, result): - print(f"Revisando baseado em: {result.feedback}") + @listen("approved") + def publish(self, result): + print(f"Publicando! Usuário disse: {result.feedback}") + + @listen("rejected") + def discard(self, result): + print(f"Descartando. Motivo: {result.feedback}") ``` +Quando o humano diz algo como "precisa de mais detalhes", o LLM mapeia para `"needs_revision"`, que dispara `review_content` novamente via `or_()` — criando um loop de revisão. O loop continua até que o outcome seja `"approved"` ou `"rejected"`. + O LLM usa saídas estruturadas (function calling) quando disponível para garantir que a resposta seja um dos seus outcomes especificados. Isso torna o roteamento confiável e previsível. + +Um método `@start()` só executa uma vez no início do flow. Se você precisa de um loop de revisão, separe o método start do método de revisão e use `@listen(or_("trigger", "revision_outcome"))` no método de revisão para habilitar o self-loop. 
+ + ## HumanFeedbackResult O dataclass `HumanFeedbackResult` contém todas as informações sobre uma interação de feedback humano: @@ -193,116 +203,162 @@ Aqui está um exemplo completo implementando um fluxo de revisão e aprovação ```python Code -from crewai.flow.flow import Flow, start, listen +from crewai.flow.flow import Flow, start, listen, or_ from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult from pydantic import BaseModel class ContentState(BaseModel): - topic: str = "" draft: str = "" - final_content: str = "" revision_count: int = 0 + status: str = "pending" class ContentApprovalFlow(Flow[ContentState]): - """Um flow que gera conteúdo e obtém aprovação humana.""" + """Um flow que gera conteúdo e faz loop até o humano aprovar.""" @start() - def get_topic(self): - self.state.topic = input("Sobre qual tópico devo escrever? ") - return self.state.topic - - @listen(get_topic) - def generate_draft(self, topic): - # Em uso real, isso chamaria um LLM - self.state.draft = f"# {topic}\n\nEste é um rascunho sobre {topic}..." + def generate_draft(self): + self.state.draft = "# IA Segura\n\nEste é um rascunho sobre IA Segura..." return self.state.draft - @listen(generate_draft) @human_feedback( - message="Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneça feedback de revisão:", + message="Por favor, revise este rascunho. 
Aprove, rejeite ou descreva o que precisa mudar:", emit=["approved", "rejected", "needs_revision"], llm="gpt-4o-mini", default_outcome="needs_revision", ) - def review_draft(self, draft): - return draft + @listen(or_("generate_draft", "needs_revision")) + def review_draft(self): + self.state.revision_count += 1 + return f"{self.state.draft} (v{self.state.revision_count})" @listen("approved") def publish_content(self, result: HumanFeedbackResult): - self.state.final_content = result.output - print("\n✅ Conteúdo aprovado e publicado!") - print(f"Comentário do revisor: {result.feedback}") + self.state.status = "published" + print(f"Conteúdo aprovado e publicado! Revisor disse: {result.feedback}") return "published" @listen("rejected") def handle_rejection(self, result: HumanFeedbackResult): - print("\n❌ Conteúdo rejeitado") - print(f"Motivo: {result.feedback}") + self.state.status = "rejected" + print(f"Conteúdo rejeitado. Motivo: {result.feedback}") return "rejected" - @listen("needs_revision") - def revise_content(self, result: HumanFeedbackResult): - self.state.revision_count += 1 - print(f"\n📝 Revisão #{self.state.revision_count} solicitada") - print(f"Feedback: {result.feedback}") - # Em um flow real, você pode voltar para generate_draft - # Para este exemplo, apenas reconhecemos - return "revision_requested" - - -# Executar o flow flow = ContentApprovalFlow() result = flow.kickoff() -print(f"\nFlow concluído. Revisões solicitadas: {flow.state.revision_count}") +print(f"\nFlow finalizado. Status: {flow.state.status}, Revisões: {flow.state.revision_count}") ``` ```text Output -Sobre qual tópico devo escrever? Segurança em IA +================================================== +OUTPUT FOR REVIEW: +================================================== +# IA Segura + +Este é um rascunho sobre IA Segura... (v1) +================================================== + +Por favor, revise este rascunho. 
Aprove, rejeite ou descreva o que precisa mudar: +(Press Enter to skip, or type your feedback) + +Your feedback: Preciso de mais detalhes sobre segurança em IA. ================================================== OUTPUT FOR REVIEW: ================================================== -# Segurança em IA +# IA Segura -Este é um rascunho sobre Segurança em IA... +Este é um rascunho sobre IA Segura... (v2) ================================================== -Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneça feedback de revisão: +Por favor, revise este rascunho. Aprove, rejeite ou descreva o que precisa mudar: (Press Enter to skip, or type your feedback) Your feedback: Parece bom, aprovado! -✅ Conteúdo aprovado e publicado! -Comentário do revisor: Parece bom, aprovado! +Conteúdo aprovado e publicado! Revisor disse: Parece bom, aprovado! -Flow concluído. Revisões solicitadas: 0 +Flow finalizado. Status: published, Revisões: 2 ``` ## Combinando com Outros Decoradores -O decorador `@human_feedback` funciona com outros decoradores de flow. Coloque-o como o decorador mais interno (mais próximo da função): +O decorador `@human_feedback` funciona com `@start()`, `@listen()` e `or_()`. 
Ambas as ordens de decoradores funcionam — o framework propaga atributos em ambas as direções — mas os padrões recomendados são: ```python Code -# Correto: @human_feedback é o mais interno (mais próximo da função) +# Revisão única no início do flow (sem self-loop) @start() -@human_feedback(message="Revise isto:") +@human_feedback(message="Revise isto:", emit=["approved", "rejected"], llm="gpt-4o-mini") def my_start_method(self): return "content" +# Revisão linear em um listener (sem self-loop) @listen(other_method) -@human_feedback(message="Revise isto também:") +@human_feedback(message="Revise isto também:", emit=["good", "bad"], llm="gpt-4o-mini") def my_listener(self, data): return f"processed: {data}" + +# Self-loop: revisão que pode voltar para revisões +@human_feedback(message="Aprovar ou revisar?", emit=["approved", "revise"], llm="gpt-4o-mini") +@listen(or_("upstream_method", "revise")) +def review_with_loop(self): + return "content for review" ``` - -Coloque `@human_feedback` como o decorador mais interno (último/mais próximo da função) para que ele envolva o método diretamente e possa capturar o valor de retorno antes de passar para o sistema de flow. - +### Padrão de self-loop + +Para criar um loop de revisão, o método de revisão deve escutar **ambos** um gatilho upstream e seu próprio outcome de revisão usando `or_()`: + +```python Code +@start() +def generate(self): + return "initial draft" + +@human_feedback( + message="Aprovar ou solicitar alterações?", + emit=["revise", "approved"], + llm="gpt-4o-mini", + default_outcome="approved", +) +@listen(or_("generate", "revise")) +def review(self): + return "content" + +@listen("approved") +def publish(self): + return "published" +``` + +Quando o outcome é `"revise"`, o flow roteia de volta para `review` (porque ele escuta `"revise"` via `or_()`). Quando o outcome é `"approved"`, o flow continua para `publish`. 
Isso funciona porque o engine de flow isenta roteadores da regra "fire once", permitindo que eles re-executem em cada iteração do loop. + +### Roteadores encadeados + +Um listener disparado pelo outcome de um roteador pode ser ele mesmo um roteador: + +```python Code +@start() +@human_feedback(message="Primeira revisão:", emit=["approved", "rejected"], llm="gpt-4o-mini") +def draft(self): + return "draft content" + +@listen("approved") +@human_feedback(message="Revisão final:", emit=["publish", "revise"], llm="gpt-4o-mini") +def final_review(self, prev): + return "final content" + +@listen("publish") +def on_publish(self, prev): + return "published" +``` + +### Limitações + +- **Métodos `@start()` executam uma vez**: Um método `@start()` não pode fazer self-loop. Se você precisa de um ciclo de revisão, use um método `@start()` separado como ponto de entrada e coloque o `@human_feedback` em um método `@listen()`. +- **Sem `@start()` + `@listen()` no mesmo método**: Esta é uma restrição do framework de Flow. Um método é ou um ponto de início ou um listener, não ambos. ## Melhores Práticas @@ -516,9 +572,9 @@ class ContentPipeline(Flow): @start() @human_feedback( message="Aprova este conteúdo para publicação?", - emit=["approved", "rejected", "needs_revision"], + emit=["approved", "rejected"], llm="gpt-4o-mini", - default_outcome="needs_revision", + default_outcome="rejected", provider=SlackNotificationProvider("#content-reviews"), ) def generate_content(self): @@ -534,11 +590,6 @@ class ContentPipeline(Flow): print(f"Arquivado. 
Motivo: {result.feedback}") return {"status": "archived"} - @listen("needs_revision") - def queue_revision(self, result): - print(f"Na fila para revisão: {result.feedback}") - return {"status": "revision_needed"} - # Iniciando o flow (vai pausar e aguardar resposta do Slack) def start_content_pipeline(): @@ -594,22 +645,22 @@ Com o tempo, o humano vê saídas pré-revisadas progressivamente melhores porqu ```python Code class ArticleReviewFlow(Flow): @start() + def generate_article(self): + return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw + @human_feedback( - message="Review this article draft:", + message="Revise este rascunho do artigo:", emit=["approved", "needs_revision"], llm="gpt-4o-mini", learn=True, # enable HITL learning ) - def generate_article(self): - return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw + @listen(or_("generate_article", "needs_revision")) + def review_article(self): + return self.last_human_feedback.output if self.last_human_feedback else "article draft" @listen("approved") def publish(self): print(f"Publishing: {self.last_human_feedback.output}") - - @listen("needs_revision") - def revise(self): - print("Revising based on feedback...") ``` **Primeira execução**: O humano vê a saída bruta e diz "Sempre inclua citações para afirmações factuais." A lição é destilada e armazenada na memória. diff --git a/docs/pt-BR/learn/llm-connections.mdx b/docs/pt-BR/learn/llm-connections.mdx index 1021050cb..6c09e7c97 100644 --- a/docs/pt-BR/learn/llm-connections.mdx +++ b/docs/pt-BR/learn/llm-connections.mdx @@ -7,7 +7,7 @@ mode: "wide" ## Conecte o CrewAI a LLMs -O CrewAI utiliza o LiteLLM para conectar-se a uma grande variedade de Modelos de Linguagem (LLMs). Essa integração proporciona grande versatilidade, permitindo que você utilize modelos de inúmeros provedores por meio de uma interface simples e unificada. 
+O CrewAI conecta-se a LLMs por meio de integrações nativas via SDK para os provedores mais populares (OpenAI, Anthropic, Google Gemini, Azure e AWS Bedrock), e usa o LiteLLM como alternativa flexível para todos os demais provedores. Por padrão, o CrewAI usa o modelo `gpt-4o-mini`. Isso é determinado pela variável de ambiente `OPENAI_MODEL_NAME`, que tem como padrão "gpt-4o-mini" se não for definida. @@ -40,6 +40,14 @@ O LiteLLM oferece suporte a uma ampla gama de provedores, incluindo, mas não se Para uma lista completa e sempre atualizada dos provedores suportados, consulte a [documentação de Provedores do LiteLLM](https://docs.litellm.ai/docs/providers). + + Para usar qualquer provedor não coberto por uma integração nativa, adicione o LiteLLM como dependência ao seu projeto: + ```bash + uv add 'crewai[litellm]' + ``` + Provedores nativos (OpenAI, Anthropic, Google Gemini, Azure, AWS Bedrock) usam seus próprios extras de SDK — consulte os [Exemplos de Configuração de Provedores](/pt-BR/concepts/llms#exemplos-de-configuração-de-provedores). + + ## Alterando a LLM Para utilizar uma LLM diferente com seus agentes CrewAI, você tem várias opções: diff --git a/docs/pt-BR/tools/automation/composiotool.mdx b/docs/pt-BR/tools/automation/composiotool.mdx index eb0db8578..60cce293a 100644 --- a/docs/pt-BR/tools/automation/composiotool.mdx +++ b/docs/pt-BR/tools/automation/composiotool.mdx @@ -11,84 +11,53 @@ mode: "wide" Composio é uma plataforma de integração que permite conectar seus agentes de IA a mais de 250 ferramentas. 
Os principais recursos incluem: - **Autenticação de Nível Empresarial**: Suporte integrado para OAuth, Chaves de API, JWT com atualização automática de token -- **Observabilidade Completa**: Logs detalhados de uso das ferramentas, registros de execução, e muito mais +- **Observabilidade Completa**: Logs detalhados de uso das ferramentas, carimbos de data/hora de execução e muito mais ## Instalação Para incorporar as ferramentas Composio em seu projeto, siga as instruções abaixo: ```shell -pip install composio-crewai +pip install composio composio-crewai pip install crewai ``` -Após a conclusão da instalação, execute `composio login` ou exporte sua chave de API do composio como `COMPOSIO_API_KEY`. Obtenha sua chave de API Composio [aqui](https://app.composio.dev) +Após concluir a instalação, defina sua chave de API do Composio como `COMPOSIO_API_KEY`. Obtenha sua chave de API do Composio [aqui](https://platform.composio.dev) ## Exemplo -O exemplo a seguir demonstra como inicializar a ferramenta e executar uma ação do github: +O exemplo a seguir demonstra como inicializar a ferramenta e executar uma ação do GitHub: -1. Inicialize o conjunto de ferramentas Composio +1. Inicialize o Composio com o Provider do CrewAI ```python Code -from composio_crewai import ComposioToolSet, App, Action +from composio_crewai import ComposioProvider +from composio import Composio from crewai import Agent, Task, Crew -toolset = ComposioToolSet() +composio = Composio(provider=ComposioProvider()) ``` -2. Conecte sua conta do GitHub +2. 
Crie uma nova sessão Composio e recupere as ferramentas -```shell CLI -composio add github -``` -```python Code -request = toolset.initiate_connection(app=App.GITHUB) -print(f"Open this URL to authenticate: {request.redirectUrl}") +```python +session = composio.create( + user_id="your-user-id", + toolkits=["gmail", "github"] # optional, default is all toolkits +) +tools = session.tools() ``` +Leia mais sobre sessões e gerenciamento de usuários [aqui](https://docs.composio.dev/docs/configuring-sessions) -3. Obtenha ferramentas +3. Autenticação manual dos usuários -- Recuperando todas as ferramentas de um app (não recomendado em produção): +O Composio autentica automaticamente os usuários durante a sessão de chat do agente. No entanto, você também pode autenticar o usuário manualmente chamando o método `authorize`. ```python Code -tools = toolset.get_tools(apps=[App.GITHUB]) +connection_request = session.authorize("github") +print(f"Open this URL to authenticate: {connection_request.redirect_url}") ``` -- Filtrando ferramentas com base em tags: -```python Code -tag = "users" - -filtered_action_enums = toolset.find_actions_by_tags( - App.GITHUB, - tags=[tag], -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` - -- Filtrando ferramentas com base no caso de uso: -```python Code -use_case = "Star a repository on GitHub" - -filtered_action_enums = toolset.find_actions_by_use_case( - App.GITHUB, use_case=use_case, advanced=False -) - -tools = toolset.get_tools(actions=filtered_action_enums) -``` -Defina `advanced` como True para obter ações para casos de uso complexos - -- Usando ferramentas específicas: - -Neste exemplo, usaremos a ação `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` do app GitHub. -```python Code -tools = toolset.get_tools( - actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER] -) -``` -Saiba mais sobre como filtrar ações [aqui](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions) - 4. 
Defina o agente ```python Code @@ -116,4 +85,4 @@ crew = Crew(agents=[crewai_agent], tasks=[task]) crew.kickoff() ``` -* Uma lista mais detalhada de ferramentas pode ser encontrada [aqui](https://app.composio.dev) \ No newline at end of file +* Uma lista mais detalhada de ferramentas pode ser encontrada [aqui](https://docs.composio.dev/toolkits) \ No newline at end of file diff --git a/lib/crewai-files/pyproject.toml b/lib/crewai-files/pyproject.toml index c53a1c1ff..3ca357622 100644 --- a/lib/crewai-files/pyproject.toml +++ b/lib/crewai-files/pyproject.toml @@ -8,8 +8,8 @@ authors = [ ] requires-python = ">=3.10, <3.14" dependencies = [ - "Pillow~=10.4.0", - "pypdf~=4.0.0", + "Pillow~=12.1.1", + "pypdf~=6.7.5", "python-magic>=0.4.27", "aiocache~=0.12.3", "aiofiles~=24.1.0", diff --git a/lib/crewai-files/src/crewai_files/__init__.py b/lib/crewai-files/src/crewai_files/__init__.py index e457867d1..33db66b65 100644 --- a/lib/crewai-files/src/crewai_files/__init__.py +++ b/lib/crewai-files/src/crewai_files/__init__.py @@ -152,4 +152,4 @@ __all__ = [ "wrap_file_source", ] -__version__ = "1.9.3" +__version__ = "1.10.1" diff --git a/lib/crewai-tools/pyproject.toml b/lib/crewai-tools/pyproject.toml index a683b9967..17b7c71b5 100644 --- a/lib/crewai-tools/pyproject.toml +++ b/lib/crewai-tools/pyproject.toml @@ -8,12 +8,10 @@ authors = [ ] requires-python = ">=3.10, <3.14" dependencies = [ - "lancedb~=0.5.4", "pytube~=15.0.0", "requests~=2.32.5", "docker~=7.1.0", - "crewai==1.9.3", - "lancedb~=0.5.4", + "crewai==1.10.1", "tiktoken~=0.8.0", "beautifulsoup4~=4.13.4", "python-docx~=1.2.0", @@ -110,7 +108,7 @@ stagehand = [ "stagehand>=0.4.1", ] github = [ - "gitpython==3.1.38", + "gitpython>=3.1.41,<4", "PyGithub==1.59.1", ] rag = [ diff --git a/lib/crewai-tools/src/crewai_tools/__init__.py b/lib/crewai-tools/src/crewai_tools/__init__.py index 1ecf814fd..0d4aa945d 100644 --- a/lib/crewai-tools/src/crewai_tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/__init__.py @@ 
-10,7 +10,18 @@ from crewai_tools.aws.s3.writer_tool import S3WriterTool from crewai_tools.tools.ai_mind_tool.ai_mind_tool import AIMindTool from crewai_tools.tools.apify_actors_tool.apify_actors_tool import ApifyActorsTool from crewai_tools.tools.arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool +from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool +from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import ( + BraveLLMContextTool, +) +from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import ( + BraveLocalPOIsDescriptionTool, + BraveLocalPOIsTool, +) +from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool +from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool +from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool from crewai_tools.tools.brightdata_tool.brightdata_dataset import ( BrightDataDatasetTool, ) @@ -201,7 +212,14 @@ __all__ = [ "ArxivPaperTool", "BedrockInvokeAgentTool", "BedrockKBRetrieverTool", + "BraveImageSearchTool", + "BraveLLMContextTool", + "BraveLocalPOIsDescriptionTool", + "BraveLocalPOIsTool", + "BraveNewsSearchTool", "BraveSearchTool", + "BraveVideoSearchTool", + "BraveWebSearchTool", "BrightDataDatasetTool", "BrightDataSearchTool", "BrightDataWebUnlockerTool", @@ -293,4 +311,4 @@ __all__ = [ "ZapierActionTools", ] -__version__ = "1.9.3" +__version__ = "1.10.1" diff --git a/lib/crewai-tools/src/crewai_tools/tools/__init__.py b/lib/crewai-tools/src/crewai_tools/tools/__init__.py index 1dfc614c5..ea7c968f4 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/__init__.py +++ b/lib/crewai-tools/src/crewai_tools/tools/__init__.py @@ -1,7 +1,18 @@ from crewai_tools.tools.ai_mind_tool.ai_mind_tool import AIMindTool from crewai_tools.tools.apify_actors_tool.apify_actors_tool import ApifyActorsTool from 
crewai_tools.tools.arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool +from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool +from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import ( + BraveLLMContextTool, +) +from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import ( + BraveLocalPOIsDescriptionTool, + BraveLocalPOIsTool, +) +from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool +from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool +from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool from crewai_tools.tools.brightdata_tool import ( BrightDataDatasetTool, BrightDataSearchTool, @@ -186,7 +197,14 @@ __all__ = [ "AIMindTool", "ApifyActorsTool", "ArxivPaperTool", + "BraveImageSearchTool", + "BraveLLMContextTool", + "BraveLocalPOIsDescriptionTool", + "BraveLocalPOIsTool", + "BraveNewsSearchTool", "BraveSearchTool", + "BraveVideoSearchTool", + "BraveWebSearchTool", "BrightDataDatasetTool", "BrightDataSearchTool", "BrightDataWebUnlockerTool", diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/base.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/base.py new file mode 100644 index 000000000..25e599736 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/base.py @@ -0,0 +1,322 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from datetime import datetime +import json +import logging +import os +import threading +import time +from typing import Any, ClassVar + +from crewai.tools import BaseTool, EnvVar +from pydantic import BaseModel, Field +import requests + + +logger = logging.getLogger(__name__) + +# Brave API error codes that indicate non-retryable quota/usage exhaustion. 
+_QUOTA_CODES = frozenset({"QUOTA_LIMITED", "USAGE_LIMIT_EXCEEDED"}) + + +def _save_results_to_file(content: str) -> None: + """Saves the search results to a file.""" + filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" + with open(filename, "w") as file: + file.write(content) + + +def _parse_error_body(resp: requests.Response) -> dict[str, Any] | None: + """Extract the structured "error" object from a Brave API error response.""" + try: + body = resp.json() + error = body.get("error") + return error if isinstance(error, dict) else None + except (ValueError, KeyError): + return None + + +def _raise_for_error(resp: requests.Response) -> None: + """Brave Search API error responses contain helpful JSON payloads""" + status = resp.status_code + try: + body = json.dumps(resp.json()) + except (ValueError, KeyError): + body = resp.text[:500] + + raise RuntimeError(f"Brave Search API error (HTTP {status}): {body}") + + +def _is_retryable(resp: requests.Response) -> bool: + """Return True for transient failures that are worth retrying. + + * 429 + RATE_LIMITED — the per-second sliding window is full. + * 5xx — transient server-side errors. + + Quota exhaustion (QUOTA_LIMITED, USAGE_LIMIT_EXCEEDED) is + explicitly excluded: retrying will never succeed until the billing + period resets. + """ + if resp.status_code == 429: + error = _parse_error_body(resp) or {} + return error.get("code") not in _QUOTA_CODES + return 500 <= resp.status_code < 600 + + +def _retry_delay(resp: requests.Response, attempt: int) -> float: + """Compute wait time before the next retry attempt. + + Prefers the server-supplied Retry-After header when available; + falls back to exponential backoff (1s, 2s, 4s, ...). 
+ """ + retry_after = resp.headers.get("Retry-After") + if retry_after is not None: + try: + return max(0.0, float(retry_after)) + except (ValueError, TypeError): + pass + return float(2**attempt) + + +class BraveSearchToolBase(BaseTool, ABC): + """ + Base class for Brave Search API interactions. + + Individual tool subclasses must provide the following: + - search_url + - header_schema (pydantic model) + - args_schema (pydantic model) + - _refine_payload() -> dict[str, Any] + """ + + search_url: str + raw: bool = False + args_schema: type[BaseModel] + header_schema: type[BaseModel] + + # Tool options (legacy parameters) + country: str | None = None + save_file: bool = False + n_results: int = 10 + + env_vars: list[EnvVar] = Field( + default_factory=lambda: [ + EnvVar( + name="BRAVE_API_KEY", + description="API key for Brave Search", + required=True, + ), + ] + ) + + def __init__( + self, + *, + api_key: str | None = None, + headers: dict[str, Any] | None = None, + requests_per_second: float = 1.0, + save_file: bool = False, + raw: bool = False, + timeout: int = 30, + **kwargs: Any, + ): + super().__init__(**kwargs) + + self._api_key = api_key or os.environ.get("BRAVE_API_KEY") + if not self._api_key: + raise ValueError("BRAVE_API_KEY environment variable is required") + + self.raw = bool(raw) + self._timeout = int(timeout) + self.save_file = bool(save_file) + self._requests_per_second = float(requests_per_second) + self._headers = self._build_and_validate_headers(headers or {}) + # Per-instance rate limiting: each instance has its own clock and lock. + # Total process rate is the sum of limits of instances you create. 
+ self._last_request_time: float = 0 + self._rate_limit_lock = threading.Lock() + + @property + def api_key(self) -> str: + return self._api_key + + @property + def headers(self) -> dict[str, Any]: + return self._headers + + def set_headers(self, headers: dict[str, Any]) -> BraveSearchToolBase: + merged = {**self._headers, **{k.lower(): v for k, v in headers.items()}} + self._headers = self._build_and_validate_headers(merged) + return self + + def _build_and_validate_headers(self, headers: dict[str, Any]) -> dict[str, Any]: + normalized = {k.lower(): v for k, v in headers.items()} + normalized.setdefault("x-subscription-token", self._api_key) + normalized.setdefault("accept", "application/json") + + try: + self.header_schema(**normalized) + except Exception as e: + raise ValueError(f"Invalid headers: {e}") from e + + return normalized + + def _rate_limit(self) -> None: + """Enforce minimum interval between requests for this instance. Thread-safe.""" + if self._requests_per_second <= 0: + return + + min_interval = 1.0 / self._requests_per_second + with self._rate_limit_lock: + now = time.time() + next_allowed = self._last_request_time + min_interval + if now < next_allowed: + time.sleep(next_allowed - now) + now = time.time() + self._last_request_time = now + + def _make_request( + self, params: dict[str, Any], *, _max_retries: int = 3 + ) -> dict[str, Any]: + """Execute an HTTP GET against the Brave Search API with retry logic.""" + last_resp: requests.Response | None = None + + # Retry the request up to _max_retries times + for attempt in range(_max_retries): + self._rate_limit() + + # Make the request + try: + resp = requests.get( + self.search_url, + headers=self._headers, + params=params, + timeout=self._timeout, + ) + except requests.ConnectionError as exc: + raise RuntimeError( + f"Brave Search API connection failed: {exc}" + ) from exc + except requests.Timeout as exc: + raise RuntimeError( + f"Brave Search API request timed out after {self._timeout}s: 
{exc}" + ) from exc + + # Log the rate limit headers and request details + logger.debug( + "Brave Search API request: %s %s -> %d", + "GET", + resp.url, + resp.status_code, + ) + + # Response was OK, return the JSON body + if resp.ok: + try: + return resp.json() + except ValueError as exc: + raise RuntimeError( + f"Brave Search API returned invalid JSON (HTTP {resp.status_code}): {exc}" + ) from exc + + # Response was not OK, but is retryable + # (e.g., 429 Too Many Requests, 500 Internal Server Error) + if _is_retryable(resp) and attempt < _max_retries - 1: + delay = _retry_delay(resp, attempt) + logger.warning( + "Brave Search API returned %d. Retrying in %.1fs (attempt %d/%d)", + resp.status_code, + delay, + attempt + 1, + _max_retries, + ) + time.sleep(delay) + last_resp = resp + continue + + # Response was not OK, nor was it retryable + # (e.g., 422 Unprocessable Entity, 400 Bad Request (OPTION_NOT_IN_PLAN)) + _raise_for_error(resp) + + # All retries exhausted + _raise_for_error(last_resp or resp) # type: ignore[possibly-undefined] + return {} # unreachable (here to satisfy the type checker and linter) + + def _run(self, q: str | None = None, **params: Any) -> Any: + # Allow positional usage: tool.run("latest Brave browser features") + if q is not None: + params["q"] = q + + params = self._common_payload_refinement(params) + + # Validate only schema fields + schema_keys = self.args_schema.model_fields + payload_in = {k: v for k, v in params.items() if k in schema_keys} + + try: + validated = self.args_schema(**payload_in) + except Exception as e: + raise ValueError(f"Invalid parameters: {e}") from e + + # The subclass may have additional refinements to apply to the payload, such as goggles or other parameters + payload = self._refine_request_payload(validated.model_dump(exclude_none=True)) + response = self._make_request(payload) + + if not self.raw: + response = self._refine_response(response) + + if self.save_file: + 
_save_results_to_file(json.dumps(response, indent=2)) + + return response + + @abstractmethod + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + """Subclass must implement: transform validated params dict into API request params.""" + raise NotImplementedError + + @abstractmethod + def _refine_response(self, response: dict[str, Any]) -> Any: + """Subclass must implement: transform response dict into a more useful format.""" + raise NotImplementedError + + _EMPTY_VALUES: ClassVar[tuple[None, str, str, list[Any]]] = (None, "", "null", []) + + def _common_payload_refinement(self, params: dict[str, Any]) -> dict[str, Any]: + """Common payload refinement for all tools.""" + # crewAI's schema pipeline (ensure_all_properties_required in + # pydantic_schema_utils.py) marks every property as required so + # that OpenAI strict-mode structured outputs work correctly. + # The side-effect is that the LLM fills in *every* parameter — + # even truly optional ones — using placeholder values such as + # None, "", "null", or []. Only optional fields are affected, + # so we limit the check to those. 
+ fields = self.args_schema.model_fields + params = { + k: v + for k, v in params.items() + # Permit custom and required fields, and fields with non-empty values + if k not in fields or fields[k].is_required() or v not in self._EMPTY_VALUES + } + + # Make sure params has "q" for query instead of "query" or "search_query" + query = params.get("query") or params.get("search_query") + if query is not None and "q" not in params: + params["q"] = query + params.pop("query", None) + params.pop("search_query", None) + + # If "count" was not explicitly provided, use n_results + # (only when the schema actually supports a "count" field) + if "count" in self.args_schema.model_fields: + if "count" not in params and self.n_results is not None: + params["count"] = self.n_results + + # If "country" was not explicitly provided, but self.country is set, use it + # (only when the schema actually supports a "country" field) + if "country" in self.args_schema.model_fields: + if "country" not in params and self.country is not None: + params["country"] = self.country + + return params diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_image_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_image_tool.py new file mode 100644 index 000000000..99aed4235 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_image_tool.py @@ -0,0 +1,42 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.schemas import ( + ImageSearchHeaders, + ImageSearchParams, +) + + +class BraveImageSearchTool(BraveSearchToolBase): + """A tool that performs image searches using the Brave Search API.""" + + name: str = "Brave Image Search" + args_schema: type[BaseModel] = ImageSearchParams + header_schema: type[BaseModel] = ImageSearchHeaders + + description: str = ( + "A tool that performs image searches using the 
Brave Search API. " + "Results are returned as structured JSON data." + ) + + search_url: str = "https://api.search.brave.com/res/v1/images/search" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]: + # Make the response more concise, and easier to consume + results = response.get("results", []) + return [ + { + "title": result.get("title"), + "url": result.get("properties", {}).get("url"), + "dimensions": f"{w}x{h}" + if (w := result.get("properties", {}).get("width")) + and (h := result.get("properties", {}).get("height")) + else None, + } + for result in results + ] diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_llm_context_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_llm_context_tool.py new file mode 100644 index 000000000..da28469bf --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_llm_context_tool.py @@ -0,0 +1,32 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.response_types import LLMContext +from crewai_tools.tools.brave_search_tool.schemas import ( + LLMContextHeaders, + LLMContextParams, +) + + +class BraveLLMContextTool(BraveSearchToolBase): + """A tool that retrieves context for LLM usage from the Brave Search API.""" + + name: str = "Brave LLM Context" + args_schema: type[BaseModel] = LLMContextParams + header_schema: type[BaseModel] = LLMContextHeaders + + description: str = ( + "A tool that retrieves context for LLM usage from the Brave Search API. " + "Results are returned as structured JSON data." 
+ ) + + search_url: str = "https://api.search.brave.com/res/v1/llm/context" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: LLMContext.Response) -> LLMContext.Response: + """The LLM Context response schema is fairly simple. Return as is.""" + return response diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_local_pois_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_local_pois_tool.py new file mode 100644 index 000000000..7667677dc --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_local_pois_tool.py @@ -0,0 +1,109 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.response_types import LocalPOIs +from crewai_tools.tools.brave_search_tool.schemas import ( + LocalPOIsDescriptionHeaders, + LocalPOIsDescriptionParams, + LocalPOIsHeaders, + LocalPOIsParams, +) + + +DayOpeningHours = LocalPOIs.DayOpeningHours +OpeningHours = LocalPOIs.OpeningHours +LocationResult = LocalPOIs.LocationResult +LocalPOIsResponse = LocalPOIs.Response + + +def _flatten_slots(slots: list[DayOpeningHours]) -> list[dict[str, str]]: + """Convert a list of DayOpeningHours dicts into simplified entries.""" + return [ + { + "day": slot["full_name"].lower(), + "opens": slot["opens"], + "closes": slot["closes"], + } + for slot in slots + ] + + +def _simplify_opening_hours(result: LocationResult) -> list[dict[str, str]] | None: + """Collapse opening_hours into a flat list of {day, opens, closes} dicts.""" + hours = result.get("opening_hours") + if not hours: + return None + + entries: list[dict[str, str]] = [] + + current = hours.get("current_day") + if current: + entries.extend(_flatten_slots(current)) + + days = hours.get("days") + if days: + for day_slots in days: + 
entries.extend(_flatten_slots(day_slots)) + + return entries or None + + +class BraveLocalPOIsTool(BraveSearchToolBase): + """A tool that retrieves local POIs using the Brave Search API.""" + + name: str = "Brave Local POIs" + args_schema: type[BaseModel] = LocalPOIsParams + header_schema: type[BaseModel] = LocalPOIsHeaders + description: str = ( + "A tool that retrieves local POIs using the Brave Search API. " + "Results are returned as structured JSON data." + ) + search_url: str = "https://api.search.brave.com/res/v1/local/pois" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: LocalPOIsResponse) -> list[dict[str, Any]]: + results = response.get("results", []) + return [ + { + "title": result.get("title"), + "url": result.get("url"), + "description": result.get("description"), + "address": result.get("postal_address", {}).get("displayAddress"), + "contact": result.get("contact", {}).get("telephone") + or result.get("contact", {}).get("email") + or None, + "opening_hours": _simplify_opening_hours(result), + } + for result in results + ] + + +class BraveLocalPOIsDescriptionTool(BraveSearchToolBase): + """A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API.""" + + name: str = "Brave Local POI Descriptions" + args_schema: type[BaseModel] = LocalPOIsDescriptionParams + header_schema: type[BaseModel] = LocalPOIsDescriptionHeaders + description: str = ( + "A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API. " + "Results are returned as structured JSON data." 
+ ) + search_url: str = "https://api.search.brave.com/res/v1/local/descriptions" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: LocalPOIsResponse) -> list[dict[str, Any]]: + # Make the response more concise, and easier to consume + results = response.get("results", []) + return [ + { + "id": result.get("id"), + "description": result.get("description"), + } + for result in results + ] diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_news_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_news_tool.py new file mode 100644 index 000000000..80872433c --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_news_tool.py @@ -0,0 +1,39 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.schemas import ( + NewsSearchHeaders, + NewsSearchParams, +) + + +class BraveNewsSearchTool(BraveSearchToolBase): + """A tool that performs news searches using the Brave Search API.""" + + name: str = "Brave News Search" + args_schema: type[BaseModel] = NewsSearchParams + header_schema: type[BaseModel] = NewsSearchHeaders + + description: str = ( + "A tool that performs news searches using the Brave Search API. " + "Results are returned as structured JSON data." 
+ ) + + search_url: str = "https://api.search.brave.com/res/v1/news/search" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]: + # Make the response more concise, and easier to consume + results = response.get("results", []) + return [ + { + "url": result.get("url"), + "title": result.get("title"), + "description": result.get("description"), + } + for result in results + ] diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py index 415810c1b..2fb385770 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py @@ -10,16 +10,13 @@ from pydantic import BaseModel, Field from pydantic.types import StringConstraints import requests +from crewai_tools.tools.brave_search_tool.schemas import WebSearchParams +from crewai_tools.tools.brave_search_tool.base import _save_results_to_file + + load_dotenv() -def _save_results_to_file(content: str) -> None: - """Saves the search results to a file.""" - filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" - with open(filename, "w") as file: - file.write(content) - - FreshnessPreset = Literal["pd", "pw", "pm", "py"] FreshnessRange = Annotated[ str, StringConstraints(pattern=r"^\d{4}-\d{2}-\d{2}to\d{4}-\d{2}-\d{2}$") @@ -28,51 +25,6 @@ Freshness = FreshnessPreset | FreshnessRange SafeSearch = Literal["off", "moderate", "strict"] -class BraveSearchToolSchema(BaseModel): - """Input for BraveSearchTool""" - - query: str = Field(..., description="Search query to perform") - country: str | None = Field( - default=None, - description="Country code for geo-targeting (e.g., 'US', 'BR').", - ) - search_language: str | None = Field( - default=None, - 
description="Language code for the search results (e.g., 'en', 'es').", - ) - count: int | None = Field( - default=None, - description="The maximum number of results to return. Actual number may be less.", - ) - offset: int | None = Field( - default=None, description="Skip the first N result sets/pages. Max is 9." - ) - safesearch: SafeSearch | None = Field( - default=None, - description="Filter out explicit content. Options: off/moderate/strict", - ) - spellcheck: bool | None = Field( - default=None, - description="Attempt to correct spelling errors in the search query.", - ) - freshness: Freshness | None = Field( - default=None, - description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", - ) - text_decorations: bool | None = Field( - default=None, - description="Include markup to highlight search terms in the results.", - ) - extra_snippets: bool | None = Field( - default=None, - description="Include up to 5 text snippets for each page if possible.", - ) - operators: bool | None = Field( - default=None, - description="Whether to apply search operators (e.g., site:example.com).", - ) - - # TODO: Extend support to additional endpoints (e.g., /images, /news, etc.) class BraveSearchTool(BaseTool): """A tool that performs web searches using the Brave Search API.""" @@ -82,7 +34,7 @@ class BraveSearchTool(BaseTool): "A tool that performs web searches using the Brave Search API. " "Results are returned as structured JSON data." 
) - args_schema: type[BaseModel] = BraveSearchToolSchema + args_schema: type[BaseModel] = WebSearchParams search_url: str = "https://api.search.brave.com/res/v1/web/search" n_results: int = 10 save_file: bool = False @@ -119,8 +71,8 @@ class BraveSearchTool(BaseTool): # Construct and send the request try: - # Maintain both "search_query" and "query" for backwards compatibility - query = kwargs.get("search_query") or kwargs.get("query") + # Fallback to "query" or "search_query" for backwards compatibility + query = kwargs.get("q") or kwargs.get("query") or kwargs.get("search_query") if not query: raise ValueError("Query is required") @@ -129,8 +81,11 @@ class BraveSearchTool(BaseTool): if country := kwargs.get("country"): payload["country"] = country - if search_language := kwargs.get("search_language"): - payload["search_language"] = search_language + # Fallback to "search_language" for backwards compatibility + if search_lang := kwargs.get("search_lang") or kwargs.get( + "search_language" + ): + payload["search_lang"] = search_lang # Fallback to deprecated n_results parameter if no count is provided count = kwargs.get("count") diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_video_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_video_tool.py new file mode 100644 index 000000000..c69cfc7fc --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_video_tool.py @@ -0,0 +1,39 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.schemas import ( + VideoSearchHeaders, + VideoSearchParams, +) + + +class BraveVideoSearchTool(BraveSearchToolBase): + """A tool that performs video searches using the Brave Search API.""" + + name: str = "Brave Video Search" + args_schema: type[BaseModel] = VideoSearchParams + header_schema: type[BaseModel] = VideoSearchHeaders + + 
description: str = ( + "A tool that performs video searches using the Brave Search API. " + "Results are returned as structured JSON data." + ) + + search_url: str = "https://api.search.brave.com/res/v1/videos/search" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]: + # Make the response more concise, and easier to consume + results = response.get("results", []) + return [ + { + "url": result.get("url"), + "title": result.get("title"), + "description": result.get("description"), + } + for result in results + ] diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_web_tool.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_web_tool.py new file mode 100644 index 000000000..843c38cd2 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_web_tool.py @@ -0,0 +1,45 @@ +from typing import Any + +from pydantic import BaseModel + +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.schemas import ( + WebSearchHeaders, + WebSearchParams, +) + + +class BraveWebSearchTool(BraveSearchToolBase): + """A tool that performs web searches using the Brave Search API.""" + + name: str = "Brave Web Search" + args_schema: type[BaseModel] = WebSearchParams + header_schema: type[BaseModel] = WebSearchHeaders + + description: str = ( + "A tool that performs web searches using the Brave Search API. " + "Results are returned as structured JSON data." 
+ ) + + search_url: str = "https://api.search.brave.com/res/v1/web/search" + + def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]: + return params + + def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]: + results = response.get("web", {}).get("results", []) + refined = [] + for result in results: + snippets = result.get("extra_snippets") or [] + if not snippets: + desc = result.get("description") + if desc: + snippets = [desc] + refined.append( + { + "url": result.get("url"), + "title": result.get("title"), + "snippets": snippets, + } + ) + return refined diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/response_types.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/response_types.py new file mode 100644 index 000000000..63a7dc32d --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/response_types.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +from typing import Literal, TypedDict + + +class LocalPOIs: + class PostalAddress(TypedDict, total=False): + type: Literal["PostalAddress"] + country: str + postalCode: str + streetAddress: str + addressRegion: str + addressLocality: str + displayAddress: str + + class DayOpeningHours(TypedDict): + abbr_name: str + full_name: str + opens: str + closes: str + + class OpeningHours(TypedDict, total=False): + current_day: list[LocalPOIs.DayOpeningHours] + days: list[list[LocalPOIs.DayOpeningHours]] + + class LocationResult(TypedDict, total=False): + provider_url: str + title: str + url: str + id: str | None + opening_hours: LocalPOIs.OpeningHours | None + postal_address: LocalPOIs.PostalAddress | None + + class Response(TypedDict, total=False): + type: Literal["local_pois"] + results: list[LocalPOIs.LocationResult] + + +class LLMContext: + class LLMContextItem(TypedDict, total=False): + snippets: list[str] + title: str + url: str + + class LLMContextMapItem(TypedDict, total=False): + name: str + snippets: 
list[str] + title: str + url: str + + class LLMContextPOIItem(TypedDict, total=False): + name: str + snippets: list[str] + title: str + url: str + + class Grounding(TypedDict, total=False): + generic: list[LLMContext.LLMContextItem] + poi: LLMContext.LLMContextPOIItem + map: list[LLMContext.LLMContextMapItem] + + class Sources(TypedDict, total=False): + pass + + class Response(TypedDict, total=False): + grounding: LLMContext.Grounding + sources: LLMContext.Sources diff --git a/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/schemas.py b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/schemas.py new file mode 100644 index 000000000..dae121558 --- /dev/null +++ b/lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/schemas.py @@ -0,0 +1,525 @@ +from typing import Annotated, Literal + +from pydantic import BaseModel, Field +from pydantic.types import StringConstraints + + +# Common types +Units = Literal["metric", "imperial"] +SafeSearch = Literal["off", "moderate", "strict"] +Freshness = ( + Literal["pd", "pw", "pm", "py"] + | Annotated[ + str, StringConstraints(pattern=r"^\d{4}-\d{2}-\d{2}to\d{4}-\d{2}-\d{2}$") + ] +) +ResultFilter = list[ + Literal[ + "discussions", + "faq", + "infobox", + "news", + "query", + "summarizer", + "videos", + "web", + "locations", + ] +] + + +class LLMContextParams(BaseModel): + """Parameters for Brave LLM Context endpoint.""" + + q: str = Field( + description="Search query to perform", + min_length=1, + max_length=400, + ) + country: str | None = Field( + default=None, + description="Country code for geo-targeting (e.g., 'US', 'BR').", + pattern=r"^[A-Z]{2}$", + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + count: int | None = Field( + default=None, + description="The maximum number of results to return. 
Actual number may be less.", + ge=1, + le=50, + ) + maximum_number_of_urls: int | None = Field( + default=None, + description="The maximum number of URLs to include in the context.", + ge=1, + le=50, + ) + maximum_number_of_tokens: int | None = Field( + default=None, + description="The approximate maximum number of tokens to include in the context.", + ge=1, + le=32768, + ) + maximum_number_of_snippets: int | None = Field( + default=None, + description="The maximum number of different snippets to include in the context.", + ge=1, + le=100, + ) + context_threshold_mode: ( + Literal["disabled", "strict", "lenient", "balanced"] | None + ) = Field( + default=None, + description="The mode to use for the context thresholding.", + ) + maximum_number_of_tokens_per_url: int | None = Field( + default=None, + description="The maximum number of tokens to include for each URL in the context.", + ge=1, + le=8192, + ) + maximum_number_of_snippets_per_url: int | None = Field( + default=None, + description="The maximum number of snippets to include per URL.", + ge=1, + le=100, + ) + goggles: str | list[str] | None = Field( + default=None, + description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + ) + enable_local: bool | None = Field( + default=None, + description="Whether to enable local recall. 
Not setting this value means auto-detect and uses local recall if any of the localization headers are provided.", + ) + + +class WebSearchParams(BaseModel): + """Parameters for Brave Web Search endpoint.""" + + q: str = Field( + description="Search query to perform", + min_length=1, + max_length=400, + ) + country: str | None = Field( + default=None, + description="Country code for geo-targeting (e.g., 'US', 'BR').", + pattern=r"^[A-Z]{2}$", + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + ui_lang: str | None = Field( + default=None, + description="Language code for the user interface (e.g., 'en-US', 'es-AR').", + pattern=r"^[a-z]{2}-[A-Z]{2}$", + ) + count: int | None = Field( + default=None, + description="The maximum number of results to return. Actual number may be less.", + ge=1, + le=20, + ) + offset: int | None = Field( + default=None, + description="Skip the first N result sets/pages. Max is 9.", + ge=0, + le=9, + ) + safesearch: Literal["off", "moderate", "strict"] | None = Field( + default=None, + description="Filter out explicit content. Options: off/moderate/strict", + ) + spellcheck: bool | None = Field( + default=None, + description="Attempt to correct spelling errors in the search query.", + ) + freshness: Freshness | None = Field( + default=None, + description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + ) + text_decorations: bool | None = Field( + default=None, + description="Include markup to highlight search terms in the results.", + ) + extra_snippets: bool | None = Field( + default=None, + description="Include up to 5 text snippets for each page if possible.", + ) + result_filter: ResultFilter | None = Field( + default=None, + description="Filter the results by type. Options: discussions/faq/infobox/news/query/summarizer/videos/web/locations. 
Note: The `count` parameter is applied only to the `web` results.", + ) + units: Units | None = Field( + default=None, + description="The units to use for the results. Options: metric/imperial", + ) + goggles: str | list[str] | None = Field( + default=None, + description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + ) + summary: bool | None = Field( + default=None, + description="Whether to generate a summarizer ID for the results.", + ) + enable_rich_callback: bool | None = Field( + default=None, + description="Whether to enable rich callbacks for the results. Requires Pro level subscription.", + ) + include_fetch_metadata: bool | None = Field( + default=None, + description="Whether to include fetch metadata (e.g., last fetch time) in the results.", + ) + operators: bool | None = Field( + default=None, + description="Whether to apply search operators (e.g., site:example.com).", + ) + + +class LocalPOIsParams(BaseModel): + """Parameters for Brave Local POIs endpoint.""" + + ids: list[str] = Field( + description="List of POI IDs to retrieve. Maximum of 20. IDs are valid for 8 hours.", + min_length=1, + max_length=20, + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + ui_lang: str | None = Field( + default=None, + description="Language code for the user interface (e.g., 'en-US', 'es-AR').", + pattern=r"^[a-z]{2}-[A-Z]{2}$", + ) + units: Units | None = Field( + default=None, + description="The units to use for the results. Options: metric/imperial", + ) + + +class LocalPOIsDescriptionParams(BaseModel): + """Parameters for Brave Local POI Descriptions endpoint.""" + + ids: list[str] = Field( + description="List of POI IDs to retrieve. Maximum of 20. 
IDs are valid for 8 hours.", + min_length=1, + max_length=20, + ) + + +class ImageSearchParams(BaseModel): + """Parameters for Brave Image Search endpoint.""" + + q: str = Field( + description="Search query to perform", + min_length=1, + max_length=400, + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + country: str | None = Field( + default=None, + description="Country code for geo-targeting (e.g., 'US', 'BR').", + pattern=r"^[A-Z]{2}$", + ) + safesearch: Literal["off", "strict"] | None = Field( + default=None, + description="Filter out explicit content. Default is strict.", + ) + count: int | None = Field( + default=None, + description="The maximum number of results to return.", + ge=1, + le=200, + ) + spellcheck: bool | None = Field( + default=None, + description="Attempt to correct spelling errors in the search query.", + ) + + +class VideoSearchParams(BaseModel): + """Parameters for Brave Video Search endpoint.""" + + q: str = Field( + description="Search query to perform", + min_length=1, + max_length=400, + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + ui_lang: str | None = Field( + default=None, + description="Language code for the user interface (e.g., 'en-US', 'es-AR').", + pattern=r"^[a-z]{2}-[A-Z]{2}$", + ) + country: str | None = Field( + default=None, + description="Country code for geo-targeting (e.g., 'US', 'BR').", + pattern=r"^[A-Z]{2}$", + ) + safesearch: SafeSearch | None = Field( + default=None, + description="Filter out explicit content. Options: off/moderate/strict", + ) + count: int | None = Field( + default=None, + description="The maximum number of results to return.", + ge=1, + le=50, + ) + offset: int | None = Field( + default=None, + description="Skip the first N result sets/pages. 
Max is 9.", + ge=0, + le=9, + ) + spellcheck: bool | None = Field( + default=None, + description="Attempt to correct spelling errors in the search query.", + ) + freshness: Freshness | None = Field( + default=None, + description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + ) + include_fetch_metadata: bool | None = Field( + default=None, + description="Whether to include fetch metadata (e.g., last fetch time) in the results.", + ) + operators: bool | None = Field( + default=None, + description="Whether to apply search operators (e.g., site:example.com).", + ) + + +class NewsSearchParams(BaseModel): + """Parameters for Brave News Search endpoint.""" + + q: str = Field( + description="Search query to perform", + min_length=1, + max_length=400, + ) + search_lang: str | None = Field( + default=None, + description="Language code for the search results (e.g., 'en', 'es').", + pattern=r"^[a-z]{2}$", + ) + ui_lang: str | None = Field( + default=None, + description="Language code for the user interface (e.g., 'en-US', 'es-AR').", + pattern=r"^[a-z]{2}-[A-Z]{2}$", + ) + country: str | None = Field( + default=None, + description="Country code for geo-targeting (e.g., 'US', 'BR').", + pattern=r"^[A-Z]{2}$", + ) + safesearch: Literal["off", "moderate", "strict"] | None = Field( + default=None, + description="Filter out explicit content. Options: off/moderate/strict", + ) + count: int | None = Field( + default=None, + description="The maximum number of results to return.", + ge=1, + le=50, + ) + offset: int | None = Field( + default=None, + description="Skip the first N result sets/pages. Max is 9.", + ge=0, + le=9, + ) + spellcheck: bool | None = Field( + default=None, + description="Attempt to correct spelling errors in the search query.", + ) + freshness: Freshness | None = Field( + default=None, + description="Enforce freshness of results. 
Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + ) + extra_snippets: bool | None = Field( + default=None, + description="Include up to 5 text snippets for each page if possible.", + ) + goggles: str | list[str] | None = Field( + default=None, + description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + ) + include_fetch_metadata: bool | None = Field( + default=None, + description="Whether to include fetch metadata in the results.", + ) + operators: bool | None = Field( + default=None, + description="Whether to apply search operators (e.g., site:example.com).", + ) + + +class BaseSearchHeaders(BaseModel): + """Common headers for Brave Search endpoints.""" + + x_subscription_token: str = Field( + alias="x-subscription-token", + description="API key for Brave Search", + ) + api_version: str | None = Field( + alias="api-version", + default=None, + description="API version to use. Default is latest available.", + pattern=r"^\d{4}-\d{2}-\d{2}$", # YYYY-MM-DD + ) + accept: Literal["application/json"] | Literal["*/*"] | None = Field( + default=None, + description="Accept header for the request.", + ) + cache_control: Literal["no-cache"] | None = Field( + alias="cache-control", + default=None, + description="Cache control header for the request.", + ) + user_agent: str | None = Field( + alias="user-agent", + default=None, + description="User agent for the request.", + ) + + +class LLMContextHeaders(BaseSearchHeaders): + """Headers for Brave LLM Context endpoint.""" + + x_loc_lat: float | None = Field( + alias="x-loc-lat", + default=None, + description="Latitude of the user's location.", + ge=-90.0, + le=90.0, + ) + x_loc_long: float | None = Field( + alias="x-loc-long", + default=None, + description="Longitude of the user's location.", + ge=-180.0, + le=180.0, + ) + x_loc_city: str | None = Field( + alias="x-loc-city", + default=None, + description="City of the user's location.", + ) + x_loc_state: str | None = Field( + alias="x-loc-state", + 
default=None, + description="State of the user's location.", + ) + x_loc_state_name: str | None = Field( + alias="x-loc-state-name", + default=None, + description="Name of the state of the user's location.", + ) + x_loc_country: str | None = Field( + alias="x-loc-country", + default=None, + description="The ISO 3166-1 alpha-2 country code of the user's location.", + ) + + +class LocalPOIsHeaders(BaseSearchHeaders): + """Headers for Brave Local POIs endpoint.""" + + x_loc_lat: float | None = Field( + alias="x-loc-lat", + default=None, + description="Latitude of the user's location.", + ge=-90.0, + le=90.0, + ) + x_loc_long: float | None = Field( + alias="x-loc-long", + default=None, + description="Longitude of the user's location.", + ge=-180.0, + le=180.0, + ) + + +class LocalPOIsDescriptionHeaders(BaseSearchHeaders): + """Headers for Brave Local POI Descriptions endpoint.""" + + +class VideoSearchHeaders(BaseSearchHeaders): + """Headers for Brave Video Search endpoint.""" + + +class ImageSearchHeaders(BaseSearchHeaders): + """Headers for Brave Image Search endpoint.""" + + +class NewsSearchHeaders(BaseSearchHeaders): + """Headers for Brave News Search endpoint.""" + + +class WebSearchHeaders(BaseSearchHeaders): + """Headers for Brave Web Search endpoint.""" + + x_loc_lat: float | None = Field( + alias="x-loc-lat", + default=None, + description="Latitude of the user's location.", + ge=-90.0, + le=90.0, + ) + x_loc_long: float | None = Field( + alias="x-loc-long", + default=None, + description="Longitude of the user's location.", + ge=-180.0, + le=180.0, + ) + x_loc_timezone: str | None = Field( + alias="x-loc-timezone", + default=None, + description="Timezone of the user's location.", + ) + x_loc_city: str | None = Field( + alias="x-loc-city", + default=None, + description="City of the user's location.", + ) + x_loc_state: str | None = Field( + alias="x-loc-state", + default=None, + description="State of the user's location.", + ) + x_loc_state_name: str | None = 
Field( + alias="x-loc-state-name", + default=None, + description="Name of the state of the user's location.", + ) + x_loc_country: str | None = Field( + alias="x-loc-country", + default=None, + description="The ISO 3166-1 alpha-2 country code of the user's location.", + ) + x_loc_postal_code: str | None = Field( + alias="x-loc-postal-code", + default=None, + description="The postal code of the user's location.", + ) diff --git a/lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py b/lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py index 28354efa3..00646a0d4 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py +++ b/lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py @@ -1,7 +1,7 @@ import os from crewai import Agent, Crew, Task -from multion_tool import MultiOnTool # type: ignore[import-not-found] +from multion_tool import MultiOnTool # type: ignore[import-not-found] os.environ["OPENAI_API_KEY"] = "Your Key" diff --git a/lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py b/lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py index a14df60df..4b1215792 100644 --- a/lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py +++ b/lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py @@ -17,11 +17,11 @@ Usage: import os +from crewai import Agent, Crew, Process, Task from crewai.utilities.printer import Printer from dotenv import load_dotenv from stagehand.schemas import AvailableModel # type: ignore[import-untyped] -from crewai import Agent, Crew, Process, Task from crewai_tools import StagehandTool diff --git a/lib/crewai-tools/tests/tools/brave_search_tool_test.py b/lib/crewai-tools/tests/tools/brave_search_tool_test.py index 6e1300622..52ef88f47 100644 --- a/lib/crewai-tools/tests/tools/brave_search_tool_test.py +++ b/lib/crewai-tools/tests/tools/brave_search_tool_test.py @@ -1,80 +1,777 @@ -import json -from unittest.mock import patch +import os +from 
unittest.mock import MagicMock, patch import pytest +import requests as requests_lib -from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool +from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase +from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool +from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool +from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool +from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool +from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import ( + BraveLLMContextTool, +) +from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import ( + BraveLocalPOIsTool, + BraveLocalPOIsDescriptionTool, +) +from crewai_tools.tools.brave_search_tool.schemas import ( + WebSearchParams, + WebSearchHeaders, + ImageSearchParams, + ImageSearchHeaders, + NewsSearchParams, + NewsSearchHeaders, + VideoSearchParams, + VideoSearchHeaders, + LLMContextParams, + LLMContextHeaders, + LocalPOIsParams, + LocalPOIsHeaders, + LocalPOIsDescriptionParams, + LocalPOIsDescriptionHeaders, +) + + +def _mock_response( + status_code: int = 200, + json_data: dict | None = None, + headers: dict | None = None, + text: str = "", +) -> MagicMock: + """Build a ``requests.Response``-like mock with the attributes used by ``_make_request``.""" + resp = MagicMock(spec=requests_lib.Response) + resp.status_code = status_code + resp.ok = 200 <= status_code < 400 + resp.url = "https://api.search.brave.com/res/v1/web/search?q=test" + resp.text = text or (str(json_data) if json_data else "") + resp.headers = headers or {} + resp.json.return_value = json_data if json_data is not None else {} + return resp + + +# Fixtures + + +@pytest.fixture(autouse=True) +def _brave_env_and_rate_limit(): + """Set BRAVE_API_KEY for every test. 
Rate limiting is per-instance (each tool starts with a fresh clock).""" + with patch.dict(os.environ, {"BRAVE_API_KEY": "test-api-key"}): + yield @pytest.fixture -def brave_tool(): - return BraveSearchTool(n_results=2) +def web_tool(): + return BraveWebSearchTool() -def test_brave_tool_initialization(): - tool = BraveSearchTool() - assert tool.n_results == 10 +@pytest.fixture +def image_tool(): + return BraveImageSearchTool() + + +@pytest.fixture +def news_tool(): + return BraveNewsSearchTool() + + +@pytest.fixture +def video_tool(): + return BraveVideoSearchTool() + + +# Initialization + +ALL_TOOL_CLASSES = [ + BraveWebSearchTool, + BraveImageSearchTool, + BraveNewsSearchTool, + BraveVideoSearchTool, + BraveLLMContextTool, + BraveLocalPOIsTool, + BraveLocalPOIsDescriptionTool, +] + + +@pytest.mark.parametrize("tool_cls", ALL_TOOL_CLASSES) +def test_instantiation_with_env_var(tool_cls): + """Each tool can be created when BRAVE_API_KEY is in the environment.""" + tool = tool_cls() + assert tool.api_key == "test-api-key" + + +@pytest.mark.parametrize("tool_cls", ALL_TOOL_CLASSES) +def test_instantiation_with_explicit_key(tool_cls): + """An explicit api_key takes precedence over the environment.""" + tool = tool_cls(api_key="explicit-key") + assert tool.api_key == "explicit-key" + + +def test_missing_api_key_raises(): + with patch.dict(os.environ, {}, clear=True): + with pytest.raises(ValueError, match="BRAVE_API_KEY"): + BraveWebSearchTool() + + +def test_default_attributes(): + tool = BraveWebSearchTool() assert tool.save_file is False + assert tool.n_results == 10 + assert tool._timeout == 30 + assert tool._requests_per_second == 1.0 + assert tool.raw is False -@patch("requests.get") -def test_brave_tool_search(mock_get, brave_tool): - mock_response = { +def test_custom_constructor_args(): + tool = BraveWebSearchTool( + save_file=True, + timeout=60, + n_results=5, + requests_per_second=0.5, + raw=True, + ) + assert tool.save_file is True + assert tool._timeout == 
60 + assert tool.n_results == 5 + assert tool._requests_per_second == 0.5 + assert tool.raw is True + + +# Headers + + +def test_default_headers(): + tool = BraveWebSearchTool() + assert tool.headers["x-subscription-token"] == "test-api-key" + assert tool.headers["accept"] == "application/json" + + +def test_set_headers_merges_and_normalizes(): + tool = BraveWebSearchTool() + tool.set_headers({"Cache-Control": "no-cache"}) + assert tool.headers["cache-control"] == "no-cache" + assert tool.headers["x-subscription-token"] == "test-api-key" + + +def test_set_headers_returns_self_for_chaining(): + tool = BraveWebSearchTool() + assert tool.set_headers({"Cache-Control": "no-cache"}) is tool + + +def test_invalid_header_value_raises(): + tool = BraveImageSearchTool() + with pytest.raises(ValueError, match="Invalid headers"): + tool.set_headers({"Accept": "text/xml"}) + + +# Endpoint & Schema Wiring + + +@pytest.mark.parametrize( + "tool_cls, expected_url, expected_params, expected_headers", + [ + ( + BraveWebSearchTool, + "https://api.search.brave.com/res/v1/web/search", + WebSearchParams, + WebSearchHeaders, + ), + ( + BraveImageSearchTool, + "https://api.search.brave.com/res/v1/images/search", + ImageSearchParams, + ImageSearchHeaders, + ), + ( + BraveNewsSearchTool, + "https://api.search.brave.com/res/v1/news/search", + NewsSearchParams, + NewsSearchHeaders, + ), + ( + BraveVideoSearchTool, + "https://api.search.brave.com/res/v1/videos/search", + VideoSearchParams, + VideoSearchHeaders, + ), + ( + BraveLLMContextTool, + "https://api.search.brave.com/res/v1/llm/context", + LLMContextParams, + LLMContextHeaders, + ), + ( + BraveLocalPOIsTool, + "https://api.search.brave.com/res/v1/local/pois", + LocalPOIsParams, + LocalPOIsHeaders, + ), + ( + BraveLocalPOIsDescriptionTool, + "https://api.search.brave.com/res/v1/local/descriptions", + LocalPOIsDescriptionParams, + LocalPOIsDescriptionHeaders, + ), + ], +) +def test_tool_wiring(tool_cls, expected_url, expected_params, 
expected_headers): + tool = tool_cls() + assert tool.search_url == expected_url + assert tool.args_schema is expected_params + assert tool.header_schema is expected_headers + + +# Payload Refinement (e.g., `query` -> `q`, `count` fallback, param pass-through) + + +def test_web_refine_request_payload_passes_all_params(web_tool): + params = web_tool._common_payload_refinement( + { + "query": "test", + "country": "US", + "search_lang": "en", + "count": 5, + "offset": 2, + "safesearch": "moderate", + "freshness": "pw", + } + ) + refined_params = web_tool._refine_request_payload(params) + + assert refined_params["q"] == "test" + assert "query" not in refined_params + assert refined_params["count"] == 5 + assert refined_params["country"] == "US" + assert refined_params["search_lang"] == "en" + assert refined_params["offset"] == 2 + assert refined_params["safesearch"] == "moderate" + assert refined_params["freshness"] == "pw" + + +def test_image_refine_request_payload_passes_all_params(image_tool): + params = image_tool._common_payload_refinement( + { + "query": "cat photos", + "country": "US", + "search_lang": "en", + "safesearch": "strict", + "count": 50, + "spellcheck": True, + } + ) + refined_params = image_tool._refine_request_payload(params) + + assert refined_params["q"] == "cat photos" + assert "query" not in refined_params + assert refined_params["country"] == "US" + assert refined_params["safesearch"] == "strict" + assert refined_params["count"] == 50 + assert refined_params["spellcheck"] is True + + +def test_news_refine_request_payload_passes_all_params(news_tool): + params = news_tool._common_payload_refinement( + { + "query": "breaking news", + "country": "US", + "count": 10, + "offset": 1, + "freshness": "pd", + "extra_snippets": True, + } + ) + refined_params = news_tool._refine_request_payload(params) + + assert refined_params["q"] == "breaking news" + assert "query" not in refined_params + assert refined_params["country"] == "US" + assert 
refined_params["offset"] == 1 + assert refined_params["freshness"] == "pd" + assert refined_params["extra_snippets"] is True + + +def test_video_refine_request_payload_passes_all_params(video_tool): + params = video_tool._common_payload_refinement( + { + "query": "tutorial", + "country": "US", + "count": 25, + "offset": 0, + "safesearch": "strict", + "freshness": "pm", + } + ) + refined_params = video_tool._refine_request_payload(params) + + assert refined_params["q"] == "tutorial" + assert "query" not in refined_params + assert refined_params["country"] == "US" + assert refined_params["offset"] == 0 + assert refined_params["freshness"] == "pm" + + +def test_legacy_constructor_params_flow_into_query_params(): + """The legacy n_results and country constructor params are applied as defaults + when count/country are not explicitly provided at call time.""" + tool = BraveWebSearchTool(n_results=3, country="BR") + params = tool._common_payload_refinement({"query": "test"}) + + assert params["count"] == 3 + assert params["country"] == "BR" + + +def test_legacy_constructor_params_do_not_override_explicit_query_params(): + """Explicit query-time count/country take precedence over constructor defaults.""" + tool = BraveWebSearchTool(n_results=3, country="BR") + params = tool._common_payload_refinement( + {"query": "test", "count": 10, "country": "US"} + ) + + assert params["count"] == 10 + assert params["country"] == "US" + + +def test_refine_request_payload_passes_multiple_goggles_as_multiple_params(web_tool): + result = web_tool._refine_request_payload( + { + "query": "test", + "goggles": ["goggle1", "goggle2"], + } + ) + assert result["goggles"] == ["goggle1", "goggle2"] + + +# Null-like / empty value stripping +# +# crewAI's ensure_all_properties_required (pydantic_schema_utils.py) marks +# every schema property as required for OpenAI strict-mode compatibility. 
+# Because optional Brave API parameters look required to the LLM, it fills +# them with placeholder junk — None, "", "null", or []. The test below +# verifies that _common_payload_refinement strips these from optional fields. + + +def test_common_refinement_strips_null_like_values(web_tool): + """_common_payload_refinement drops optional keys with None / '' / 'null' / [].""" + params = web_tool._common_payload_refinement( + { + "query": "test", + "country": "US", + "search_lang": "", + "freshness": "null", + "count": 5, + "goggles": [], + } + ) + assert params["q"] == "test" + assert params["country"] == "US" + assert params["count"] == 5 + assert "search_lang" not in params + assert "freshness" not in params + assert "goggles" not in params + + +# End-to-End _run() with Mocked HTTP Response + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_web_search_end_to_end(mock_get, web_tool): + web_tool.raw = True + data = {"web": {"results": [{"title": "R", "url": "http://r.co"}]}} + mock_get.return_value = _mock_response(json_data=data) + + result = web_tool._run(query="test") + + mock_get.assert_called_once() + call_args = mock_get.call_args.kwargs + assert call_args["params"]["q"] == "test" + assert call_args["headers"]["x-subscription-token"] == "test-api-key" + assert result == data + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_image_search_end_to_end(mock_get, image_tool): + image_tool.raw = True + data = {"results": [{"url": "http://img.co/a.jpg"}]} + mock_get.return_value = _mock_response(json_data=data) + + assert image_tool._run(query="cats") == data + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_news_search_end_to_end(mock_get, news_tool): + news_tool.raw = True + data = {"results": [{"title": "News", "url": "http://n.co"}]} + mock_get.return_value = _mock_response(json_data=data) + + assert news_tool._run(query="headlines") == data + + 
+@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_video_search_end_to_end(mock_get, video_tool): + video_tool.raw = True + data = {"results": [{"title": "Vid", "url": "http://v.co"}]} + mock_get.return_value = _mock_response(json_data=data) + + assert video_tool._run(query="python tutorial") == data + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_raw_false_calls_refine_response(mock_get, web_tool): + """With raw=False (the default), _refine_response transforms the API response.""" + api_response = { "web": { "results": [ { - "title": "Test Title", - "url": "http://test.com", - "description": "Test Description", + "title": "CrewAI", + "url": "https://crewai.com", + "description": "AI agent framework", } ] } } - mock_get.return_value.json.return_value = mock_response + mock_get.return_value = _mock_response(json_data=api_response) - result = brave_tool.run(query="test") - data = json.loads(result) - assert isinstance(data, list) - assert len(data) >= 1 - assert data[0]["title"] == "Test Title" - assert data[0]["url"] == "http://test.com" + assert web_tool.raw is False + result = web_tool._run(query="crewai") + + # The web tool's _refine_response extracts and reshapes results. + # The key assertion: we should NOT get back the raw API envelope. 
+ assert result != api_response -@patch("requests.get") -def test_brave_tool(mock_get): - mock_response = { - "web": { - "results": [ - { - "title": "Brave Browser", - "url": "https://brave.com", - "description": "Brave Browser description", - } - ] - } - } - mock_get.return_value.json.return_value = mock_response - - tool = BraveSearchTool(n_results=2) - result = tool.run(query="Brave Browser") - assert result is not None - - # Parse JSON so we can examine the structure - data = json.loads(result) - assert isinstance(data, list) - assert len(data) >= 1 - - # First item should have expected fields: title, url, and description - first = data[0] - assert "title" in first - assert first["title"] == "Brave Browser" - assert "url" in first - assert first["url"] == "https://brave.com" - assert "description" in first - assert first["description"] == "Brave Browser description" +# Backward Compatibility & Legacy Parameter Support -if __name__ == "__main__": - test_brave_tool() - test_brave_tool_initialization() - # test_brave_tool_search(brave_tool) +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_positional_query_argument(mock_get, web_tool): + """tool.run('my query') works as a positional argument.""" + mock_get.return_value = _mock_response(json_data={}) + + web_tool._run("positional test") + + assert mock_get.call_args.kwargs["params"]["q"] == "positional test" + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_search_query_backward_compat(mock_get, web_tool): + """The legacy 'search_query' param is mapped to 'query'.""" + mock_get.return_value = _mock_response(json_data={}) + + web_tool._run(search_query="legacy test") + + assert mock_get.call_args.kwargs["params"]["q"] == "legacy test" + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base._save_results_to_file") +def test_save_file_called_when_enabled(mock_save, mock_get): + mock_get.return_value 
= _mock_response(json_data={"results": []}) + + tool = BraveWebSearchTool(save_file=True) + tool._run(query="test") + + mock_save.assert_called_once() + + +# Error Handling + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_connection_error_raises_runtime_error(mock_get, web_tool): + mock_get.side_effect = requests_lib.exceptions.ConnectionError("refused") + with pytest.raises(RuntimeError, match="Brave Search API connection failed"): + web_tool._run(query="test") + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_timeout_raises_runtime_error(mock_get, web_tool): + mock_get.side_effect = requests_lib.exceptions.Timeout("timed out") + with pytest.raises(RuntimeError, match="timed out"): + web_tool._run(query="test") + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_invalid_params_raises_value_error(mock_get, web_tool): + """count=999 exceeds WebSearchParams.count le=20.""" + with pytest.raises(ValueError, match="Invalid parameters"): + web_tool._run(query="test", count=999) + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_4xx_error_raises_with_api_detail(mock_get, web_tool): + """A 422 with a structured error body includes code and detail in the message.""" + mock_get.return_value = _mock_response( + status_code=422, + json_data={ + "error": { + "id": "abc-123", + "status": 422, + "code": "OPTION_NOT_IN_PLAN", + "detail": "extra_snippets requires a Pro plan", + } + }, + ) + with pytest.raises(RuntimeError, match="OPTION_NOT_IN_PLAN") as exc_info: + web_tool._run(query="test") + assert "extra_snippets requires a Pro plan" in str(exc_info.value) + assert "HTTP 422" in str(exc_info.value) + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_auth_error_raises_immediately(mock_get, web_tool): + """A 401 with SUBSCRIPTION_TOKEN_INVALID is not retried.""" + mock_get.return_value = _mock_response( + status_code=401, + json_data={ 
+ "error": { + "id": "xyz", + "status": 401, + "code": "SUBSCRIPTION_TOKEN_INVALID", + "detail": "The subscription token is invalid", + } + }, + ) + with pytest.raises(RuntimeError, match="SUBSCRIPTION_TOKEN_INVALID"): + web_tool._run(query="test") + # Should NOT have retried — only one call. + assert mock_get.call_count == 1 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_quota_limited_429_raises_immediately(mock_get, web_tool): + """A 429 with QUOTA_LIMITED is NOT retried — quota exhaustion is terminal.""" + mock_get.return_value = _mock_response( + status_code=429, + json_data={ + "error": { + "id": "ql-1", + "status": 429, + "code": "QUOTA_LIMITED", + "detail": "Monthly quota exceeded", + } + }, + ) + with pytest.raises(RuntimeError, match="QUOTA_LIMITED") as exc_info: + web_tool._run(query="test") + assert "Monthly quota exceeded" in str(exc_info.value) + # Terminal — only one HTTP call, no retries. + assert mock_get.call_count == 1 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_usage_limit_exceeded_429_raises_immediately(mock_get, web_tool): + """USAGE_LIMIT_EXCEEDED is also non-retryable, just like QUOTA_LIMITED.""" + mock_get.return_value = _mock_response( + status_code=429, + json_data={ + "error": { + "id": "ule-1", + "status": 429, + "code": "USAGE_LIMIT_EXCEEDED", + } + }, + text="usage limit exceeded", + ) + with pytest.raises(RuntimeError, match="USAGE_LIMIT_EXCEEDED"): + web_tool._run(query="test") + assert mock_get.call_count == 1 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_error_body_is_fully_included_in_message(mock_get, web_tool): + """The full JSON error body is included in the RuntimeError message.""" + mock_get.return_value = _mock_response( + status_code=429, + json_data={ + "error": { + "id": "x", + "status": 429, + "code": "QUOTA_LIMITED", + "detail": "Exceeded", + "meta": {"plan": "free", "limit": 1000}, + } + }, + ) + with 
pytest.raises(RuntimeError) as exc_info: + web_tool._run(query="test") + msg = str(exc_info.value) + assert "HTTP 429" in msg + assert "QUOTA_LIMITED" in msg + assert "free" in msg + assert "1000" in msg + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_error_without_json_body_falls_back_to_text(mock_get, web_tool): + """When the error response isn't valid JSON, resp.text is used as the detail.""" + resp = _mock_response(status_code=500, text="Internal Server Error") + resp.json.side_effect = ValueError("No JSON") + mock_get.return_value = resp + + with pytest.raises(RuntimeError, match="Internal Server Error"): + web_tool._run(query="test") + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +def test_invalid_json_on_success_raises_runtime_error(mock_get, web_tool): + """A 200 OK with a non-JSON body raises RuntimeError.""" + resp = _mock_response(status_code=200) + resp.json.side_effect = ValueError("Expecting value") + mock_get.return_value = resp + + with pytest.raises(RuntimeError, match="invalid JSON"): + web_tool._run(query="test") + + +# Rate Limiting + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_rate_limit_sleeps_when_too_fast(mock_time, mock_get, web_tool): + """Back-to-back calls within the interval trigger a sleep.""" + mock_get.return_value = _mock_response(json_data={}) + + # Simulate: last request was at t=100, "now" is t=100.2 (only 0.2s elapsed). + # With default 1 req/s the min interval is 1.0s, so it should sleep ~0.8s. 
+ mock_time.time.return_value = 100.2 + web_tool._last_request_time = 100.0 + + web_tool._run(query="test") + + mock_time.sleep.assert_called_once() + sleep_duration = mock_time.sleep.call_args[0][0] + assert 0.7 < sleep_duration < 0.9 # approximately 0.8s + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_rate_limit_skips_sleep_when_enough_time_passed(mock_time, mock_get, web_tool): + """No sleep when the elapsed time already exceeds the interval.""" + mock_get.return_value = _mock_response(json_data={}) + + # Last request was at t=100, "now" is t=102 (2s elapsed > 1s interval). + mock_time.time.return_value = 102.0 + web_tool._last_request_time = 100.0 + + web_tool._run(query="test") + + mock_time.sleep.assert_not_called() + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_rate_limit_disabled_when_zero(mock_time, mock_get, web_tool): + """requests_per_second=0 disables rate limiting entirely.""" + mock_get.return_value = _mock_response(json_data={}) + + web_tool._last_request_time = 100.0 + mock_time.time.return_value = 100.0 # same instant + + web_tool._run(query="test") + + mock_time.sleep.assert_not_called() + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_rate_limit_per_instance_independent(mock_time, mock_get, web_tool, image_tool): + """Each instance has its own rate-limit clock; a request on one does not delay the other.""" + mock_get.return_value = _mock_response(json_data={}) + + # Web tool fires at t=100 (its clock goes 0 -> 100). + mock_time.time.return_value = 100.0 + web_tool._run(query="test") + + # Image tool fires at t=100.3. Its clock is still 0 (separate instance), so + # next_allowed = 1.0 and 100.3 > 1.0 — no sleep. Total process rate can be sum of instance limits. 
+ mock_time.time.return_value = 100.3 + image_tool._run(query="cats") + + mock_time.sleep.assert_not_called() + + +# Retry Behavior + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_429_rate_limited_retries_then_succeeds(mock_time, mock_get, web_tool): + """A transient RATE_LIMITED 429 is retried; success on the second attempt.""" + mock_time.time.return_value = 200.0 + + resp_429 = _mock_response( + status_code=429, + json_data={"error": {"id": "r", "status": 429, "code": "RATE_LIMITED"}}, + headers={"Retry-After": "2"}, + ) + resp_200 = _mock_response(status_code=200, json_data={"web": {"results": []}}) + mock_get.side_effect = [resp_429, resp_200] + + web_tool.raw = True + result = web_tool._run(query="test") + + assert result == {"web": {"results": []}} + assert mock_get.call_count == 2 + # Slept for the Retry-After value. + retry_sleeps = [c for c in mock_time.sleep.call_args_list if c[0][0] == 2.0] + assert len(retry_sleeps) == 1 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_5xx_is_retried(mock_time, mock_get, web_tool): + """A 502 server error is retried; success on the second attempt.""" + mock_time.time.return_value = 200.0 + + resp_502 = _mock_response(status_code=502, text="Bad Gateway") + resp_502.json.side_effect = ValueError("no json") + resp_200 = _mock_response(status_code=200, json_data={"web": {"results": []}}) + mock_get.side_effect = [resp_502, resp_200] + + web_tool.raw = True + result = web_tool._run(query="test") + + assert result == {"web": {"results": []}} + assert mock_get.call_count == 2 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_429_rate_limited_exhausts_retries(mock_time, mock_get, web_tool): + """Persistent RATE_LIMITED 429s exhaust retries and raise 
RuntimeError.""" + mock_time.time.return_value = 200.0 + + resp_429 = _mock_response( + status_code=429, + json_data={"error": {"id": "r", "status": 429, "code": "RATE_LIMITED"}}, + ) + mock_get.return_value = resp_429 + + with pytest.raises(RuntimeError, match="RATE_LIMITED"): + web_tool._run(query="test") + # 3 attempts (default _max_retries). + assert mock_get.call_count == 3 + + +@patch("crewai_tools.tools.brave_search_tool.base.requests.get") +@patch("crewai_tools.tools.brave_search_tool.base.time") +def test_retry_uses_exponential_backoff_when_no_retry_after( + mock_time, mock_get, web_tool +): + """Without Retry-After, backoff is 2^attempt (1s, 2s, ...).""" + mock_time.time.return_value = 200.0 + + resp_503 = _mock_response(status_code=503, text="Service Unavailable") + resp_503.json.side_effect = ValueError("no json") + resp_200 = _mock_response(status_code=200, json_data={"ok": True}) + mock_get.side_effect = [resp_503, resp_503, resp_200] + + web_tool.raw = True + web_tool._run(query="test") + + # Two retries: attempt 0 → sleep(1.0), attempt 1 → sleep(2.0). + retry_sleeps = [c[0][0] for c in mock_time.sleep.call_args_list] + assert 1.0 in retry_sleeps + assert 2.0 in retry_sleeps diff --git a/lib/crewai-tools/tool.specs.json b/lib/crewai-tools/tool.specs.json index 5cdc0c066..ffd094aad 100644 --- a/lib/crewai-tools/tool.specs.json +++ b/lib/crewai-tools/tool.specs.json @@ -196,6 +196,1033 @@ "type": "object" } }, + { + "description": "A tool that performs image searches using the Brave Search API. 
Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave Image Search", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that performs image searches using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/images/search", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveImageSearchTool", + "type": "object" + }, + "name": "BraveImageSearchTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave Image Search endpoint.", + "properties": { + "count": { + "anyOf": [ + { + "maximum": 200, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of results to return.", + "title": "Count" + }, + "country": { + "anyOf": [ + { + "pattern": "^[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "Country code for geo-targeting (e.g., 'US', 'BR').", + "title": "Country" + }, + "q": { + "description": "Search query to perform", + "maxLength": 400, + "minLength": 1, + "title": "Q", + "type": "string" + }, + "safesearch": { + "anyOf": [ + { + "enum": [ + "off", + "strict" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter out explicit content. Default is strict.", + "title": "Safesearch" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + }, + "spellcheck": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Attempt to correct spelling errors in the search query.", + "title": "Spellcheck" + } + }, + "required": [ + "q" + ], + "title": "ImageSearchParams", + "type": "object" + } + }, + { + "description": "A tool that retrieves context for LLM usage from the Brave Search API. 
Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave LLM Context", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that retrieves context for LLM usage from the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/llm/context", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveLLMContextTool", + "type": "object" + }, + "name": "BraveLLMContextTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave LLM Context endpoint.", + "properties": { + "context_threshold_mode": { + "anyOf": [ + { + "enum": [ + "disabled", + "strict", + "lenient", + "balanced" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The mode to use for the context thresholding.", + "title": "Context Threshold Mode" + }, + "count": { + "anyOf": [ + { + "maximum": 50, + "minimum": 1, + 
"type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of results to return. Actual number may be less.", + "title": "Count" + }, + "country": { + "anyOf": [ + { + "pattern": "^[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Country code for geo-targeting (e.g., 'US', 'BR').", + "title": "Country" + }, + "enable_local": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable local recall. Not setting this value means auto-detect and uses local recall if any of the localization headers are provided.", + "title": "Enable Local" + }, + "goggles": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + "title": "Goggles" + }, + "maximum_number_of_snippets": { + "anyOf": [ + { + "maximum": 100, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of different snippets to include in the context.", + "title": "Maximum Number Of Snippets" + }, + "maximum_number_of_snippets_per_url": { + "anyOf": [ + { + "maximum": 100, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of snippets to include per URL.", + "title": "Maximum Number Of Snippets Per Url" + }, + "maximum_number_of_tokens": { + "anyOf": [ + { + "maximum": 32768, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The approximate maximum number of tokens to include in the context.", + "title": "Maximum Number Of Tokens" + }, + "maximum_number_of_tokens_per_url": { + "anyOf": [ + { + "maximum": 8192, + "minimum": 1, + "type": "integer" + }, + { + 
"type": "null" + } + ], + "default": null, + "description": "The maximum number of tokens to include for each URL in the context.", + "title": "Maximum Number Of Tokens Per Url" + }, + "maximum_number_of_urls": { + "anyOf": [ + { + "maximum": 50, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of URLs to include in the context.", + "title": "Maximum Number Of Urls" + }, + "q": { + "description": "Search query to perform", + "maxLength": 400, + "minLength": 1, + "title": "Q", + "type": "string" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + } + }, + "required": [ + "q" + ], + "title": "LLMContextParams", + "type": "object" + } + }, + { + "description": "A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API. 
Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave Local POI Descriptions", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/local/descriptions", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveLocalPOIsDescriptionTool", + "type": "object" + }, + "name": "BraveLocalPOIsDescriptionTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave Local POI Descriptions endpoint.", + "properties": { + "ids": { + "description": "List of POI IDs to retrieve. Maximum of 20. 
IDs are valid for 8 hours.", + "items": { + "type": "string" + }, + "maxItems": 20, + "minItems": 1, + "title": "Ids", + "type": "array" + } + }, + "required": [ + "ids" + ], + "title": "LocalPOIsDescriptionParams", + "type": "object" + } + }, + { + "description": "A tool that retrieves local POIs using the Brave Search API. Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave Local POIs", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that retrieves local POIs using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/local/pois", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveLocalPOIsTool", + "type": "object" + }, + "name": "BraveLocalPOIsTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave Local POIs endpoint.", + "properties": { + "ids": { + "description": "List of POI IDs to 
retrieve. Maximum of 20. IDs are valid for 8 hours.", + "items": { + "type": "string" + }, + "maxItems": 20, + "minItems": 1, + "title": "Ids", + "type": "array" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + }, + "ui_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}-[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the user interface (e.g., 'en-US', 'es-AR').", + "title": "Ui Lang" + }, + "units": { + "anyOf": [ + { + "enum": [ + "metric", + "imperial" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The units to use for the results. Options: metric/imperial", + "title": "Units" + } + }, + "required": [ + "ids" + ], + "title": "LocalPOIsParams", + "type": "object" + } + }, + { + "description": "A tool that performs news searches using the Brave Search API. 
Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave News Search", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that performs news searches using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/news/search", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveNewsSearchTool", + "type": "object" + }, + "name": "BraveNewsSearchTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave News Search endpoint.", + "properties": { + "count": { + "anyOf": [ + { + "maximum": 50, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of results to return.", + "title": "Count" + }, + "country": { + "anyOf": [ + { + "pattern": "^[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": 
"Country code for geo-targeting (e.g., 'US', 'BR').", + "title": "Country" + }, + "extra_snippets": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include up to 5 text snippets for each page if possible.", + "title": "Extra Snippets" + }, + "freshness": { + "anyOf": [ + { + "enum": [ + "pd", + "pw", + "pm", + "py" + ], + "type": "string" + }, + { + "pattern": "^\\d{4}-\\d{2}-\\d{2}to\\d{4}-\\d{2}-\\d{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + "title": "Freshness" + }, + "goggles": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + "title": "Goggles" + }, + "include_fetch_metadata": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to include fetch metadata in the results.", + "title": "Include Fetch Metadata" + }, + "offset": { + "anyOf": [ + { + "maximum": 9, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Skip the first N result sets/pages. Max is 9.", + "title": "Offset" + }, + "operators": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to apply search operators (e.g., site:example.com).", + "title": "Operators" + }, + "q": { + "description": "Search query to perform", + "maxLength": 400, + "minLength": 1, + "title": "Q", + "type": "string" + }, + "safesearch": { + "anyOf": [ + { + "enum": [ + "off", + "moderate", + "strict" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter out explicit content. 
Options: off/moderate/strict", + "title": "Safesearch" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + }, + "spellcheck": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Attempt to correct spelling errors in the search query.", + "title": "Spellcheck" + }, + "ui_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}-[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the user interface (e.g., 'en-US', 'es-AR').", + "title": "Ui Lang" + } + }, + "required": [ + "q" + ], + "title": "NewsSearchParams", + "type": "object" + } + }, { "description": "A tool that performs web searches using the Brave Search API. Results are returned as structured JSON data.", "env_vars": [ @@ -269,11 +1296,13 @@ "name": "BraveSearchTool", "package_dependencies": [], "run_params_schema": { - "description": "Input for BraveSearchTool", + "description": "Parameters for Brave Web Search endpoint.", "properties": { "count": { "anyOf": [ { + "maximum": 20, + "minimum": 1, "type": "integer" }, { @@ -287,6 +1316,7 @@ "country": { "anyOf": [ { + "pattern": "^[A-Z]{2}$", "type": "string" }, { @@ -297,6 +1327,19 @@ "description": "Country code for geo-targeting (e.g., 'US', 'BR').", "title": "Country" }, + "enable_rich_callback": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable rich callbacks for the results. Requires Pro level subscription.", + "title": "Enable Rich Callback" + }, "extra_snippets": { "anyOf": [ { @@ -333,9 +1376,43 @@ "description": "Enforce freshness of results. 
Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", "title": "Freshness" }, + "goggles": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + "title": "Goggles" + }, + "include_fetch_metadata": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to include fetch metadata (e.g., last fetch time) in the results.", + "title": "Include Fetch Metadata" + }, "offset": { "anyOf": [ { + "maximum": 9, + "minimum": 0, "type": "integer" }, { @@ -359,9 +1436,343 @@ "description": "Whether to apply search operators (e.g., site:example.com).", "title": "Operators" }, - "query": { + "q": { "description": "Search query to perform", - "title": "Query", + "maxLength": 400, + "minLength": 1, + "title": "Q", + "type": "string" + }, + "result_filter": { + "anyOf": [ + { + "items": { + "enum": [ + "discussions", + "faq", + "infobox", + "news", + "query", + "summarizer", + "videos", + "web", + "locations" + ], + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter the results by type. Options: discussions/faq/infobox/news/query/summarizer/videos/web/locations. Note: The `count` parameter is applied only to the `web` results.", + "title": "Result Filter" + }, + "safesearch": { + "anyOf": [ + { + "enum": [ + "off", + "moderate", + "strict" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter out explicit content. 
Options: off/moderate/strict", + "title": "Safesearch" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + }, + "spellcheck": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Attempt to correct spelling errors in the search query.", + "title": "Spellcheck" + }, + "summary": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to generate a summarizer ID for the results.", + "title": "Summary" + }, + "text_decorations": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include markup to highlight search terms in the results.", + "title": "Text Decorations" + }, + "ui_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}-[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the user interface (e.g., 'en-US', 'es-AR').", + "title": "Ui Lang" + }, + "units": { + "anyOf": [ + { + "enum": [ + "metric", + "imperial" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The units to use for the results. Options: metric/imperial", + "title": "Units" + } + }, + "required": [ + "q" + ], + "title": "WebSearchParams", + "type": "object" + } + }, + { + "description": "A tool that performs video searches using the Brave Search API. 
Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave Video Search", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that performs video searches using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": "integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/videos/search", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveVideoSearchTool", + "type": "object" + }, + "name": "BraveVideoSearchTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave Video Search endpoint.", + "properties": { + "count": { + "anyOf": [ + { + "maximum": 50, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of results to return.", + "title": "Count" + }, + "country": { + "anyOf": [ + { + "pattern": "^[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + 
"description": "Country code for geo-targeting (e.g., 'US', 'BR').", + "title": "Country" + }, + "freshness": { + "anyOf": [ + { + "enum": [ + "pd", + "pw", + "pm", + "py" + ], + "type": "string" + }, + { + "pattern": "^\\d{4}-\\d{2}-\\d{2}to\\d{4}-\\d{2}-\\d{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + "title": "Freshness" + }, + "include_fetch_metadata": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to include fetch metadata (e.g., last fetch time) in the results.", + "title": "Include Fetch Metadata" + }, + "offset": { + "anyOf": [ + { + "maximum": 9, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Skip the first N result sets/pages. Max is 9.", + "title": "Offset" + }, + "operators": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to apply search operators (e.g., site:example.com).", + "title": "Operators" + }, + "q": { + "description": "Search query to perform", + "maxLength": 400, + "minLength": 1, + "title": "Q", "type": "string" }, "safesearch": { @@ -382,9 +1793,10 @@ "description": "Filter out explicit content. 
Options: off/moderate/strict", "title": "Safesearch" }, - "search_language": { + "search_lang": { "anyOf": [ { + "pattern": "^[a-z]{2}$", "type": "string" }, { @@ -393,7 +1805,7 @@ ], "default": null, "description": "Language code for the search results (e.g., 'en', 'es').", - "title": "Search Language" + "title": "Search Lang" }, "spellcheck": { "anyOf": [ @@ -408,6 +1820,353 @@ "description": "Attempt to correct spelling errors in the search query.", "title": "Spellcheck" }, + "ui_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}-[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the user interface (e.g., 'en-US', 'es-AR').", + "title": "Ui Lang" + } + }, + "required": [ + "q" + ], + "title": "VideoSearchParams", + "type": "object" + } + }, + { + "description": "A tool that performs web searches using the Brave Search API. Results are returned as structured JSON data.", + "env_vars": [ + { + "default": null, + "description": "API key for Brave Search", + "name": "BRAVE_API_KEY", + "required": true + } + ], + "humanized_name": "Brave Web Search", + "init_params_schema": { + "$defs": { + "EnvVar": { + "properties": { + "default": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Default" + }, + "description": { + "title": "Description", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "default": true, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "name", + "description" + ], + "title": "EnvVar", + "type": "object" + } + }, + "description": "A tool that performs web searches using the Brave Search API.", + "properties": { + "country": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Country" + }, + "header_schema": { + "title": "Header Schema" + }, + "n_results": { + "default": 10, + "title": "N Results", + "type": 
"integer" + }, + "raw": { + "default": false, + "title": "Raw", + "type": "boolean" + }, + "save_file": { + "default": false, + "title": "Save File", + "type": "boolean" + }, + "search_url": { + "default": "https://api.search.brave.com/res/v1/web/search", + "title": "Search Url", + "type": "string" + } + }, + "title": "BraveWebSearchTool", + "type": "object" + }, + "name": "BraveWebSearchTool", + "package_dependencies": [], + "run_params_schema": { + "description": "Parameters for Brave Web Search endpoint.", + "properties": { + "count": { + "anyOf": [ + { + "maximum": 20, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The maximum number of results to return. Actual number may be less.", + "title": "Count" + }, + "country": { + "anyOf": [ + { + "pattern": "^[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Country code for geo-targeting (e.g., 'US', 'BR').", + "title": "Country" + }, + "enable_rich_callback": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to enable rich callbacks for the results. Requires Pro level subscription.", + "title": "Enable Rich Callback" + }, + "extra_snippets": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include up to 5 text snippets for each page if possible.", + "title": "Extra Snippets" + }, + "freshness": { + "anyOf": [ + { + "enum": [ + "pd", + "pw", + "pm", + "py" + ], + "type": "string" + }, + { + "pattern": "^\\d{4}-\\d{2}-\\d{2}to\\d{4}-\\d{2}-\\d{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Enforce freshness of results. 
Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD", + "title": "Freshness" + }, + "goggles": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Goggles act as a custom re-ranking mechanism. Goggle source or URLs.", + "title": "Goggles" + }, + "include_fetch_metadata": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to include fetch metadata (e.g., last fetch time) in the results.", + "title": "Include Fetch Metadata" + }, + "offset": { + "anyOf": [ + { + "maximum": 9, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Skip the first N result sets/pages. Max is 9.", + "title": "Offset" + }, + "operators": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to apply search operators (e.g., site:example.com).", + "title": "Operators" + }, + "q": { + "description": "Search query to perform", + "maxLength": 400, + "minLength": 1, + "title": "Q", + "type": "string" + }, + "result_filter": { + "anyOf": [ + { + "items": { + "enum": [ + "discussions", + "faq", + "infobox", + "news", + "query", + "summarizer", + "videos", + "web", + "locations" + ], + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter the results by type. Options: discussions/faq/infobox/news/query/summarizer/videos/web/locations. Note: The `count` parameter is applied only to the `web` results.", + "title": "Result Filter" + }, + "safesearch": { + "anyOf": [ + { + "enum": [ + "off", + "moderate", + "strict" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Filter out explicit content. 
Options: off/moderate/strict", + "title": "Safesearch" + }, + "search_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the search results (e.g., 'en', 'es').", + "title": "Search Lang" + }, + "spellcheck": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Attempt to correct spelling errors in the search query.", + "title": "Spellcheck" + }, + "summary": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Whether to generate a summarizer ID for the results.", + "title": "Summary" + }, "text_decorations": { "anyOf": [ { @@ -420,12 +2179,43 @@ "default": null, "description": "Include markup to highlight search terms in the results.", "title": "Text Decorations" + }, + "ui_lang": { + "anyOf": [ + { + "pattern": "^[a-z]{2}-[A-Z]{2}$", + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Language code for the user interface (e.g., 'en-US', 'es-AR').", + "title": "Ui Lang" + }, + "units": { + "anyOf": [ + { + "enum": [ + "metric", + "imperial" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The units to use for the results. 
Options: metric/imperial", + "title": "Units" } }, "required": [ - "query" + "q" ], - "title": "BraveSearchToolSchema", + "title": "WebSearchParams", "type": "object" } }, @@ -3874,6 +5664,10 @@ "title": "Bucket Name", "type": "string" }, + "cluster": { + "description": "An instance of the Couchbase Cluster connected to the desired Couchbase server.", + "title": "Cluster" + }, "collection_name": { "description": "The name of the Couchbase collection to search", "title": "Collection Name", @@ -3922,6 +5716,7 @@ } }, "required": [ + "cluster", "collection_name", "scope_name", "bucket_name", @@ -12800,9 +14595,13 @@ "properties": { "config": { "$ref": "#/$defs/OxylabsAmazonProductScraperConfig" + }, + "oxylabs_api": { + "title": "Oxylabs Api" } }, "required": [ + "oxylabs_api", "config" ], "title": "OxylabsAmazonProductScraperTool", @@ -13025,9 +14824,13 @@ "properties": { "config": { "$ref": "#/$defs/OxylabsAmazonSearchScraperConfig" + }, + "oxylabs_api": { + "title": "Oxylabs Api" } }, "required": [ + "oxylabs_api", "config" ], "title": "OxylabsAmazonSearchScraperTool", @@ -13263,9 +15066,13 @@ "properties": { "config": { "$ref": "#/$defs/OxylabsGoogleSearchScraperConfig" + }, + "oxylabs_api": { + "title": "Oxylabs Api" } }, "required": [ + "oxylabs_api", "config" ], "title": "OxylabsGoogleSearchScraperTool", @@ -13449,9 +15256,13 @@ "properties": { "config": { "$ref": "#/$defs/OxylabsUniversalScraperConfig" + }, + "oxylabs_api": { + "title": "Oxylabs Api" } }, "required": [ + "oxylabs_api", "config" ], "title": "OxylabsUniversalScraperTool", @@ -21553,6 +23364,26 @@ "description": "The Tavily API key. 
If not provided, it will be loaded from the environment variable TAVILY_API_KEY.", "title": "Api Key" }, + "async_client": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "title": "Async Client" + }, + "client": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "title": "Client" + }, "extract_depth": { "default": "basic", "description": "The depth of extraction. 'basic' for basic extraction, 'advanced' for advanced extraction.", @@ -21688,6 +23519,26 @@ "description": "The Tavily API key. If not provided, it will be loaded from the environment variable TAVILY_API_KEY.", "title": "Api Key" }, + "async_client": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "title": "Async Client" + }, + "client": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "title": "Client" + }, "days": { "default": 7, "description": "The number of days to search back.", diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index ff1866696..b0d70f388 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -21,7 +21,7 @@ dependencies = [ "opentelemetry-exporter-otlp-proto-http~=1.34.0", # Data Handling "chromadb~=1.1.0", - "tokenizers~=0.20.3", + "tokenizers>=0.21,<1", "openpyxl~=3.1.5", # Authentication and Security "python-dotenv~=1.1.1", @@ -38,10 +38,11 @@ dependencies = [ "json5~=0.10.0", "portalocker~=2.7.0", "pydantic-settings~=2.10.1", + "httpx~=0.28.1", "mcp~=1.26.0", "uv~=0.9.13", "aiosqlite~=0.21.0", - "lancedb>=0.4.0", + "lancedb>=0.29.2", ] [project.urls] @@ -52,7 +53,7 @@ Repository = "https://github.com/crewAIInc/crewAI" [project.optional-dependencies] tools = [ - "crewai-tools==1.9.3", + "crewai-tools==1.10.1", ] embeddings = [ "tiktoken~=0.8.0" @@ -65,7 +66,7 @@ openpyxl = [ ] mem0 = ["mem0ai~=0.1.94"] docling = [ - "docling~=2.63.0", + "docling~=2.75.0", ] qdrant = [ "qdrant-client[fastembed]~=1.14.3", @@ -87,7 +88,7 @@ bedrock = [ "boto3~=1.40.45", ] 
google-genai = [ - "google-genai~=1.49.0", + "google-genai~=1.65.0", ] azure-ai-inference = [ "azure-ai-inference~=1.0.0b9", diff --git a/lib/crewai/src/crewai/__init__.py b/lib/crewai/src/crewai/__init__.py index 87ffd144e..46c1f8f95 100644 --- a/lib/crewai/src/crewai/__init__.py +++ b/lib/crewai/src/crewai/__init__.py @@ -10,7 +10,6 @@ from crewai.flow.flow import Flow from crewai.knowledge.knowledge import Knowledge from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM -from crewai.memory.unified_memory import Memory from crewai.process import Process from crewai.task import Task from crewai.tasks.llm_guardrail import LLMGuardrail @@ -41,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None: _suppress_pydantic_deprecation_warnings() -__version__ = "1.9.3" +__version__ = "1.10.1" _telemetry_submitted = False @@ -72,6 +71,25 @@ def _track_install_async() -> None: _track_install_async() + +_LAZY_IMPORTS: dict[str, tuple[str, str]] = { + "Memory": ("crewai.memory.unified_memory", "Memory"), +} + + +def __getattr__(name: str) -> Any: + """Lazily import heavy modules (e.g. 
Memory → lancedb) on first access.""" + if name in _LAZY_IMPORTS: + module_path, attr = _LAZY_IMPORTS[name] + import importlib + + mod = importlib.import_module(module_path) + val = getattr(mod, attr) + globals()[name] = val + return val + raise AttributeError(f"module 'crewai' has no attribute {name!r}") + + __all__ = [ "LLM", "Agent", diff --git a/lib/crewai/src/crewai/a2a/utils/agent_card.py b/lib/crewai/src/crewai/a2a/utils/agent_card.py index c548cd1e7..45819bebd 100644 --- a/lib/crewai/src/crewai/a2a/utils/agent_card.py +++ b/lib/crewai/src/crewai/a2a/utils/agent_card.py @@ -4,6 +4,7 @@ from __future__ import annotations import asyncio from collections.abc import MutableMapping +import concurrent.futures from functools import lru_cache import ssl import time @@ -138,14 +139,17 @@ def fetch_agent_card( ttl_hash = int(time.time() // cache_ttl) return _fetch_agent_card_cached(endpoint, auth_hash, timeout, ttl_hash) - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) + coro = afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout) try: - return loop.run_until_complete( - afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout) - ) - finally: - loop.close() + asyncio.get_running_loop() + has_running_loop = True + except RuntimeError: + has_running_loop = False + + if has_running_loop: + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool: + return pool.submit(asyncio.run, coro).result() + return asyncio.run(coro) async def afetch_agent_card( @@ -203,14 +207,17 @@ def _fetch_agent_card_cached( """Cached sync version of fetch_agent_card.""" auth = _auth_store.get(auth_hash) - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) + coro = _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout) try: - return loop.run_until_complete( - _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout) - ) - finally: - loop.close() + asyncio.get_running_loop() + has_running_loop = True + except 
RuntimeError: + has_running_loop = False + + if has_running_loop: + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool: + return pool.submit(asyncio.run, coro).result() + return asyncio.run(coro) @cached(ttl=300, serializer=PickleSerializer()) # type: ignore[untyped-decorator] diff --git a/lib/crewai/src/crewai/a2a/utils/delegation.py b/lib/crewai/src/crewai/a2a/utils/delegation.py index cfcf51f36..3a6795c34 100644 --- a/lib/crewai/src/crewai/a2a/utils/delegation.py +++ b/lib/crewai/src/crewai/a2a/utils/delegation.py @@ -5,6 +5,7 @@ from __future__ import annotations import asyncio import base64 from collections.abc import AsyncIterator, Callable, MutableMapping +import concurrent.futures from contextlib import asynccontextmanager import logging from typing import TYPE_CHECKING, Any, Final, Literal @@ -194,56 +195,43 @@ def execute_a2a_delegation( Returns: TaskStateResult with status, result/error, history, and agent_card. - - Raises: - RuntimeError: If called from an async context with a running event loop. """ + coro = aexecute_a2a_delegation( + endpoint=endpoint, + auth=auth, + timeout=timeout, + task_description=task_description, + context=context, + context_id=context_id, + task_id=task_id, + reference_task_ids=reference_task_ids, + metadata=metadata, + extensions=extensions, + conversation_history=conversation_history, + agent_id=agent_id, + agent_role=agent_role, + agent_branch=agent_branch, + response_model=response_model, + turn_number=turn_number, + updates=updates, + from_task=from_task, + from_agent=from_agent, + skill_id=skill_id, + client_extensions=client_extensions, + transport=transport, + accepted_output_modes=accepted_output_modes, + input_files=input_files, + ) try: asyncio.get_running_loop() - raise RuntimeError( - "execute_a2a_delegation() cannot be called from an async context. " - "Use 'await aexecute_a2a_delegation()' instead." 
- ) - except RuntimeError as e: - if "no running event loop" not in str(e).lower(): - raise + has_running_loop = True + except RuntimeError: + has_running_loop = False - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete( - aexecute_a2a_delegation( - endpoint=endpoint, - auth=auth, - timeout=timeout, - task_description=task_description, - context=context, - context_id=context_id, - task_id=task_id, - reference_task_ids=reference_task_ids, - metadata=metadata, - extensions=extensions, - conversation_history=conversation_history, - agent_id=agent_id, - agent_role=agent_role, - agent_branch=agent_branch, - response_model=response_model, - turn_number=turn_number, - updates=updates, - from_task=from_task, - from_agent=from_agent, - skill_id=skill_id, - client_extensions=client_extensions, - transport=transport, - accepted_output_modes=accepted_output_modes, - input_files=input_files, - ) - ) - finally: - try: - loop.run_until_complete(loop.shutdown_asyncgens()) - finally: - loop.close() + if has_running_loop: + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool: + return pool.submit(asyncio.run, coro).result() + return asyncio.run(coro) async def aexecute_a2a_delegation( diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py index 02b286093..418ebe73d 100644 --- a/lib/crewai/src/crewai/agent/core.py +++ b/lib/crewai/src/crewai/agent/core.py @@ -8,11 +8,9 @@ import time from typing import ( TYPE_CHECKING, Any, - Final, Literal, cast, ) -from urllib.parse import urlparse from pydantic import ( BaseModel, @@ -61,16 +59,8 @@ from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.lite_agent_output import LiteAgentOutput from crewai.llms.base_llm import BaseLLM -from crewai.mcp import ( - MCPClient, - MCPServerConfig, - MCPServerHTTP, - MCPServerSSE, - MCPServerStdio, -) -from 
crewai.mcp.transports.http import HTTPTransport -from crewai.mcp.transports.sse import SSETransport -from crewai.mcp.transports.stdio import StdioTransport +from crewai.mcp import MCPServerConfig +from crewai.mcp.tool_resolver import MCPToolResolver from crewai.rag.embeddings.types import EmbedderConfig from crewai.security.fingerprint import Fingerprint from crewai.tools.agent_tools.agent_tools import AgentTools @@ -111,18 +101,8 @@ if TYPE_CHECKING: from crewai.utilities.types import LLMMessage -# MCP Connection timeout constants (in seconds) -MCP_CONNECTION_TIMEOUT: Final[int] = 10 -MCP_TOOL_EXECUTION_TIMEOUT: Final[int] = 30 -MCP_DISCOVERY_TIMEOUT: Final[int] = 15 -MCP_MAX_RETRIES: Final[int] = 3 - _passthrough_exceptions: tuple[type[Exception], ...] = () -# Simple in-memory cache for MCP tool schemas (duration: 5 minutes) -_mcp_schema_cache: dict[str, Any] = {} -_cache_ttl: Final[int] = 300 # 5 minutes - class Agent(BaseAgent): """Represents an agent in a system. @@ -154,7 +134,7 @@ class Agent(BaseAgent): model_config = ConfigDict() _times_executed: int = PrivateAttr(default=0) - _mcp_clients: list[Any] = PrivateAttr(default_factory=list) + _mcp_resolver: MCPToolResolver | None = PrivateAttr(default=None) _last_messages: list[LLMMessage] = PrivateAttr(default_factory=list) max_execution_time: int | None = Field( default=None, @@ -384,10 +364,10 @@ class Agent(BaseAgent): ) if unified_memory is not None: query = task.description - matches = unified_memory.recall(query, limit=10) + matches = unified_memory.recall(query, limit=5) if matches: memory = "Relevant memories:\n" + "\n".join( - f"- {m.record.content}" for m in matches + m.format() for m in matches ) if memory.strip() != "": task_prompt += self.i18n.slice("memory").format(memory=memory) @@ -622,10 +602,10 @@ class Agent(BaseAgent): ) if unified_memory is not None: query = task.description - matches = unified_memory.recall(query, limit=10) + matches = unified_memory.recall(query, limit=5) if matches: 
memory = "Relevant memories:\n" + "\n".join( - f"- {m.record.content}" for m in matches + m.format() for m in matches ) if memory.strip() != "": task_prompt += self.i18n.slice("memory").format(memory=memory) @@ -864,7 +844,11 @@ class Agent(BaseAgent): respect_context_window=self.respect_context_window, request_within_rpm_limit=rpm_limit_fn, callbacks=[TokenCalcHandler(self._token_process)], - response_model=task.response_model if task else None, + response_model=( + task.response_model or task.output_pydantic or task.output_json + ) + if task + else None, ) def _update_executor_parameters( @@ -893,7 +877,11 @@ class Agent(BaseAgent): self.agent_executor.stop = stop_words self.agent_executor.tools_names = get_tool_names(tools) self.agent_executor.tools_description = render_text_description_and_args(tools) - self.agent_executor.response_model = task.response_model if task else None + self.agent_executor.response_model = ( + (task.response_model or task.output_pydantic or task.output_json) + if task + else None + ) self.agent_executor.tools_handler = self.tools_handler self.agent_executor.request_within_rpm_limit = rpm_limit_fn @@ -926,544 +914,17 @@ class Agent(BaseAgent): def get_mcp_tools(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]: """Convert MCP server references/configs to CrewAI tools. - Supports both string references (backwards compatible) and structured - configuration objects (MCPServerStdio, MCPServerHTTP, MCPServerSSE). - - Args: - mcps: List of MCP server references (strings) or configurations. - - Returns: - List of BaseTool instances from MCP servers. + Delegates to :class:`~crewai.mcp.tool_resolver.MCPToolResolver`. 
""" - all_tools = [] - clients = [] - - for mcp_config in mcps: - if isinstance(mcp_config, str): - tools = self._get_mcp_tools_from_string(mcp_config) - else: - tools, client = self._get_native_mcp_tools(mcp_config) - if client: - clients.append(client) - - all_tools.extend(tools) - - # Store clients for cleanup - self._mcp_clients.extend(clients) - return all_tools + self._cleanup_mcp_clients() + self._mcp_resolver = MCPToolResolver(agent=self, logger=self._logger) + return self._mcp_resolver.resolve(mcps) def _cleanup_mcp_clients(self) -> None: """Cleanup MCP client connections after task execution.""" - if not self._mcp_clients: - return - - async def _disconnect_all() -> None: - for client in self._mcp_clients: - if client and hasattr(client, "connected") and client.connected: - await client.disconnect() - - try: - asyncio.run(_disconnect_all()) - except Exception as e: - self._logger.log("error", f"Error during MCP client cleanup: {e}") - finally: - self._mcp_clients.clear() - - def _get_mcp_tools_from_string(self, mcp_ref: str) -> list[BaseTool]: - """Get tools from legacy string-based MCP references. - - This method maintains backwards compatibility with string-based - MCP references (https://... and crewai-amp:...). - - Args: - mcp_ref: String reference to MCP server. - - Returns: - List of BaseTool instances. 
- """ - if mcp_ref.startswith("crewai-amp:"): - return self._get_amp_mcp_tools(mcp_ref) - if mcp_ref.startswith("https://"): - return self._get_external_mcp_tools(mcp_ref) - return [] - - def _get_external_mcp_tools(self, mcp_ref: str) -> list[BaseTool]: - """Get tools from external HTTPS MCP server with graceful error handling.""" - from crewai.tools.mcp_tool_wrapper import MCPToolWrapper - - # Parse server URL and optional tool name - if "#" in mcp_ref: - server_url, specific_tool = mcp_ref.split("#", 1) - else: - server_url, specific_tool = mcp_ref, None - - server_params = {"url": server_url} - server_name = self._extract_server_name(server_url) - - try: - # Get tool schemas with timeout and error handling - tool_schemas = self._get_mcp_tool_schemas(server_params) - - if not tool_schemas: - self._logger.log( - "warning", f"No tools discovered from MCP server: {server_url}" - ) - return [] - - tools = [] - for tool_name, schema in tool_schemas.items(): - # Skip if specific tool requested and this isn't it - if specific_tool and tool_name != specific_tool: - continue - - try: - wrapper = MCPToolWrapper( - mcp_server_params=server_params, - tool_name=tool_name, - tool_schema=schema, - server_name=server_name, - ) - tools.append(wrapper) - except Exception as e: - self._logger.log( - "warning", - f"Failed to create MCP tool wrapper for {tool_name}: {e}", - ) - continue - - if specific_tool and not tools: - self._logger.log( - "warning", - f"Specific tool '{specific_tool}' not found on MCP server: {server_url}", - ) - - return cast(list[BaseTool], tools) - - except Exception as e: - self._logger.log( - "warning", f"Failed to connect to MCP server {server_url}: {e}" - ) - return [] - - def _get_native_mcp_tools( - self, mcp_config: MCPServerConfig - ) -> tuple[list[BaseTool], Any | None]: - """Get tools from MCP server using structured configuration. 
- - This method creates an MCP client based on the configuration type, - connects to the server, discovers tools, applies filtering, and - returns wrapped tools along with the client instance for cleanup. - - Args: - mcp_config: MCP server configuration (MCPServerStdio, MCPServerHTTP, or MCPServerSSE). - - Returns: - Tuple of (list of BaseTool instances, MCPClient instance for cleanup). - """ - from crewai.tools.base_tool import BaseTool - from crewai.tools.mcp_native_tool import MCPNativeTool - - transport: StdioTransport | HTTPTransport | SSETransport - if isinstance(mcp_config, MCPServerStdio): - transport = StdioTransport( - command=mcp_config.command, - args=mcp_config.args, - env=mcp_config.env, - ) - server_name = f"{mcp_config.command}_{'_'.join(mcp_config.args)}" - elif isinstance(mcp_config, MCPServerHTTP): - transport = HTTPTransport( - url=mcp_config.url, - headers=mcp_config.headers, - streamable=mcp_config.streamable, - ) - server_name = self._extract_server_name(mcp_config.url) - elif isinstance(mcp_config, MCPServerSSE): - transport = SSETransport( - url=mcp_config.url, - headers=mcp_config.headers, - ) - server_name = self._extract_server_name(mcp_config.url) - else: - raise ValueError(f"Unsupported MCP server config type: {type(mcp_config)}") - - client = MCPClient( - transport=transport, - cache_tools_list=mcp_config.cache_tools_list, - ) - - async def _setup_client_and_list_tools() -> list[dict[str, Any]]: - """Async helper to connect and list tools in same event loop.""" - - try: - if not client.connected: - await client.connect() - - tools_list = await client.list_tools() - - try: - await client.disconnect() - # Small delay to allow background tasks to finish cleanup - # This helps prevent "cancel scope in different task" errors - # when asyncio.run() closes the event loop - await asyncio.sleep(0.1) - except Exception as e: - self._logger.log("error", f"Error during disconnect: {e}") - - return tools_list - except Exception as e: - if 
client.connected: - await client.disconnect() - await asyncio.sleep(0.1) - raise RuntimeError( - f"Error during setup client and list tools: {e}" - ) from e - - try: - try: - asyncio.get_running_loop() - import concurrent.futures - - with concurrent.futures.ThreadPoolExecutor() as executor: - future = executor.submit( - asyncio.run, _setup_client_and_list_tools() - ) - tools_list = future.result() - except RuntimeError: - try: - tools_list = asyncio.run(_setup_client_and_list_tools()) - except RuntimeError as e: - error_msg = str(e).lower() - if "cancel scope" in error_msg or "task" in error_msg: - raise ConnectionError( - "MCP connection failed due to event loop cleanup issues. " - "This may be due to authentication errors or server unavailability." - ) from e - except asyncio.CancelledError as e: - raise ConnectionError( - "MCP connection was cancelled. This may indicate an authentication " - "error or server unavailability." - ) from e - - if mcp_config.tool_filter: - filtered_tools = [] - for tool in tools_list: - if callable(mcp_config.tool_filter): - try: - from crewai.mcp.filters import ToolFilterContext - - context = ToolFilterContext( - agent=self, - server_name=server_name, - run_context=None, - ) - if mcp_config.tool_filter(context, tool): # type: ignore[call-arg, arg-type] - filtered_tools.append(tool) - except (TypeError, AttributeError): - if mcp_config.tool_filter(tool): # type: ignore[call-arg, arg-type] - filtered_tools.append(tool) - else: - # Not callable - include tool - filtered_tools.append(tool) - tools_list = filtered_tools - - tools = [] - for tool_def in tools_list: - tool_name = tool_def.get("name", "") - if not tool_name: - continue - - # Convert inputSchema to Pydantic model if present - args_schema = None - if tool_def.get("inputSchema"): - args_schema = self._json_schema_to_pydantic( - tool_name, tool_def["inputSchema"] - ) - - tool_schema = { - "description": tool_def.get("description", ""), - "args_schema": args_schema, - } - - try: 
- native_tool = MCPNativeTool( - mcp_client=client, - tool_name=tool_name, - tool_schema=tool_schema, - server_name=server_name, - ) - tools.append(native_tool) - except Exception as e: - self._logger.log("error", f"Failed to create native MCP tool: {e}") - continue - - return cast(list[BaseTool], tools), client - except Exception as e: - if client.connected: - asyncio.run(client.disconnect()) - - raise RuntimeError(f"Failed to get native MCP tools: {e}") from e - - def _get_amp_mcp_tools(self, amp_ref: str) -> list[BaseTool]: - """Get tools from CrewAI AMP MCP marketplace.""" - # Parse: "crewai-amp:mcp-name" or "crewai-amp:mcp-name#tool_name" - amp_part = amp_ref.replace("crewai-amp:", "") - if "#" in amp_part: - mcp_name, specific_tool = amp_part.split("#", 1) - else: - mcp_name, specific_tool = amp_part, None - - # Call AMP API to get MCP server URLs - mcp_servers = self._fetch_amp_mcp_servers(mcp_name) - - tools = [] - for server_config in mcp_servers: - server_ref = server_config["url"] - if specific_tool: - server_ref += f"#{specific_tool}" - server_tools = self._get_external_mcp_tools(server_ref) - tools.extend(server_tools) - - return tools - - @staticmethod - def _extract_server_name(server_url: str) -> str: - """Extract clean server name from URL for tool prefixing.""" - - parsed = urlparse(server_url) - domain = parsed.netloc.replace(".", "_") - path = parsed.path.replace("/", "_").strip("_") - return f"{domain}_{path}" if path else domain - - def _get_mcp_tool_schemas( - self, server_params: dict[str, Any] - ) -> dict[str, dict[str, Any]]: - """Get tool schemas from MCP server for wrapper creation with caching.""" - server_url = server_params["url"] - - # Check cache first - cache_key = server_url - current_time = time.time() - - if cache_key in _mcp_schema_cache: - cached_data, cache_time = _mcp_schema_cache[cache_key] - if current_time - cache_time < _cache_ttl: - self._logger.log( - "debug", f"Using cached MCP tool schemas for {server_url}" - ) - 
return cached_data # type: ignore[no-any-return] - - try: - schemas = asyncio.run(self._get_mcp_tool_schemas_async(server_params)) - - # Cache successful results - _mcp_schema_cache[cache_key] = (schemas, current_time) - - return schemas - except Exception as e: - # Log warning but don't raise - this allows graceful degradation - self._logger.log( - "warning", f"Failed to get MCP tool schemas from {server_url}: {e}" - ) - return {} - - async def _get_mcp_tool_schemas_async( - self, server_params: dict[str, Any] - ) -> dict[str, dict[str, Any]]: - """Async implementation of MCP tool schema retrieval with timeouts and retries.""" - server_url = server_params["url"] - return await self._retry_mcp_discovery( - self._discover_mcp_tools_with_timeout, server_url - ) - - async def _retry_mcp_discovery( - self, operation_func: Any, server_url: str - ) -> dict[str, dict[str, Any]]: - """Retry MCP discovery operation with exponential backoff, avoiding try-except in loop.""" - last_error = None - - for attempt in range(MCP_MAX_RETRIES): - # Execute single attempt outside try-except loop structure - result, error, should_retry = await self._attempt_mcp_discovery( - operation_func, server_url - ) - - # Success case - return immediately - if result is not None: - return result - - # Non-retryable error - raise immediately - if not should_retry: - raise RuntimeError(error) - - # Retryable error - continue with backoff - last_error = error - if attempt < MCP_MAX_RETRIES - 1: - wait_time = 2**attempt # Exponential backoff - await asyncio.sleep(wait_time) - - raise RuntimeError( - f"Failed to discover MCP tools after {MCP_MAX_RETRIES} attempts: {last_error}" - ) - - @staticmethod - async def _attempt_mcp_discovery( - operation_func: Any, server_url: str - ) -> tuple[dict[str, dict[str, Any]] | None, str, bool]: - """Attempt single MCP discovery operation and return (result, error_message, should_retry).""" - try: - result = await operation_func(server_url) - return result, "", False 
- - except ImportError: - return ( - None, - "MCP library not available. Please install with: pip install mcp", - False, - ) - - except asyncio.TimeoutError: - return ( - None, - f"MCP discovery timed out after {MCP_DISCOVERY_TIMEOUT} seconds", - True, - ) - - except Exception as e: - error_str = str(e).lower() - - # Classify errors as retryable or non-retryable - if "authentication" in error_str or "unauthorized" in error_str: - return None, f"Authentication failed for MCP server: {e!s}", False - if "connection" in error_str or "network" in error_str: - return None, f"Network connection failed: {e!s}", True - if "json" in error_str or "parsing" in error_str: - return None, f"Server response parsing error: {e!s}", True - return None, f"MCP discovery error: {e!s}", False - - async def _discover_mcp_tools_with_timeout( - self, server_url: str - ) -> dict[str, dict[str, Any]]: - """Discover MCP tools with timeout wrapper.""" - return await asyncio.wait_for( - self._discover_mcp_tools(server_url), timeout=MCP_DISCOVERY_TIMEOUT - ) - - async def _discover_mcp_tools(self, server_url: str) -> dict[str, dict[str, Any]]: - """Discover tools from MCP server with proper timeout handling.""" - from mcp import ClientSession - from mcp.client.streamable_http import streamablehttp_client - - async with streamablehttp_client(server_url) as (read, write, _): - async with ClientSession(read, write) as session: - # Initialize the connection with timeout - await asyncio.wait_for( - session.initialize(), timeout=MCP_CONNECTION_TIMEOUT - ) - - # List available tools with timeout - tools_result = await asyncio.wait_for( - session.list_tools(), - timeout=MCP_DISCOVERY_TIMEOUT - MCP_CONNECTION_TIMEOUT, - ) - - schemas = {} - for tool in tools_result.tools: - args_schema = None - if hasattr(tool, "inputSchema") and tool.inputSchema: - args_schema = self._json_schema_to_pydantic( - sanitize_tool_name(tool.name), tool.inputSchema - ) - - schemas[sanitize_tool_name(tool.name)] = { - 
"description": getattr(tool, "description", ""), - "args_schema": args_schema, - } - return schemas - - def _json_schema_to_pydantic( - self, tool_name: str, json_schema: dict[str, Any] - ) -> type: - """Convert JSON Schema to Pydantic model for tool arguments. - - Args: - tool_name: Name of the tool (used for model naming) - json_schema: JSON Schema dict with 'properties', 'required', etc. - - Returns: - Pydantic BaseModel class - """ - from pydantic import Field, create_model - - properties = json_schema.get("properties", {}) - required_fields = json_schema.get("required", []) - - field_definitions: dict[str, Any] = {} - - for field_name, field_schema in properties.items(): - field_type = self._json_type_to_python(field_schema) - field_description = field_schema.get("description", "") - - is_required = field_name in required_fields - - if is_required: - field_definitions[field_name] = ( - field_type, - Field(..., description=field_description), - ) - else: - field_definitions[field_name] = ( - field_type | None, - Field(default=None, description=field_description), - ) - - model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema" - return create_model(model_name, **field_definitions) # type: ignore[no-any-return] - - def _json_type_to_python(self, field_schema: dict[str, Any]) -> type: - """Convert JSON Schema type to Python type. 
- - Args: - field_schema: JSON Schema field definition - - Returns: - Python type - """ - - json_type = field_schema.get("type") - - if "anyOf" in field_schema: - types: list[type] = [] - for option in field_schema["anyOf"]: - if "const" in option: - types.append(str) - else: - types.append(self._json_type_to_python(option)) - unique_types = list(set(types)) - if len(unique_types) > 1: - result: Any = unique_types[0] - for t in unique_types[1:]: - result = result | t - return result # type: ignore[no-any-return] - return unique_types[0] - - type_mapping: dict[str | None, type] = { - "string": str, - "number": float, - "integer": int, - "boolean": bool, - "array": list, - "object": dict, - } - - return type_mapping.get(json_type, Any) - - @staticmethod - def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]: - """Fetch MCP server configurations from CrewAI AMP API.""" - # TODO: Implement AMP API call to "integrations/mcps" endpoint - # Should return list of server configs with URLs - return [] + if self._mcp_resolver is not None: + self._mcp_resolver.cleanup() + self._mcp_resolver = None @staticmethod def get_multimodal_tools() -> Sequence[BaseTool]: @@ -1695,11 +1156,15 @@ class Agent(BaseAgent): # Process platform apps and MCP tools if self.apps: platform_tools = self.get_platform_tools(self.apps) - if platform_tools and self.tools is not None: + if platform_tools: + if self.tools is None: + self.tools = [] self.tools.extend(platform_tools) if self.mcps: mcps = self.get_mcp_tools(self.mcps) - if mcps and self.tools is not None: + if mcps: + if self.tools is None: + self.tools = [] self.tools.extend(mcps) # Prepare tools @@ -1712,7 +1177,8 @@ class Agent(BaseAgent): existing_names = {sanitize_tool_name(t.name) for t in raw_tools} raw_tools.extend( - mt for mt in create_memory_tools(agent_memory) + mt + for mt in create_memory_tools(agent_memory) if sanitize_tool_name(mt.name) not in existing_names ) @@ -1802,11 +1268,11 @@ class Agent(BaseAgent): ), ) 
start_time = time.time() - matches = agent_memory.recall(formatted_messages, limit=10) + matches = agent_memory.recall(formatted_messages, limit=20) memory_block = "" if matches: memory_block = "Relevant memories:\n" + "\n".join( - f"- {m.record.content}" for m in matches + m.format() for m in matches ) if memory_block: formatted_messages += "\n\n" + self.i18n.slice("memory").format( @@ -1937,14 +1403,15 @@ class Agent(BaseAgent): if isinstance(messages, str): input_str = messages else: - input_str = "\n".join( - str(msg.get("content", "")) for msg in messages if msg.get("content") - ) or "User request" - raw = ( - f"Input: {input_str}\n" - f"Agent: {self.role}\n" - f"Result: {output_text}" - ) + input_str = ( + "\n".join( + str(msg.get("content", "")) + for msg in messages + if msg.get("content") + ) + or "User request" + ) + raw = f"Input: {input_str}\nAgent: {self.role}\nResult: {output_text}" extracted = agent_memory.extract_memories(raw) if extracted: agent_memory.remember_many(extracted) diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py index 286f244ed..8b2b9737c 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent.py @@ -4,7 +4,8 @@ from abc import ABC, abstractmethod from collections.abc import Callable from copy import copy as shallow_copy from hashlib import md5 -from typing import Any, Literal +import re +from typing import Any, Final, Literal import uuid from pydantic import ( @@ -36,6 +37,11 @@ from crewai.utilities.rpm_controller import RPMController from crewai.utilities.string_utils import interpolate_only +_SLUG_RE: Final[re.Pattern[str]] = re.compile( + r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#\w+)?$" +) + + PlatformApp = Literal[ "asana", "box", @@ -197,7 +203,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): ) mcps: list[str | MCPServerConfig] | None = Field( default=None, - 
description="List of MCP server references. Supports 'https://server.com/path' for external servers and 'crewai-amp:mcp-name' for AMP marketplace. Use '#tool_name' suffix for specific tools.", + description="List of MCP server references. Supports 'https://server.com/path' for external servers and bare slugs like 'notion' for connected MCP integrations. Use '#tool_name' suffix for specific tools.", ) memory: Any = Field( default=None, @@ -276,14 +282,16 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta): validated_mcps: list[str | MCPServerConfig] = [] for mcp in mcps: if isinstance(mcp, str): - if mcp.startswith(("https://", "crewai-amp:")): + if mcp.startswith("https://"): + validated_mcps.append(mcp) + elif _SLUG_RE.match(mcp): validated_mcps.append(mcp) else: raise ValueError( - f"Invalid MCP reference: {mcp}. " - "String references must start with 'https://' or 'crewai-amp:'" + f"Invalid MCP reference: {mcp!r}. " + "String references must be an 'https://' URL or a valid " + "slug (e.g. 'notion', 'notion#search', 'crewai-amp:notion')." 
) - elif isinstance(mcp, (MCPServerConfig)): validated_mcps.append(mcp) else: diff --git a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py index b36595ec9..9dd1e2396 100644 --- a/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py +++ b/lib/crewai/src/crewai/agents/agent_builder/base_agent_executor_mixin.py @@ -30,12 +30,9 @@ class CrewAgentExecutorMixin: memory = getattr(self.agent, "memory", None) or ( getattr(self.crew, "_memory", None) if self.crew else None ) - if memory is None or not self.task: + if memory is None or not self.task or memory.read_only: return - if ( - f"Action: {sanitize_tool_name('Delegate work to coworker')}" - in output.text - ): + if f"Action: {sanitize_tool_name('Delegate work to coworker')}" in output.text: return try: raw = ( @@ -48,6 +45,4 @@ class CrewAgentExecutorMixin: if extracted: memory.remember_many(extracted, agent_role=self.agent.role) except Exception as e: - self.agent._logger.log( - "error", f"Failed to save to memory: {e}" - ) + self.agent._logger.log("error", f"Failed to save to memory: {e}") diff --git a/lib/crewai/src/crewai/agents/cache/__init__.py b/lib/crewai/src/crewai/agents/cache/__init__.py index d18771ca3..6cc557fd9 100644 --- a/lib/crewai/src/crewai/agents/cache/__init__.py +++ b/lib/crewai/src/crewai/agents/cache/__init__.py @@ -1,5 +1,4 @@ from crewai.agents.cache.cache_handler import CacheHandler - __all__ = ["CacheHandler"] diff --git a/lib/crewai/src/crewai/agents/crew_agent_executor.py b/lib/crewai/src/crewai/agents/crew_agent_executor.py index b734556af..ffa733d6b 100644 --- a/lib/crewai/src/crewai/agents/crew_agent_executor.py +++ b/lib/crewai/src/crewai/agents/crew_agent_executor.py @@ -6,7 +6,11 @@ and memory management. 
from __future__ import annotations +import asyncio from collections.abc import Callable +from concurrent.futures import ThreadPoolExecutor, as_completed +import contextvars +import inspect import logging from typing import TYPE_CHECKING, Any, Literal, cast @@ -47,6 +51,7 @@ from crewai.utilities.agent_utils import ( handle_unknown_error, has_reached_max_iterations, is_context_length_exceeded, + parse_tool_call_args, process_llm_response, track_delegation_if_needed, ) @@ -483,8 +488,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): # No tools available, fall back to simple LLM call return self._invoke_loop_native_no_tools() - openai_tools, available_functions = convert_tools_to_openai_schema( - self.original_tools + openai_tools, available_functions, self._tool_name_mapping = ( + convert_tools_to_openai_schema(self.original_tools) ) while True: @@ -685,30 +690,141 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): Returns: AgentFinish if tool has result_as_answer=True, None otherwise. 
""" - from datetime import datetime - import json - - from crewai.events import crewai_event_bus - from crewai.events.types.tool_usage_events import ( - ToolUsageErrorEvent, - ToolUsageFinishedEvent, - ToolUsageStartedEvent, - ) - if not tool_calls: return None - # Only process the FIRST tool call for sequential execution with reflection - tool_call = tool_calls[0] + parsed_calls = [ + parsed + for tool_call in tool_calls + if (parsed := self._parse_native_tool_call(tool_call)) is not None + ] + if not parsed_calls: + return None - # Extract tool call info - handle OpenAI-style, Anthropic-style, and Gemini-style + original_tools_by_name: dict[str, Any] = dict(self._tool_name_mapping) + + if len(parsed_calls) > 1: + has_result_as_answer_in_batch = any( + bool( + original_tools_by_name.get(func_name) + and getattr( + original_tools_by_name.get(func_name), "result_as_answer", False + ) + ) + for _, func_name, _ in parsed_calls + ) + has_max_usage_count_in_batch = any( + bool( + original_tools_by_name.get(func_name) + and getattr( + original_tools_by_name.get(func_name), + "max_usage_count", + None, + ) + is not None + ) + for _, func_name, _ in parsed_calls + ) + + # Preserve historical sequential behavior for result_as_answer batches. + # Also avoid threading around usage counters for max_usage_count tools. 
+ if has_result_as_answer_in_batch or has_max_usage_count_in_batch: + logger.debug( + "Skipping parallel native execution because batch includes result_as_answer or max_usage_count tool" + ) + else: + execution_plan: list[ + tuple[str, str, str | dict[str, Any], Any | None] + ] = [] + for call_id, func_name, func_args in parsed_calls: + original_tool = original_tools_by_name.get(func_name) + execution_plan.append( + (call_id, func_name, func_args, original_tool) + ) + + self._append_assistant_tool_calls_message( + [ + (call_id, func_name, func_args) + for call_id, func_name, func_args, _ in execution_plan + ] + ) + + max_workers = min(8, len(execution_plan)) + ordered_results: list[dict[str, Any] | None] = [None] * len( + execution_plan + ) + with ThreadPoolExecutor(max_workers=max_workers) as pool: + futures = { + pool.submit( + contextvars.copy_context().run, + self._execute_single_native_tool_call, + call_id=call_id, + func_name=func_name, + func_args=func_args, + available_functions=available_functions, + original_tool=original_tool, + should_execute=True, + ): idx + for idx, ( + call_id, + func_name, + func_args, + original_tool, + ) in enumerate(execution_plan) + } + for future in as_completed(futures): + idx = futures[future] + ordered_results[idx] = future.result() + + for execution_result in ordered_results: + if not execution_result: + continue + tool_finish = self._append_tool_result_and_check_finality( + execution_result + ) + if tool_finish: + return tool_finish + + reasoning_prompt = self._i18n.slice("post_tool_reasoning") + reasoning_message: LLMMessage = { + "role": "user", + "content": reasoning_prompt, + } + self.messages.append(reasoning_message) + return None + + # Sequential behavior: process only first tool call, then force reflection. 
+ call_id, func_name, func_args = parsed_calls[0] + self._append_assistant_tool_calls_message([(call_id, func_name, func_args)]) + + execution_result = self._execute_single_native_tool_call( + call_id=call_id, + func_name=func_name, + func_args=func_args, + available_functions=available_functions, + original_tool=original_tools_by_name.get(func_name), + should_execute=True, + ) + tool_finish = self._append_tool_result_and_check_finality(execution_result) + if tool_finish: + return tool_finish + + reasoning_prompt = self._i18n.slice("post_tool_reasoning") + reasoning_message = { + "role": "user", + "content": reasoning_prompt, + } + self.messages.append(reasoning_message) + return None + + def _parse_native_tool_call( + self, tool_call: Any + ) -> tuple[str, str, str | dict[str, Any]] | None: if hasattr(tool_call, "function"): - # OpenAI-style: has .function.name and .function.arguments call_id = getattr(tool_call, "id", f"call_{id(tool_call)}") func_name = sanitize_tool_name(tool_call.function.name) - func_args = tool_call.function.arguments - elif hasattr(tool_call, "function_call") and tool_call.function_call: - # Gemini-style: has .function_call.name and .function_call.args + return call_id, func_name, tool_call.function.arguments + if hasattr(tool_call, "function_call") and tool_call.function_call: call_id = f"call_{id(tool_call)}" func_name = sanitize_tool_name(tool_call.function_call.name) func_args = ( @@ -716,13 +832,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): if tool_call.function_call.args else {} ) - elif hasattr(tool_call, "name") and hasattr(tool_call, "input"): - # Anthropic format: has .name and .input (ToolUseBlock) + return call_id, func_name, func_args + if hasattr(tool_call, "name") and hasattr(tool_call, "input"): call_id = getattr(tool_call, "id", f"call_{id(tool_call)}") func_name = sanitize_tool_name(tool_call.name) - func_args = tool_call.input # Already a dict in Anthropic - elif isinstance(tool_call, dict): - # Support OpenAI 
"id", Bedrock "toolUseId", or generate one + return call_id, func_name, tool_call.input + if isinstance(tool_call, dict): call_id = ( tool_call.get("id") or tool_call.get("toolUseId") @@ -733,10 +848,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): func_info.get("name", "") or tool_call.get("name", "") ) func_args = func_info.get("arguments", "{}") or tool_call.get("input", {}) - else: - return None + return call_id, func_name, func_args + return None + + def _append_assistant_tool_calls_message( + self, + parsed_calls: list[tuple[str, str, str | dict[str, Any]]], + ) -> None: + import json - # Append assistant message with single tool call assistant_message: LLMMessage = { "role": "assistant", "content": None, @@ -751,42 +871,54 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): else json.dumps(func_args), }, } + for call_id, func_name, func_args in parsed_calls ], } - self.messages.append(assistant_message) - # Parse arguments for the single tool call - if isinstance(func_args, str): - try: - args_dict = json.loads(func_args) - except json.JSONDecodeError: - args_dict = {} - else: - args_dict = func_args + def _execute_single_native_tool_call( + self, + *, + call_id: str, + func_name: str, + func_args: str | dict[str, Any], + available_functions: dict[str, Callable[..., Any]], + original_tool: Any | None = None, + should_execute: bool = True, + ) -> dict[str, Any]: + from datetime import datetime + import json - agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown" + from crewai.events.types.tool_usage_events import ( + ToolUsageErrorEvent, + ToolUsageFinishedEvent, + ToolUsageStartedEvent, + ) - # Find original tool by matching sanitized name (needed for cache_function and result_as_answer) + args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id, original_tool) + if parse_error is not None: + return parse_error - original_tool = None - for tool in self.original_tools or []: - if sanitize_tool_name(tool.name) 
== func_name: - original_tool = tool - break + if original_tool is None: + for tool in self.original_tools or []: + if sanitize_tool_name(tool.name) == func_name: + original_tool = tool + break - # Check if tool has reached max usage count max_usage_reached = False - if original_tool: - if ( - hasattr(original_tool, "max_usage_count") - and original_tool.max_usage_count is not None - and original_tool.current_usage_count >= original_tool.max_usage_count - ): - max_usage_reached = True + if not should_execute and original_tool: + max_usage_reached = True + elif ( + should_execute + and original_tool + and (max_count := getattr(original_tool, "max_usage_count", None)) + is not None + and getattr(original_tool, "current_usage_count", 0) >= max_count + ): + max_usage_reached = True - # Check cache before executing from_cache = False + result: str = "Tool not found" input_str = json.dumps(args_dict) if args_dict else "" if self.tools_handler and self.tools_handler.cache: cached_result = self.tools_handler.cache.read( @@ -800,7 +932,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): ) from_cache = True - # Emit tool usage started event + agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown" started_at = datetime.now() crewai_event_bus.emit( self, @@ -816,14 +948,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): track_delegation_if_needed(func_name, args_dict, self.task) - # Find the structured tool for hook context structured_tool: CrewStructuredTool | None = None - for structured in self.tools or []: - if sanitize_tool_name(structured.name) == func_name: - structured_tool = structured - break + if original_tool is not None: + for structured in self.tools or []: + if getattr(structured, "_original_tool", None) is original_tool: + structured_tool = structured + break + if structured_tool is None: + for structured in self.tools or []: + if sanitize_tool_name(structured.name) == func_name: + structured_tool = structured + break - # Execute 
before_tool_call hooks hook_blocked = False before_hook_context = ToolCallHookContext( tool_name=func_name, @@ -847,58 +983,48 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): color="red", ) - # If hook blocked execution, set result and skip tool execution if hook_blocked: result = f"Tool execution blocked by hook. Tool: {func_name}" - # Execute the tool (only if not cached, not at max usage, and not blocked by hook) - elif not from_cache and not max_usage_reached: - result = "Tool not found" - if func_name in available_functions: - try: - tool_func = available_functions[func_name] - raw_result = tool_func(**args_dict) - - # Add to cache after successful execution (before string conversion) - if self.tools_handler and self.tools_handler.cache: - should_cache = True - if ( - original_tool - and hasattr(original_tool, "cache_function") - and callable(original_tool.cache_function) - ): - should_cache = original_tool.cache_function( - args_dict, raw_result - ) - if should_cache: - self.tools_handler.cache.add( - tool=func_name, input=input_str, output=raw_result - ) - - # Convert to string for message - result = ( - str(raw_result) - if not isinstance(raw_result, str) - else raw_result - ) - except Exception as e: - result = f"Error executing tool: {e}" - if self.task: - self.task.increment_tools_errors() - crewai_event_bus.emit( - self, - event=ToolUsageErrorEvent( - tool_name=func_name, - tool_args=args_dict, - from_agent=self.agent, - from_task=self.task, - agent_key=agent_key, - error=e, - ), - ) - error_event_emitted = True elif max_usage_reached and original_tool: - # Return error message when max usage limit is reached result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." 
+ elif not from_cache and func_name in available_functions: + try: + raw_result = available_functions[func_name](**args_dict) + + if self.tools_handler and self.tools_handler.cache: + should_cache = True + if ( + original_tool + and hasattr(original_tool, "cache_function") + and callable(original_tool.cache_function) + ): + should_cache = original_tool.cache_function( + args_dict, raw_result + ) + if should_cache: + self.tools_handler.cache.add( + tool=func_name, input=input_str, output=raw_result + ) + + result = ( + str(raw_result) if not isinstance(raw_result, str) else raw_result + ) + except Exception as e: + result = f"Error executing tool: {e}" + if self.task: + self.task.increment_tools_errors() + crewai_event_bus.emit( + self, + event=ToolUsageErrorEvent( + tool_name=func_name, + tool_args=args_dict, + from_agent=self.agent, + from_task=self.task, + agent_key=agent_key, + error=e, + ), + ) + error_event_emitted = True after_hook_context = ToolCallHookContext( tool_name=func_name, @@ -938,7 +1064,23 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): ), ) - # Append tool result message + return { + "call_id": call_id, + "func_name": func_name, + "result": result, + "from_cache": from_cache, + "original_tool": original_tool, + } + + def _append_tool_result_and_check_finality( + self, execution_result: dict[str, Any] + ) -> AgentFinish | None: + call_id = cast(str, execution_result["call_id"]) + func_name = cast(str, execution_result["func_name"]) + result = cast(str, execution_result["result"]) + from_cache = cast(bool, execution_result["from_cache"]) + original_tool = execution_result["original_tool"] + tool_message: LLMMessage = { "role": "tool", "tool_call_id": call_id, @@ -947,7 +1089,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): } self.messages.append(tool_message) - # Log the tool execution if self.agent and self.agent.verbose: cache_info = " (from cache)" if from_cache else "" self._printer.print( @@ -960,20 +1101,11 @@ class 
CrewAgentExecutor(CrewAgentExecutorMixin): and hasattr(original_tool, "result_as_answer") and original_tool.result_as_answer ): - # Return immediately with tool result as final answer return AgentFinish( thought="Tool result is the final answer", output=result, text=result, ) - - # Inject post-tool reasoning prompt to enforce analysis - reasoning_prompt = self._i18n.slice("post_tool_reasoning") - reasoning_message: LLMMessage = { - "role": "user", - "content": reasoning_prompt, - } - self.messages.append(reasoning_message) return None async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]: @@ -1133,7 +1265,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): formatted_answer, tool_result ) - self._invoke_step_callback(formatted_answer) # type: ignore[arg-type] + await self._ainvoke_step_callback(formatted_answer) # type: ignore[arg-type] self._append_message(formatted_answer.text) # type: ignore[union-attr] except OutputParserError as e: @@ -1186,8 +1318,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): if not self.original_tools: return await self._ainvoke_loop_native_no_tools() - openai_tools, available_functions = convert_tools_to_openai_schema( - self.original_tools + openai_tools, available_functions, self._tool_name_mapping = ( + convert_tools_to_openai_schema(self.original_tools) ) while True: @@ -1248,7 +1380,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): output=answer, text=answer, ) - self._invoke_step_callback(formatted_answer) + await self._ainvoke_step_callback(formatted_answer) self._append_message(answer) # Save final answer to messages self._show_logs(formatted_answer) return formatted_answer @@ -1260,7 +1392,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): output=answer, text=output_json, ) - self._invoke_step_callback(formatted_answer) + await self._ainvoke_step_callback(formatted_answer) self._append_message(output_json) self._show_logs(formatted_answer) return formatted_answer @@ -1271,7 +1403,7 @@ class 
CrewAgentExecutor(CrewAgentExecutorMixin): output=str(answer), text=str(answer), ) - self._invoke_step_callback(formatted_answer) + await self._ainvoke_step_callback(formatted_answer) self._append_message(str(answer)) # Save final answer to messages self._show_logs(formatted_answer) return formatted_answer @@ -1365,13 +1497,28 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): def _invoke_step_callback( self, formatted_answer: AgentAction | AgentFinish ) -> None: - """Invoke step callback. + """Invoke step callback (sync context). Args: formatted_answer: Current agent response. """ if self.step_callback: - self.step_callback(formatted_answer) + cb_result = self.step_callback(formatted_answer) + if inspect.iscoroutine(cb_result): + asyncio.run(cb_result) + + async def _ainvoke_step_callback( + self, formatted_answer: AgentAction | AgentFinish + ) -> None: + """Invoke step callback (async context). + + Args: + formatted_answer: Current agent response. + """ + if self.step_callback: + cb_result = self.step_callback(formatted_answer) + if inspect.iscoroutine(cb_result): + await cb_result def _append_message( self, text: str, role: Literal["user", "assistant", "system"] = "assistant" diff --git a/lib/crewai/src/crewai/cli/authentication/__init__.py b/lib/crewai/src/crewai/cli/authentication/__init__.py index 687ccdfa9..98070be42 100644 --- a/lib/crewai/src/crewai/cli/authentication/__init__.py +++ b/lib/crewai/src/crewai/cli/authentication/__init__.py @@ -1,5 +1,4 @@ from crewai.cli.authentication.main import AuthenticationCommand - __all__ = ["AuthenticationCommand"] diff --git a/lib/crewai/src/crewai/cli/authentication/main.py b/lib/crewai/src/crewai/cli/authentication/main.py index 996fd7c63..7bbda61d5 100644 --- a/lib/crewai/src/crewai/cli/authentication/main.py +++ b/lib/crewai/src/crewai/cli/authentication/main.py @@ -2,8 +2,8 @@ import time from typing import TYPE_CHECKING, Any, TypeVar, cast import webbrowser +import httpx from pydantic import BaseModel, Field 
-import requests from rich.console import Console from crewai.cli.authentication.utils import validate_jwt_token @@ -98,7 +98,7 @@ class AuthenticationCommand: "scope": " ".join(self.oauth2_provider.get_oauth_scopes()), "audience": self.oauth2_provider.get_audience(), } - response = requests.post( + response = httpx.post( url=self.oauth2_provider.get_authorize_url(), data=device_code_payload, timeout=20, @@ -130,7 +130,7 @@ class AuthenticationCommand: attempts = 0 while True and attempts < 10: - response = requests.post( + response = httpx.post( self.oauth2_provider.get_token_url(), data=token_payload, timeout=30 ) token_data = response.json() @@ -149,7 +149,7 @@ class AuthenticationCommand: return if token_data["error"] not in ("authorization_pending", "slow_down"): - raise requests.HTTPError( + raise httpx.HTTPError( token_data.get("error_description") or token_data.get("error") ) diff --git a/lib/crewai/src/crewai/cli/command.py b/lib/crewai/src/crewai/cli/command.py index 3f85318fb..139f69373 100644 --- a/lib/crewai/src/crewai/cli/command.py +++ b/lib/crewai/src/crewai/cli/command.py @@ -1,5 +1,6 @@ -import requests -from requests.exceptions import JSONDecodeError +import json + +import httpx from rich.console import Console from crewai.cli.authentication.token import get_auth_token @@ -30,16 +31,16 @@ class PlusAPIMixin: console.print("Run 'crewai login' to sign up/login.", style="bold green") raise SystemExit from None - def _validate_response(self, response: requests.Response) -> None: + def _validate_response(self, response: httpx.Response) -> None: """ Handle and display error messages from API responses. Args: - response (requests.Response): The response from the Plus API + response (httpx.Response): The response from the Plus API """ try: json_response = response.json() - except (JSONDecodeError, ValueError): + except (json.JSONDecodeError, ValueError): console.print( "Failed to parse response from Enterprise API failed. 
Details:", style="bold red", @@ -62,7 +63,7 @@ class PlusAPIMixin: ) raise SystemExit - if not response.ok: + if not response.is_success: console.print( "Request to Enterprise API failed. Details:", style="bold red" ) diff --git a/lib/crewai/src/crewai/cli/constants.py b/lib/crewai/src/crewai/cli/constants.py index 4de0d0082..2ef8dcc7f 100644 --- a/lib/crewai/src/crewai/cli/constants.py +++ b/lib/crewai/src/crewai/cli/constants.py @@ -69,7 +69,7 @@ ENV_VARS: dict[str, list[dict[str, Any]]] = { }, { "prompt": "Enter your AWS Region Name (press Enter to skip)", - "key_name": "AWS_REGION_NAME", + "key_name": "AWS_DEFAULT_REGION", }, ], "azure": [ diff --git a/lib/crewai/src/crewai/cli/create_crew.py b/lib/crewai/src/crewai/cli/create_crew.py index 7f4fe2e6e..9bca7c499 100644 --- a/lib/crewai/src/crewai/cli/create_crew.py +++ b/lib/crewai/src/crewai/cli/create_crew.py @@ -143,7 +143,7 @@ def create_folder_structure( (folder_path / "src" / folder_name).mkdir(parents=True) (folder_path / "src" / folder_name / "tools").mkdir(parents=True) (folder_path / "src" / folder_name / "config").mkdir(parents=True) - + # Copy AGENTS.md to project root (top-level projects only) package_dir = Path(__file__).parent agents_md_src = package_dir / "templates" / "AGENTS.md" diff --git a/lib/crewai/src/crewai/cli/create_flow.py b/lib/crewai/src/crewai/cli/create_flow.py index 76c68db32..2156d422c 100644 --- a/lib/crewai/src/crewai/cli/create_flow.py +++ b/lib/crewai/src/crewai/cli/create_flow.py @@ -1,5 +1,5 @@ -import shutil from pathlib import Path +import shutil import click diff --git a/lib/crewai/src/crewai/cli/enterprise/main.py b/lib/crewai/src/crewai/cli/enterprise/main.py index 2a73f1ae0..395de418b 100644 --- a/lib/crewai/src/crewai/cli/enterprise/main.py +++ b/lib/crewai/src/crewai/cli/enterprise/main.py @@ -1,7 +1,7 @@ +import json from typing import Any, cast -import requests -from requests.exceptions import JSONDecodeError, RequestException +import httpx from rich.console 
import Console from crewai.cli.authentication.main import Oauth2Settings, ProviderFactory @@ -47,12 +47,12 @@ class EnterpriseConfigureCommand(BaseCommand): "User-Agent": f"CrewAI-CLI/{get_crewai_version()}", "X-Crewai-Version": get_crewai_version(), } - response = requests.get(oauth_endpoint, timeout=30, headers=headers) + response = httpx.get(oauth_endpoint, timeout=30, headers=headers) response.raise_for_status() try: oauth_config = response.json() - except JSONDecodeError as e: + except json.JSONDecodeError as e: raise ValueError(f"Invalid JSON response from {oauth_endpoint}") from e self._validate_oauth_config(oauth_config) @@ -62,7 +62,7 @@ class EnterpriseConfigureCommand(BaseCommand): ) return cast(dict[str, Any], oauth_config) - except RequestException as e: + except httpx.HTTPError as e: raise ValueError(f"Failed to connect to enterprise URL: {e!s}") from e except Exception as e: raise ValueError(f"Error fetching OAuth2 configuration: {e!s}") from e diff --git a/lib/crewai/src/crewai/cli/memory_tui.py b/lib/crewai/src/crewai/cli/memory_tui.py index 98576670d..9dd91a42c 100644 --- a/lib/crewai/src/crewai/cli/memory_tui.py +++ b/lib/crewai/src/crewai/cli/memory_tui.py @@ -290,13 +290,20 @@ class MemoryTUI(App[None]): if self._memory is None: panel.update(self._init_error or "No memory loaded.") return + display_limit = 1000 info = self._memory.info(path) self._last_scope_info = info - self._entries = self._memory.list_records(scope=path, limit=200) + self._entries = self._memory.list_records(scope=path, limit=display_limit) panel.update(_format_scope_info(info)) panel.border_title = "Detail" entry_list = self.query_one("#entry-list", OptionList) - entry_list.border_title = f"Entries ({len(self._entries)})" + capped = info.record_count > display_limit + count_label = ( + f"Entries (showing {display_limit} of {info.record_count} — display limit)" + if capped + else f"Entries ({len(self._entries)})" + ) + entry_list.border_title = count_label 
self._populate_entry_list() def on_option_list_option_highlighted( @@ -376,6 +383,11 @@ class MemoryTUI(App[None]): return info_lines: list[str] = [] + info_lines.append( + "[dim italic]Searched the full dataset" + + (f" within [bold]{scope}[/]" if scope else "") + + " using the recall flow (semantic + recency + importance).[/]\n" + ) if not self._custom_embedder: info_lines.append( "[dim italic]Note: Using default OpenAI embedder. " diff --git a/lib/crewai/src/crewai/cli/organization/main.py b/lib/crewai/src/crewai/cli/organization/main.py index 4ee954698..fe61ec202 100644 --- a/lib/crewai/src/crewai/cli/organization/main.py +++ b/lib/crewai/src/crewai/cli/organization/main.py @@ -1,4 +1,4 @@ -from requests import HTTPError +from httpx import HTTPStatusError from rich.console import Console from rich.table import Table @@ -10,11 +10,11 @@ console = Console() class OrganizationCommand(BaseCommand, PlusAPIMixin): - def __init__(self): + def __init__(self) -> None: BaseCommand.__init__(self) PlusAPIMixin.__init__(self, telemetry=self._telemetry) - def list(self): + def list(self) -> None: try: response = self.plus_api_client.get_organizations() response.raise_for_status() @@ -33,7 +33,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin): table.add_row(org["name"], org["uuid"]) console.print(table) - except HTTPError as e: + except HTTPStatusError as e: if e.response.status_code == 401: console.print( "You are not logged in to any organization. 
Use 'crewai login' to login.", @@ -50,7 +50,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin): ) raise SystemExit(1) from e - def switch(self, org_id): + def switch(self, org_id: str) -> None: try: response = self.plus_api_client.get_organizations() response.raise_for_status() @@ -72,7 +72,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin): f"Successfully switched to {org['name']} ({org['uuid']})", style="bold green", ) - except HTTPError as e: + except HTTPStatusError as e: if e.response.status_code == 401: console.print( "You are not logged in to any organization. Use 'crewai login' to login.", @@ -87,7 +87,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin): console.print(f"Failed to switch organization: {e!s}", style="bold red") raise SystemExit(1) from e - def current(self): + def current(self) -> None: settings = Settings() if settings.org_uuid: console.print( diff --git a/lib/crewai/src/crewai/cli/plus_api.py b/lib/crewai/src/crewai/cli/plus_api.py index e07d44d10..e32e5220d 100644 --- a/lib/crewai/src/crewai/cli/plus_api.py +++ b/lib/crewai/src/crewai/cli/plus_api.py @@ -3,7 +3,6 @@ from typing import Any from urllib.parse import urljoin import httpx -import requests from crewai.cli.config import Settings from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL @@ -23,14 +22,15 @@ class PlusAPI: EPHEMERAL_TRACING_RESOURCE = "/crewai_plus/api/v1/tracing/ephemeral" INTEGRATIONS_RESOURCE = "/crewai_plus/api/v1/integrations" - def __init__(self, api_key: str) -> None: + def __init__(self, api_key: str | None = None) -> None: self.api_key = api_key self.headers = { - "Authorization": f"Bearer {api_key}", "Content-Type": "application/json", "User-Agent": f"CrewAI-CLI/{get_crewai_version()}", "X-Crewai-Version": get_crewai_version(), } + if api_key: + self.headers["Authorization"] = f"Bearer {api_key}" settings = Settings() if settings.org_uuid: self.headers["X-Crewai-Organization-Id"] = settings.org_uuid @@ -43,16 +43,21 @@ class 
PlusAPI: def _make_request( self, method: str, endpoint: str, **kwargs: Any - ) -> requests.Response: + ) -> httpx.Response: url = urljoin(self.base_url, endpoint) - session = requests.Session() - session.trust_env = False - return session.request(method, url, headers=self.headers, **kwargs) + verify = kwargs.pop("verify", True) + with httpx.Client(trust_env=False, verify=verify) as client: + return client.request(method, url, headers=self.headers, **kwargs) - def login_to_tool_repository(self) -> requests.Response: - return self._make_request("POST", f"{self.TOOLS_RESOURCE}/login") + def login_to_tool_repository( + self, user_identifier: str | None = None + ) -> httpx.Response: + payload = {} + if user_identifier: + payload["user_identifier"] = user_identifier + return self._make_request("POST", f"{self.TOOLS_RESOURCE}/login", json=payload) - def get_tool(self, handle: str) -> requests.Response: + def get_tool(self, handle: str) -> httpx.Response: return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}") async def get_agent(self, handle: str) -> httpx.Response: @@ -68,7 +73,7 @@ class PlusAPI: description: str | None, encoded_file: str, available_exports: list[dict[str, Any]] | None = None, - ) -> requests.Response: + ) -> httpx.Response: params = { "handle": handle, "public": is_public, @@ -79,54 +84,52 @@ class PlusAPI: } return self._make_request("POST", f"{self.TOOLS_RESOURCE}", json=params) - def deploy_by_name(self, project_name: str) -> requests.Response: + def deploy_by_name(self, project_name: str) -> httpx.Response: return self._make_request( "POST", f"{self.CREWS_RESOURCE}/by-name/{project_name}/deploy" ) - def deploy_by_uuid(self, uuid: str) -> requests.Response: + def deploy_by_uuid(self, uuid: str) -> httpx.Response: return self._make_request("POST", f"{self.CREWS_RESOURCE}/{uuid}/deploy") - def crew_status_by_name(self, project_name: str) -> requests.Response: + def crew_status_by_name(self, project_name: str) -> httpx.Response: return 
self._make_request( "GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/status" ) - def crew_status_by_uuid(self, uuid: str) -> requests.Response: + def crew_status_by_uuid(self, uuid: str) -> httpx.Response: return self._make_request("GET", f"{self.CREWS_RESOURCE}/{uuid}/status") def crew_by_name( self, project_name: str, log_type: str = "deployment" - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/logs/{log_type}" ) - def crew_by_uuid( - self, uuid: str, log_type: str = "deployment" - ) -> requests.Response: + def crew_by_uuid(self, uuid: str, log_type: str = "deployment") -> httpx.Response: return self._make_request( "GET", f"{self.CREWS_RESOURCE}/{uuid}/logs/{log_type}" ) - def delete_crew_by_name(self, project_name: str) -> requests.Response: + def delete_crew_by_name(self, project_name: str) -> httpx.Response: return self._make_request( "DELETE", f"{self.CREWS_RESOURCE}/by-name/{project_name}" ) - def delete_crew_by_uuid(self, uuid: str) -> requests.Response: + def delete_crew_by_uuid(self, uuid: str) -> httpx.Response: return self._make_request("DELETE", f"{self.CREWS_RESOURCE}/{uuid}") - def list_crews(self) -> requests.Response: + def list_crews(self) -> httpx.Response: return self._make_request("GET", self.CREWS_RESOURCE) - def create_crew(self, payload: dict[str, Any]) -> requests.Response: + def create_crew(self, payload: dict[str, Any]) -> httpx.Response: return self._make_request("POST", self.CREWS_RESOURCE, json=payload) - def get_organizations(self) -> requests.Response: + def get_organizations(self) -> httpx.Response: return self._make_request("GET", self.ORGANIZATIONS_RESOURCE) - def initialize_trace_batch(self, payload: dict[str, Any]) -> requests.Response: + def initialize_trace_batch(self, payload: dict[str, Any]) -> httpx.Response: return self._make_request( "POST", f"{self.TRACING_RESOURCE}/batches", @@ -136,7 +139,7 @@ class PlusAPI: def 
initialize_ephemeral_trace_batch( self, payload: dict[str, Any] - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "POST", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches", @@ -145,7 +148,7 @@ class PlusAPI: def send_trace_events( self, trace_batch_id: str, payload: dict[str, Any] - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "POST", f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/events", @@ -155,7 +158,7 @@ class PlusAPI: def send_ephemeral_trace_events( self, trace_batch_id: str, payload: dict[str, Any] - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "POST", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events", @@ -165,7 +168,7 @@ class PlusAPI: def finalize_trace_batch( self, trace_batch_id: str, payload: dict[str, Any] - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "PATCH", f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize", @@ -175,7 +178,7 @@ class PlusAPI: def finalize_ephemeral_trace_batch( self, trace_batch_id: str, payload: dict[str, Any] - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "PATCH", f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize", @@ -185,7 +188,7 @@ class PlusAPI: def mark_trace_batch_as_failed( self, trace_batch_id: str, error_message: str - ) -> requests.Response: + ) -> httpx.Response: return self._make_request( "PATCH", f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}", @@ -193,13 +196,20 @@ class PlusAPI: timeout=30, ) - def get_triggers(self) -> requests.Response: + def get_mcp_configs(self, slugs: list[str]) -> httpx.Response: + """Get MCP server configurations for the given slugs.""" + return self._make_request( + "GET", + f"{self.INTEGRATIONS_RESOURCE}/mcp_configs", + params={"slugs": ",".join(slugs)}, + timeout=30, + ) + + def get_triggers(self) -> httpx.Response: """Get all available triggers from integrations.""" return 
self._make_request("GET", f"{self.INTEGRATIONS_RESOURCE}/apps") - def get_trigger_payload( - self, app_slug: str, trigger_slug: str - ) -> requests.Response: + def get_trigger_payload(self, app_slug: str, trigger_slug: str) -> httpx.Response: """Get sample payload for a specific trigger.""" return self._make_request( "GET", f"{self.INTEGRATIONS_RESOURCE}/{app_slug}/{trigger_slug}/payload" diff --git a/lib/crewai/src/crewai/cli/provider.py b/lib/crewai/src/crewai/cli/provider.py index 6de337b85..1f1e4ec40 100644 --- a/lib/crewai/src/crewai/cli/provider.py +++ b/lib/crewai/src/crewai/cli/provider.py @@ -8,7 +8,7 @@ from typing import Any import certifi import click -import requests +import httpx from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS @@ -165,20 +165,20 @@ def fetch_provider_data(cache_file: Path) -> dict[str, Any] | None: ssl_config = os.environ["SSL_CERT_FILE"] = certifi.where() try: - response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config) - response.raise_for_status() - data = download_data(response) - with open(cache_file, "w") as f: - json.dump(data, f) - return data - except requests.RequestException as e: + with httpx.stream("GET", JSON_URL, timeout=60, verify=ssl_config) as response: + response.raise_for_status() + data = download_data(response) + with open(cache_file, "w") as f: + json.dump(data, f) + return data + except httpx.HTTPError as e: click.secho(f"Error fetching provider data: {e}", fg="red") except json.JSONDecodeError: click.secho("Error parsing provider data. Invalid JSON format.", fg="red") return None -def download_data(response: requests.Response) -> dict[str, Any]: +def download_data(response: httpx.Response) -> dict[str, Any]: """Downloads data from a given HTTP response and returns the JSON content. 
Args: @@ -194,7 +194,7 @@ def download_data(response: requests.Response) -> dict[str, Any]: with click.progressbar( length=total_size, label="Downloading", show_pos=True ) as bar: - for chunk in response.iter_content(block_size): + for chunk in response.iter_bytes(block_size): if chunk: data_chunks.append(chunk) bar.update(len(chunk)) diff --git a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml index 301c6e553..de884767c 100644 --- a/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/crew/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.9.3" + "crewai[tools]==1.10.1" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml index d80f05bee..cfc68f74b 100644 --- a/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/flow/pyproject.toml @@ -5,7 +5,7 @@ description = "{{name}} using crewAI" authors = [{ name = "Your Name", email = "you@example.com" }] requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]==1.9.3" + "crewai[tools]==1.10.1" ] [project.scripts] diff --git a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml index 61d4343b9..0e8c784f0 100644 --- a/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml +++ b/lib/crewai/src/crewai/cli/templates/tool/pyproject.toml @@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}" readme = "README.md" requires-python = ">=3.10,<3.14" dependencies = [ - "crewai[tools]>=0.203.1" + "crewai[tools]==1.10.1" ] [tool.crewai] diff --git a/lib/crewai/src/crewai/cli/tools/main.py b/lib/crewai/src/crewai/cli/tools/main.py index 
e2dd21dde..0a9f68af0 100644 --- a/lib/crewai/src/crewai/cli/tools/main.py +++ b/lib/crewai/src/crewai/cli/tools/main.py @@ -23,6 +23,7 @@ from crewai.cli.utils import ( tree_copy, tree_find_and_replace, ) +from crewai.events.listeners.tracing.utils import get_user_id console = Console() @@ -169,7 +170,9 @@ class ToolCommand(BaseCommand, PlusAPIMixin): console.print(f"Successfully installed {handle}", style="bold green") def login(self) -> None: - login_response = self.plus_api_client.login_to_tool_repository() + login_response = self.plus_api_client.login_to_tool_repository( + user_identifier=get_user_id() + ) if login_response.status_code != 200: console.print( diff --git a/lib/crewai/src/crewai/crews/__init__.py b/lib/crewai/src/crewai/crews/__init__.py index 8b46d5c2b..10bee3117 100644 --- a/lib/crewai/src/crewai/crews/__init__.py +++ b/lib/crewai/src/crewai/crews/__init__.py @@ -1,5 +1,4 @@ from crewai.crews.crew_output import CrewOutput - __all__ = ["CrewOutput"] diff --git a/lib/crewai/src/crewai/events/__init__.py b/lib/crewai/src/crewai/events/__init__.py index a6f213a54..36933fc45 100644 --- a/lib/crewai/src/crewai/events/__init__.py +++ b/lib/crewai/src/crewai/events/__init__.py @@ -63,6 +63,7 @@ from crewai.events.types.logging_events import ( AgentLogsStartedEvent, ) from crewai.events.types.mcp_events import ( + MCPConfigFetchFailedEvent, MCPConnectionCompletedEvent, MCPConnectionFailedEvent, MCPConnectionStartedEvent, @@ -165,6 +166,7 @@ __all__ = [ "LiteAgentExecutionCompletedEvent", "LiteAgentExecutionErrorEvent", "LiteAgentExecutionStartedEvent", + "MCPConfigFetchFailedEvent", "MCPConnectionCompletedEvent", "MCPConnectionFailedEvent", "MCPConnectionStartedEvent", diff --git a/lib/crewai/src/crewai/events/base_event_listener.py b/lib/crewai/src/crewai/events/base_event_listener.py index 2319c9f97..c0187674b 100644 --- a/lib/crewai/src/crewai/events/base_event_listener.py +++ b/lib/crewai/src/crewai/events/base_event_listener.py @@ -23,4 +23,3 @@ 
class BaseEventListener(ABC): Args: crewai_event_bus: The event bus to register listeners on. """ - pass diff --git a/lib/crewai/src/crewai/events/event_listener.py b/lib/crewai/src/crewai/events/event_listener.py index 5f22d0188..09dc25316 100644 --- a/lib/crewai/src/crewai/events/event_listener.py +++ b/lib/crewai/src/crewai/events/event_listener.py @@ -68,6 +68,7 @@ from crewai.events.types.logging_events import ( AgentLogsStartedEvent, ) from crewai.events.types.mcp_events import ( + MCPConfigFetchFailedEvent, MCPConnectionCompletedEvent, MCPConnectionFailedEvent, MCPConnectionStartedEvent, @@ -665,6 +666,16 @@ class EventListener(BaseEventListener): event.error_type, ) + @crewai_event_bus.on(MCPConfigFetchFailedEvent) + def on_mcp_config_fetch_failed( + _: Any, event: MCPConfigFetchFailedEvent + ) -> None: + self.formatter.handle_mcp_config_fetch_failed( + event.slug, + event.error, + event.error_type, + ) + @crewai_event_bus.on(MCPToolExecutionStartedEvent) def on_mcp_tool_execution_started( _: Any, event: MCPToolExecutionStartedEvent diff --git a/lib/crewai/src/crewai/events/event_types.py b/lib/crewai/src/crewai/events/event_types.py index 5fca4bd7d..63b6cdfc8 100644 --- a/lib/crewai/src/crewai/events/event_types.py +++ b/lib/crewai/src/crewai/events/event_types.py @@ -67,6 +67,7 @@ from crewai.events.types.llm_guardrail_events import ( LLMGuardrailStartedEvent, ) from crewai.events.types.mcp_events import ( + MCPConfigFetchFailedEvent, MCPConnectionCompletedEvent, MCPConnectionFailedEvent, MCPConnectionStartedEvent, @@ -181,4 +182,5 @@ EventTypes = ( | MCPToolExecutionStartedEvent | MCPToolExecutionCompletedEvent | MCPToolExecutionFailedEvent + | MCPConfigFetchFailedEvent ) diff --git a/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py b/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py index 6c45f63ef..da25792fb 100644 --- a/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py +++ 
b/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py @@ -15,6 +15,7 @@ from crewai.cli.plus_api import PlusAPI from crewai.cli.version import get_crewai_version from crewai.events.listeners.tracing.types import TraceEvent from crewai.events.listeners.tracing.utils import ( + get_user_id, is_tracing_enabled_in_context, should_auto_collect_first_time_traces, ) @@ -67,7 +68,7 @@ class TraceBatchManager: api_key=get_auth_token(), ) except AuthError: - self.plus_api = PlusAPI(api_key="") + self.plus_api = PlusAPI() self.ephemeral_trace_url = None def initialize_batch( @@ -120,7 +121,6 @@ class TraceBatchManager: payload = { "trace_id": self.current_batch.batch_id, "execution_type": execution_metadata.get("execution_type", "crew"), - "user_identifier": execution_metadata.get("user_context", None), "execution_context": { "crew_fingerprint": execution_metadata.get("crew_fingerprint"), "crew_name": execution_metadata.get("crew_name", None), @@ -140,6 +140,7 @@ class TraceBatchManager: } if use_ephemeral: payload["ephemeral_trace_id"] = self.current_batch.batch_id + payload["user_identifier"] = get_user_id() response = ( self.plus_api.initialize_ephemeral_trace_batch(payload) diff --git a/lib/crewai/src/crewai/events/types/llm_events.py b/lib/crewai/src/crewai/events/types/llm_events.py index 87087f100..73d743804 100644 --- a/lib/crewai/src/crewai/events/types/llm_events.py +++ b/lib/crewai/src/crewai/events/types/llm_events.py @@ -86,3 +86,11 @@ class LLMStreamChunkEvent(LLMEventBase): tool_call: ToolCall | None = None call_type: LLMCallType | None = None response_id: str | None = None + + +class LLMThinkingChunkEvent(LLMEventBase): + """Event emitted when a thinking/reasoning chunk is received from a thinking model""" + + type: str = "llm_thinking_chunk" + chunk: str + response_id: str | None = None diff --git a/lib/crewai/src/crewai/events/types/mcp_events.py b/lib/crewai/src/crewai/events/types/mcp_events.py index d360aa62a..d6ca9b99a 100644 --- 
a/lib/crewai/src/crewai/events/types/mcp_events.py +++ b/lib/crewai/src/crewai/events/types/mcp_events.py @@ -83,3 +83,16 @@ class MCPToolExecutionFailedEvent(MCPEvent): error_type: str | None = None # "timeout", "validation", "server_error", etc. started_at: datetime | None = None failed_at: datetime | None = None + + +class MCPConfigFetchFailedEvent(BaseEvent): + """Event emitted when fetching an AMP MCP server config fails. + + This covers cases where the slug is not connected, the API call + failed, or native MCP resolution failed after config was fetched. + """ + + type: str = "mcp_config_fetch_failed" + slug: str + error: str + error_type: str | None = None # "not_connected", "api_error", "connection_failed" diff --git a/lib/crewai/src/crewai/events/utils/console_formatter.py b/lib/crewai/src/crewai/events/utils/console_formatter.py index 157d812ef..77cc76f4b 100644 --- a/lib/crewai/src/crewai/events/utils/console_formatter.py +++ b/lib/crewai/src/crewai/events/utils/console_formatter.py @@ -1512,6 +1512,34 @@ To enable tracing, do any one of these: self.print(panel) self.print() + def handle_mcp_config_fetch_failed( + self, + slug: str, + error: str = "", + error_type: str | None = None, + ) -> None: + """Handle MCP config fetch failed event (AMP resolution failures).""" + if not self.verbose: + return + + content = Text() + content.append("MCP Config Fetch Failed\n\n", style="red bold") + content.append("Server: ", style="white") + content.append(f"{slug}\n", style="red") + + if error_type: + content.append("Error Type: ", style="white") + content.append(f"{error_type}\n", style="red") + + if error: + content.append("\nError: ", style="white bold") + error_preview = error[:500] + "..." 
if len(error) > 500 else error + content.append(f"{error_preview}\n", style="red") + + panel = self.create_panel(content, "❌ MCP Config Failed", "red") + self.print(panel) + self.print() + def handle_mcp_tool_execution_started( self, server_name: str, diff --git a/lib/crewai/src/crewai/experimental/agent_executor.py b/lib/crewai/src/crewai/experimental/agent_executor.py index 0656b59e7..034f7ba32 100644 --- a/lib/crewai/src/crewai/experimental/agent_executor.py +++ b/lib/crewai/src/crewai/experimental/agent_executor.py @@ -1,7 +1,11 @@ from __future__ import annotations +import asyncio from collections.abc import Callable, Coroutine +from concurrent.futures import ThreadPoolExecutor, as_completed +import contextvars from datetime import datetime +import inspect import json import threading from typing import TYPE_CHECKING, Any, Literal, cast @@ -49,6 +53,8 @@ from crewai.hooks.types import ( BeforeLLMCallHookCallable, BeforeLLMCallHookType, ) +from crewai.tools.base_tool import BaseTool +from crewai.tools.structured_tool import CrewStructuredTool from crewai.utilities.agent_utils import ( convert_tools_to_openai_schema, enforce_rpm_limit, @@ -63,6 +69,7 @@ from crewai.utilities.agent_utils import ( has_reached_max_iterations, is_context_length_exceeded, is_inside_event_loop, + parse_tool_call_args, process_llm_response, track_delegation_if_needed, ) @@ -81,8 +88,6 @@ if TYPE_CHECKING: from crewai.crew import Crew from crewai.llms.base_llm import BaseLLM from crewai.task import Task - from crewai.tools.base_tool import BaseTool - from crewai.tools.structured_tool import CrewStructuredTool from crewai.tools.tool_types import ToolResult from crewai.utilities.prompts import StandardPromptResult, SystemPromptResult @@ -298,6 +303,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): super().__init__( suppress_flow_events=True, tracing=current_tracing if current_tracing else None, + max_method_calls=self.max_iter * 10, ) self._flow_initialized = True 
@@ -317,7 +323,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): def _setup_native_tools(self) -> None: """Convert tools to OpenAI schema format for native function calling.""" if self.original_tools: - self._openai_tools, self._available_functions = ( + self._openai_tools, self._available_functions, self._tool_name_mapping = ( convert_tools_to_openai_schema(self.original_tools) ) @@ -399,7 +405,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): self._setup_native_tools() return "initialized" - @listen("force_final_answer") + @listen("max_iterations_exceeded") def force_final_answer(self) -> Literal["agent_finished"]: """Force agent to provide final answer when max iterations exceeded.""" formatted_answer = handle_max_iterations_exceeded( @@ -590,21 +596,19 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): def execute_tool_action(self) -> Literal["tool_completed", "tool_result_is_final"]: """Execute the tool action and handle the result.""" + action = cast(AgentAction, self.state.current_answer) + + fingerprint_context = {} + if ( + self.agent + and hasattr(self.agent, "security_config") + and hasattr(self.agent.security_config, "fingerprint") + ): + fingerprint_context = { + "agent_fingerprint": str(self.agent.security_config.fingerprint) + } + try: - action = cast(AgentAction, self.state.current_answer) - - # Extract fingerprint context for tool execution - fingerprint_context = {} - if ( - self.agent - and hasattr(self.agent, "security_config") - and hasattr(self.agent.security_config, "fingerprint") - ): - fingerprint_context = { - "agent_fingerprint": str(self.agent.security_config.fingerprint) - } - - # Execute the tool tool_result = execute_tool_and_check_finality( agent_action=action, fingerprint_context=fingerprint_context, @@ -618,24 +622,19 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): function_calling_llm=self.function_calling_llm, crew=self.crew, ) + except 
Exception as e: + if self.agent and self.agent.verbose: + self._printer.print( + content=f"Error in tool execution: {e}", color="red" + ) + if self.task: + self.task.increment_tools_errors() - # Handle agent action and append observation to messages - result = self._handle_agent_action(action, tool_result) - self.state.current_answer = result + error_observation = f"\nObservation: Error executing tool: {e}" + action.text += error_observation + action.result = str(e) + self._append_message_to_state(action.text) - # Invoke step callback if configured - self._invoke_step_callback(result) - - # Append result message to conversation state - if hasattr(result, "text"): - self._append_message_to_state(result.text) - - # Check if tool result became a final answer (result_as_answer flag) - if isinstance(result, AgentFinish): - self.state.is_finished = True - return "tool_result_is_final" - - # Inject post-tool reasoning prompt to enforce analysis reasoning_prompt = self._i18n.slice("post_tool_reasoning") reasoning_message: LLMMessage = { "role": "user", @@ -645,12 +644,26 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): return "tool_completed" - except Exception as e: - error_text = Text() - error_text.append("❌ Error in tool execution: ", style="red bold") - error_text.append(str(e), style="red") - self._console.print(error_text) - raise + result = self._handle_agent_action(action, tool_result) + self.state.current_answer = result + + self._invoke_step_callback(result) + + if hasattr(result, "text"): + self._append_message_to_state(result.text) + + if isinstance(result, AgentFinish): + self.state.is_finished = True + return "tool_result_is_final" + + reasoning_prompt = self._i18n.slice("post_tool_reasoning") + reasoning_message_post: LLMMessage = { + "role": "user", + "content": reasoning_prompt, + } + self.state.messages.append(reasoning_message_post) + + return "tool_completed" @listen("native_tool_calls") def execute_native_tool( @@ -668,9 +681,12 
@@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): if not self.state.pending_tool_calls: return "native_tool_completed" + pending_tool_calls = list(self.state.pending_tool_calls) + self.state.pending_tool_calls.clear() + # Group all tool calls into a single assistant message tool_calls_to_report = [] - for tool_call in self.state.pending_tool_calls: + for tool_call in pending_tool_calls: info = extract_tool_call_info(tool_call) if not info: continue @@ -695,202 +711,99 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): "content": None, "tool_calls": tool_calls_to_report, } - if all( - type(tc).__qualname__ == "Part" for tc in self.state.pending_tool_calls - ): - assistant_message["raw_tool_call_parts"] = list( - self.state.pending_tool_calls - ) + if all(type(tc).__qualname__ == "Part" for tc in pending_tool_calls): + assistant_message["raw_tool_call_parts"] = list(pending_tool_calls) self.state.messages.append(assistant_message) - # Now execute each tool - while self.state.pending_tool_calls: - tool_call = self.state.pending_tool_calls.pop(0) - info = extract_tool_call_info(tool_call) - if not info: - continue + runnable_tool_calls = [ + tool_call + for tool_call in pending_tool_calls + if extract_tool_call_info(tool_call) is not None + ] + should_parallelize = self._should_parallelize_native_tool_calls( + runnable_tool_calls + ) - call_id, func_name, func_args = info - - # Parse arguments - if isinstance(func_args, str): - try: - args_dict = json.loads(func_args) - except json.JSONDecodeError: - args_dict = {} - else: - args_dict = func_args - - # Get agent_key for event tracking - agent_key = ( - getattr(self.agent, "key", "unknown") if self.agent else "unknown" - ) - - # Find original tool by matching sanitized name (needed for cache_function and result_as_answer) - original_tool = None - for tool in self.original_tools or []: - if sanitize_tool_name(tool.name) == func_name: - original_tool = tool - break - - # Check if 
tool has reached max usage count - max_usage_reached = False - if ( - original_tool - and original_tool.max_usage_count is not None - and original_tool.current_usage_count >= original_tool.max_usage_count - ): - max_usage_reached = True - - # Check cache before executing - from_cache = False - input_str = json.dumps(args_dict) if args_dict else "" - if self.tools_handler and self.tools_handler.cache: - cached_result = self.tools_handler.cache.read( - tool=func_name, input=input_str + execution_results: list[dict[str, Any]] = [] + if should_parallelize: + max_workers = min(8, len(runnable_tool_calls)) + with ThreadPoolExecutor(max_workers=max_workers) as pool: + future_to_idx = { + pool.submit(contextvars.copy_context().run, self._execute_single_native_tool_call, tool_call): idx + for idx, tool_call in enumerate(runnable_tool_calls) + } + ordered_results: list[dict[str, Any] | None] = [None] * len( + runnable_tool_calls ) - if cached_result is not None: - result = ( - str(cached_result) - if not isinstance(cached_result, str) - else cached_result - ) - from_cache = True - - # Emit tool usage started event - started_at = datetime.now() - crewai_event_bus.emit( - self, - event=ToolUsageStartedEvent( - tool_name=func_name, - tool_args=args_dict, - from_agent=self.agent, - from_task=self.task, - agent_key=agent_key, - ), - ) - error_event_emitted = False - - track_delegation_if_needed(func_name, args_dict, self.task) - - structured_tool: CrewStructuredTool | None = None - for structured in self.tools or []: - if sanitize_tool_name(structured.name) == func_name: - structured_tool = structured - break - - hook_blocked = False - before_hook_context = ToolCallHookContext( - tool_name=func_name, - tool_input=args_dict, - tool=structured_tool, # type: ignore[arg-type] - agent=self.agent, - task=self.task, - crew=self.crew, - ) - before_hooks = get_before_tool_call_hooks() - try: - for hook in before_hooks: - hook_result = hook(before_hook_context) - if hook_result is False: - 
hook_blocked = True - break - except Exception as hook_error: - if self.agent.verbose: - self._printer.print( - content=f"Error in before_tool_call hook: {hook_error}", - color="red", - ) - - if hook_blocked: - result = f"Tool execution blocked by hook. Tool: {func_name}" - elif not from_cache and not max_usage_reached: - result = "Tool not found" - if func_name in self._available_functions: + for future in as_completed(future_to_idx): + idx = future_to_idx[future] try: - tool_func = self._available_functions[func_name] - raw_result = tool_func(**args_dict) - - # Add to cache after successful execution (before string conversion) - if self.tools_handler and self.tools_handler.cache: - should_cache = True - if original_tool: - should_cache = original_tool.cache_function( - args_dict, raw_result - ) - if should_cache: - self.tools_handler.cache.add( - tool=func_name, input=input_str, output=raw_result - ) - - # Convert to string for message - result = ( - str(raw_result) - if not isinstance(raw_result, str) - else raw_result - ) + ordered_results[idx] = future.result() except Exception as e: - result = f"Error executing tool: {e}" - if self.task: - self.task.increment_tools_errors() - # Emit tool usage error event - crewai_event_bus.emit( - self, - event=ToolUsageErrorEvent( - tool_name=func_name, - tool_args=args_dict, - from_agent=self.agent, - from_task=self.task, - agent_key=agent_key, - error=e, - ), - ) - error_event_emitted = True - elif max_usage_reached and original_tool: - # Return error message when max usage limit is reached - result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." 
+ tool_call = runnable_tool_calls[idx] + info = extract_tool_call_info(tool_call) + call_id = info[0] if info else "unknown" + func_name = info[1] if info else "unknown" + ordered_results[idx] = { + "call_id": call_id, + "func_name": func_name, + "result": f"Error executing tool: {e}", + "from_cache": False, + "original_tool": None, + } + execution_results = [ + result for result in ordered_results if result is not None + ] + else: + # Execute sequentially so result_as_answer tools can short-circuit + # immediately without running remaining calls. + for tool_call in runnable_tool_calls: + execution_result = self._execute_single_native_tool_call(tool_call) + call_id = cast(str, execution_result["call_id"]) + func_name = cast(str, execution_result["func_name"]) + result = cast(str, execution_result["result"]) + from_cache = cast(bool, execution_result["from_cache"]) + original_tool = execution_result["original_tool"] - # Execute after_tool_call hooks (even if blocked, to allow logging/monitoring) - after_hook_context = ToolCallHookContext( - tool_name=func_name, - tool_input=args_dict, - tool=structured_tool, # type: ignore[arg-type] - agent=self.agent, - task=self.task, - crew=self.crew, - tool_result=result, - ) - after_hooks = get_after_tool_call_hooks() - try: - for after_hook in after_hooks: - after_hook_result = after_hook(after_hook_context) - if after_hook_result is not None: - result = after_hook_result - after_hook_context.tool_result = result - except Exception as hook_error: - if self.agent.verbose: + tool_message: LLMMessage = { + "role": "tool", + "tool_call_id": call_id, + "name": func_name, + "content": result, + } + self.state.messages.append(tool_message) + + # Log the tool execution + if self.agent and self.agent.verbose: + cache_info = " (from cache)" if from_cache else "" self._printer.print( - content=f"Error in after_tool_call hook: {hook_error}", - color="red", + content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...", 
+ color="green", ) - if not error_event_emitted: - crewai_event_bus.emit( - self, - event=ToolUsageFinishedEvent( + if ( + original_tool + and hasattr(original_tool, "result_as_answer") + and original_tool.result_as_answer + ): + self.state.current_answer = AgentFinish( + thought="Tool result is the final answer", output=result, - tool_name=func_name, - tool_args=args_dict, - from_agent=self.agent, - from_task=self.task, - agent_key=agent_key, - started_at=started_at, - finished_at=datetime.now(), - ), - ) + text=result, + ) + self.state.is_finished = True + return "tool_result_is_final" - # Append tool result message - tool_message: LLMMessage = { + return "native_tool_completed" + + for execution_result in execution_results: + call_id = cast(str, execution_result["call_id"]) + func_name = cast(str, execution_result["func_name"]) + result = cast(str, execution_result["result"]) + from_cache = cast(bool, execution_result["from_cache"]) + original_tool = execution_result["original_tool"] + + tool_message = { "role": "tool", "tool_call_id": call_id, "name": func_name, @@ -922,6 +835,249 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): return "native_tool_completed" + def _should_parallelize_native_tool_calls(self, tool_calls: list[Any]) -> bool: + """Determine if native tool calls are safe to run in parallel.""" + if len(tool_calls) <= 1: + return False + + for tool_call in tool_calls: + info = extract_tool_call_info(tool_call) + if not info: + continue + _, func_name, _ = info + + mapping = getattr(self, "_tool_name_mapping", None) + original_tool: BaseTool | None = None + if mapping and func_name in mapping: + mapped = mapping[func_name] + if isinstance(mapped, BaseTool): + original_tool = mapped + if original_tool is None: + for tool in self.original_tools or []: + if sanitize_tool_name(tool.name) == func_name: + original_tool = tool + break + + if not original_tool: + continue + + if getattr(original_tool, "result_as_answer", False): + 
return False + if getattr(original_tool, "max_usage_count", None) is not None: + return False + + return True + + def _execute_single_native_tool_call(self, tool_call: Any) -> dict[str, Any]: + """Execute a single native tool call and return metadata/result.""" + info = extract_tool_call_info(tool_call) + if not info: + call_id = ( + getattr(tool_call, "id", None) + or (tool_call.get("id") if isinstance(tool_call, dict) else None) + or "unknown" + ) + return { + "call_id": call_id, + "func_name": "unknown", + "result": "Error: Invalid native tool call format", + "from_cache": False, + "original_tool": None, + } + + call_id, func_name, func_args = info + + # Parse arguments + parsed_args, parse_error = parse_tool_call_args(func_args, func_name, call_id) + if parse_error is not None: + return parse_error + args_dict: dict[str, Any] = parsed_args or {} + + # Get agent_key for event tracking + agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown" + + original_tool: BaseTool | None = None + mapping = getattr(self, "_tool_name_mapping", None) + if mapping and func_name in mapping: + mapped = mapping[func_name] + if isinstance(mapped, BaseTool): + original_tool = mapped + if original_tool is None: + for tool in self.original_tools or []: + if sanitize_tool_name(tool.name) == func_name: + original_tool = tool + break + + # Check if tool has reached max usage count + max_usage_reached = False + if ( + original_tool + and original_tool.max_usage_count is not None + and original_tool.current_usage_count >= original_tool.max_usage_count + ): + max_usage_reached = True + + # Check cache before executing + from_cache = False + input_str = json.dumps(args_dict) if args_dict else "" + if self.tools_handler and self.tools_handler.cache: + cached_result = self.tools_handler.cache.read( + tool=func_name, input=input_str + ) + if cached_result is not None: + result = ( + str(cached_result) + if not isinstance(cached_result, str) + else cached_result + ) + 
from_cache = True + + # Emit tool usage started event + started_at = datetime.now() + crewai_event_bus.emit( + self, + event=ToolUsageStartedEvent( + tool_name=func_name, + tool_args=args_dict, + from_agent=self.agent, + from_task=self.task, + agent_key=agent_key, + ), + ) + error_event_emitted = False + + track_delegation_if_needed(func_name, args_dict, self.task) + + structured_tool: CrewStructuredTool | None = None + if original_tool is not None: + for structured in self.tools or []: + if getattr(structured, "_original_tool", None) is original_tool: + structured_tool = structured + break + if structured_tool is None: + for structured in self.tools or []: + if sanitize_tool_name(structured.name) == func_name: + structured_tool = structured + break + + hook_blocked = False + before_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + ) + before_hooks = get_before_tool_call_hooks() + try: + for hook in before_hooks: + hook_result = hook(before_hook_context) + if hook_result is False: + hook_blocked = True + break + except Exception as hook_error: + if self.agent.verbose: + self._printer.print( + content=f"Error in before_tool_call hook: {hook_error}", + color="red", + ) + + if hook_blocked: + result = f"Tool execution blocked by hook. 
Tool: {func_name}" + elif not from_cache and not max_usage_reached: + result = "Tool not found" + if func_name in self._available_functions: + try: + tool_func = self._available_functions[func_name] + raw_result = tool_func(**args_dict) + + # Add to cache after successful execution (before string conversion) + if self.tools_handler and self.tools_handler.cache: + should_cache = True + if original_tool: + should_cache = original_tool.cache_function( + args_dict, raw_result + ) + if should_cache: + self.tools_handler.cache.add( + tool=func_name, input=input_str, output=raw_result + ) + + # Convert to string for message + result = ( + str(raw_result) + if not isinstance(raw_result, str) + else raw_result + ) + except Exception as e: + result = f"Error executing tool: {e}" + if self.task: + self.task.increment_tools_errors() + # Emit tool usage error event + crewai_event_bus.emit( + self, + event=ToolUsageErrorEvent( + tool_name=func_name, + tool_args=args_dict, + from_agent=self.agent, + from_task=self.task, + agent_key=agent_key, + error=e, + ), + ) + error_event_emitted = True + elif max_usage_reached and original_tool: + # Return error message when max usage limit is reached + result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." 
+ + # Execute after_tool_call hooks (even if blocked, to allow logging/monitoring) + after_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + tool_result=result, + ) + after_hooks = get_after_tool_call_hooks() + try: + for after_hook in after_hooks: + after_hook_result = after_hook(after_hook_context) + if after_hook_result is not None: + result = after_hook_result + after_hook_context.tool_result = result + except Exception as hook_error: + if self.agent.verbose: + self._printer.print( + content=f"Error in after_tool_call hook: {hook_error}", + color="red", + ) + + if not error_event_emitted: + crewai_event_bus.emit( + self, + event=ToolUsageFinishedEvent( + output=result, + tool_name=func_name, + tool_args=args_dict, + from_agent=self.agent, + from_task=self.task, + agent_key=agent_key, + started_at=started_at, + finished_at=datetime.now(), + ), + ) + + return { + "call_id": call_id, + "func_name": func_name, + "result": result, + "from_cache": from_cache, + "original_tool": original_tool, + } + def _extract_tool_name(self, tool_call: Any) -> str: """Extract tool name from various tool call formats.""" if hasattr(tool_call, "function"): @@ -954,11 +1110,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): def check_max_iterations( self, ) -> Literal[ - "force_final_answer", "continue_reasoning", "continue_reasoning_native" + "max_iterations_exceeded", "continue_reasoning", "continue_reasoning_native" ]: """Check if max iterations reached before proceeding with reasoning.""" if has_reached_max_iterations(self.state.iterations, self.max_iter): - return "force_final_answer" + return "max_iterations_exceeded" if self.state.use_native_tools: return "continue_reasoning_native" return "continue_reasoning" @@ -1252,7 +1408,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): formatted_answer: 
Current agent response. """ if self.step_callback: - self.step_callback(formatted_answer) + cb_result = self.step_callback(formatted_answer) + if inspect.iscoroutine(cb_result): + asyncio.run(cb_result) def _append_message_to_state( self, text: str, role: Literal["user", "assistant", "system"] = "assistant" diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index d8e74fc08..64c4059ad 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -10,13 +10,15 @@ import asyncio from collections.abc import ( Callable, ItemsView, + Iterable, Iterator, KeysView, Sequence, ValuesView, ) -from concurrent.futures import Future +from concurrent.futures import Future, ThreadPoolExecutor import copy +import enum import inspect import logging import threading @@ -27,8 +29,10 @@ from typing import ( Generic, Literal, ParamSpec, + SupportsIndex, TypeVar, cast, + overload, ) from uuid import uuid4 @@ -77,7 +81,12 @@ from crewai.flow.flow_wrappers import ( StartMethod, ) from crewai.flow.persistence.base import FlowPersistence -from crewai.flow.types import FlowExecutionData, FlowMethodName, InputHistoryEntry, PendingListenerKey +from crewai.flow.types import ( + FlowExecutionData, + FlowMethodName, + InputHistoryEntry, + PendingListenerKey, +) from crewai.flow.utils import ( _extract_all_methods, _extract_all_methods_recursive, @@ -426,8 +435,7 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] """ def __init__(self, lst: list[T], lock: threading.Lock) -> None: - # Do NOT call super().__init__() -- we don't want to copy data into - # the builtin list storage. All access goes through self._list. 
+ super().__init__() # empty builtin list; all access goes through self._list self._list = lst self._lock = lock @@ -435,11 +443,11 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] with self._lock: self._list.append(item) - def extend(self, items: list[T]) -> None: + def extend(self, items: Iterable[T]) -> None: with self._lock: self._list.extend(items) - def insert(self, index: int, item: T) -> None: + def insert(self, index: SupportsIndex, item: T) -> None: with self._lock: self._list.insert(index, item) @@ -447,7 +455,7 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] with self._lock: self._list.remove(item) - def pop(self, index: int = -1) -> T: + def pop(self, index: SupportsIndex = -1) -> T: with self._lock: return self._list.pop(index) @@ -455,15 +463,23 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] with self._lock: self._list.clear() - def __setitem__(self, index: int, value: T) -> None: + @overload + def __setitem__(self, index: SupportsIndex, value: T) -> None: ... + @overload + def __setitem__(self, index: slice, value: Iterable[T]) -> None: ... + def __setitem__(self, index: Any, value: Any) -> None: with self._lock: self._list[index] = value - def __delitem__(self, index: int) -> None: + def __delitem__(self, index: SupportsIndex | slice) -> None: with self._lock: del self._list[index] - def __getitem__(self, index: int) -> T: + @overload + def __getitem__(self, index: SupportsIndex) -> T: ... + @overload + def __getitem__(self, index: slice) -> list[T]: ... 
+ def __getitem__(self, index: Any) -> Any: return self._list[index] def __len__(self) -> int: @@ -481,7 +497,51 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] def __bool__(self) -> bool: return bool(self._list) - def __eq__(self, other: object) -> bool: # type: ignore[override] + def index(self, value: T, start: SupportsIndex = 0, stop: SupportsIndex | None = None) -> int: # type: ignore[override] + if stop is None: + return self._list.index(value, start) + return self._list.index(value, start, stop) + + def count(self, value: T) -> int: + return self._list.count(value) + + def sort(self, *, key: Any = None, reverse: bool = False) -> None: + with self._lock: + self._list.sort(key=key, reverse=reverse) + + def reverse(self) -> None: + with self._lock: + self._list.reverse() + + def copy(self) -> list[T]: + return self._list.copy() + + def __add__(self, other: list[T]) -> list[T]: + return self._list + other + + def __radd__(self, other: list[T]) -> list[T]: + return other + self._list + + def __iadd__(self, other: Iterable[T]) -> LockedListProxy[T]: + with self._lock: + self._list += list(other) + return self + + def __mul__(self, n: SupportsIndex) -> list[T]: + return self._list * n + + def __rmul__(self, n: SupportsIndex) -> list[T]: + return self._list * n + + def __imul__(self, n: SupportsIndex) -> LockedListProxy[T]: + with self._lock: + self._list *= n + return self + + def __reversed__(self) -> Iterator[T]: + return reversed(self._list) + + def __eq__(self, other: object) -> bool: """Compare based on the underlying list contents.""" if isinstance(other, LockedListProxy): # Avoid deadlocks by acquiring locks in a consistent order. 
@@ -492,7 +552,7 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg] with self._lock: return self._list == other - def __ne__(self, other: object) -> bool: # type: ignore[override] + def __ne__(self, other: object) -> bool: return not self.__eq__(other) @@ -505,8 +565,7 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] """ def __init__(self, d: dict[str, T], lock: threading.Lock) -> None: - # Do NOT call super().__init__() -- we don't want to copy data into - # the builtin dict storage. All access goes through self._dict. + super().__init__() # empty builtin dict; all access goes through self._dict self._dict = d self._lock = lock @@ -518,11 +577,11 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] with self._lock: del self._dict[key] - def pop(self, key: str, *default: T) -> T: + def pop(self, key: str, *default: T) -> T: # type: ignore[override] with self._lock: return self._dict.pop(key, *default) - def update(self, other: dict[str, T]) -> None: + def update(self, other: dict[str, T]) -> None: # type: ignore[override] with self._lock: self._dict.update(other) @@ -530,7 +589,7 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] with self._lock: self._dict.clear() - def setdefault(self, key: str, default: T) -> T: + def setdefault(self, key: str, default: T) -> T: # type: ignore[override] with self._lock: return self._dict.setdefault(key, default) @@ -546,16 +605,16 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] def __contains__(self, key: object) -> bool: return key in self._dict - def keys(self) -> KeysView[str]: + def keys(self) -> KeysView[str]: # type: ignore[override] return self._dict.keys() - def values(self) -> ValuesView[T]: + def values(self) -> ValuesView[T]: # type: ignore[override] return self._dict.values() - def items(self) -> ItemsView[str, T]: + def items(self) -> ItemsView[str, T]: # type: ignore[override] return self._dict.items() - def get(self, key: 
str, default: T | None = None) -> T | None: + def get(self, key: str, default: T | None = None) -> T | None: # type: ignore[override] return self._dict.get(key, default) def __repr__(self) -> str: @@ -564,7 +623,24 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] def __bool__(self) -> bool: return bool(self._dict) - def __eq__(self, other: object) -> bool: # type: ignore[override] + def copy(self) -> dict[str, T]: + return self._dict.copy() + + def __or__(self, other: dict[str, T]) -> dict[str, T]: + return self._dict | other + + def __ror__(self, other: dict[str, T]) -> dict[str, T]: + return other | self._dict + + def __ior__(self, other: dict[str, T]) -> LockedDictProxy[T]: + with self._lock: + self._dict |= other + return self + + def __reversed__(self) -> Iterator[str]: + return reversed(self._dict) + + def __eq__(self, other: object) -> bool: """Compare based on the underlying dict contents.""" if isinstance(other, LockedDictProxy): # Avoid deadlocks by acquiring locks in a consistent order. 
@@ -575,7 +651,7 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg] with self._lock: return self._dict == other - def __ne__(self, other: object) -> bool: # type: ignore[override] + def __ne__(self, other: object) -> bool: return not self.__eq__(other) @@ -605,6 +681,10 @@ class StateProxy(Generic[T]): if name in ("_proxy_state", "_proxy_lock"): object.__setattr__(self, name, value) else: + if isinstance(value, LockedListProxy): + value = value._list + elif isinstance(value, LockedDictProxy): + value = value._dict with object.__getattribute__(self, "_proxy_lock"): setattr(object.__getattribute__(self, "_proxy_state"), name, value) @@ -677,6 +757,7 @@ class FlowMeta(type): condition_type = getattr( attr_value, "__condition_type__", OR_CONDITION ) + if ( hasattr(attr_value, "__trigger_condition__") and attr_value.__trigger_condition__ is not None @@ -737,7 +818,9 @@ class Flow(Generic[T], metaclass=FlowMeta): name: str | None = None tracing: bool | None = None stream: bool = False - memory: Any = None # Memory | MemoryScope | MemorySlice | None; auto-created if not set + memory: Any = ( + None # Memory | MemoryScope | MemorySlice | None; auto-created if not set + ) input_provider: Any = None # InputProvider | None; per-flow override for self.ask() def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]: @@ -752,6 +835,7 @@ class Flow(Generic[T], metaclass=FlowMeta): persistence: FlowPersistence | None = None, tracing: bool | None = None, suppress_flow_events: bool = False, + max_method_calls: int = 100, **kwargs: Any, ) -> None: """Initialize a new Flow instance. @@ -760,6 +844,7 @@ class Flow(Generic[T], metaclass=FlowMeta): persistence: Optional persistence backend for storing flow states tracing: Whether to enable tracing. 
True=always enable, False=always disable, None=check environment/user settings suppress_flow_events: Whether to suppress flow event emissions (internal use) + max_method_calls: Maximum times a single method can be called per execution before raising RecursionError **kwargs: Additional state values to initialize or override """ # Initialize basic instance attributes @@ -775,6 +860,8 @@ class Flow(Generic[T], metaclass=FlowMeta): self._completed_methods: set[FlowMethodName] = ( set() ) # Track completed methods for reload + self._method_call_counts: dict[FlowMethodName, int] = {} + self._max_method_calls = max_method_calls self._persistence: FlowPersistence | None = persistence self._is_execution_resuming: bool = False self._event_futures: list[Future[None]] = [] @@ -881,7 +968,8 @@ class Flow(Generic[T], metaclass=FlowMeta): """ if self.memory is None: raise ValueError("No memory configured for this flow") - return self.memory.extract_memories(content) + result: list[str] = self.memory.extract_memories(content) + return result def _mark_or_listener_fired(self, listener_name: FlowMethodName) -> bool: """Mark an OR listener as fired atomically. 
@@ -1352,8 +1440,10 @@ class Flow(Generic[T], metaclass=FlowMeta): ValueError: If structured state model lacks 'id' field TypeError: If state is neither BaseModel nor dictionary """ + init_state = self.initial_state + # Handle case where initial_state is None but we have a type parameter - if self.initial_state is None and hasattr(self, "_initial_state_t"): + if init_state is None and hasattr(self, "_initial_state_t"): state_type = self._initial_state_t if isinstance(state_type, type): if issubclass(state_type, FlowState): @@ -1377,12 +1467,12 @@ class Flow(Generic[T], metaclass=FlowMeta): return cast(T, {"id": str(uuid4())}) # Handle case where no initial state is provided - if self.initial_state is None: + if init_state is None: return cast(T, {"id": str(uuid4())}) # Handle case where initial_state is a type (class) - if isinstance(self.initial_state, type): - state_class: type[T] = self.initial_state + if isinstance(init_state, type): + state_class = init_state if issubclass(state_class, FlowState): return state_class() if issubclass(state_class, BaseModel): @@ -1393,19 +1483,19 @@ class Flow(Generic[T], metaclass=FlowMeta): if not getattr(model_instance, "id", None): object.__setattr__(model_instance, "id", str(uuid4())) return model_instance - if self.initial_state is dict: + if init_state is dict: return cast(T, {"id": str(uuid4())}) # Handle dictionary instance case - if isinstance(self.initial_state, dict): - new_state = dict(self.initial_state) # Copy to avoid mutations + if isinstance(init_state, dict): + new_state = dict(init_state) # Copy to avoid mutations if "id" not in new_state: new_state["id"] = str(uuid4()) return cast(T, new_state) # Handle BaseModel instance case - if isinstance(self.initial_state, BaseModel): - model = cast(BaseModel, self.initial_state) + if isinstance(init_state, BaseModel): + model = cast(BaseModel, init_state) if not hasattr(model, "id"): raise ValueError("Flow state model must have an 'id' field") @@ -1719,7 +1809,12 @@ 
class Flow(Generic[T], metaclass=FlowMeta): async def _run_flow() -> Any: return await self.kickoff_async(inputs, input_files) - return asyncio.run(_run_flow()) + try: + asyncio.get_running_loop() + with ThreadPoolExecutor(max_workers=1) as pool: + return pool.submit(asyncio.run, _run_flow()).result() + except RuntimeError: + return asyncio.run(_run_flow()) async def kickoff_async( self, @@ -1803,9 +1898,15 @@ class Flow(Generic[T], metaclass=FlowMeta): self._method_outputs.clear() self._pending_and_listeners.clear() self._clear_or_listeners() + self._method_call_counts.clear() else: - # We're restoring from persistence, set the flag - self._is_execution_resuming = True + # Only enter resumption mode if there are completed methods to + # replay. When _completed_methods is empty (e.g. a pure + # state-reload via kickoff(inputs={"id": ...})), the flow + # executes from scratch and the flag would incorrectly + # suppress cyclic re-execution on the second iteration. + if self._completed_methods: + self._is_execution_resuming = True if inputs: # Override the id in the state if it exists in inputs @@ -2173,6 +2274,8 @@ class Flow(Generic[T], metaclass=FlowMeta): from crewai.flow.async_feedback.types import HumanFeedbackPending if isinstance(e, HumanFeedbackPending): + e.context.method_name = method_name + # Auto-save pending feedback (create default persistence if needed) if self._persistence is None: from crewai.flow.persistence import SQLiteFlowPersistence @@ -2272,14 +2375,23 @@ class Flow(Generic[T], metaclass=FlowMeta): router_name, router_input, current_triggering_event_id ) if router_result: # Only add non-None results - router_results.append(FlowMethodName(str(router_result))) + router_result_str = ( + router_result.value + if isinstance(router_result, enum.Enum) + else str(router_result) + ) + router_results.append(FlowMethodName(router_result_str)) # If this was a human_feedback router, map the outcome to the feedback if self.last_human_feedback is not None: - 
router_result_to_feedback[str(router_result)] = ( + router_result_to_feedback[router_result_str] = ( self.last_human_feedback ) current_trigger = ( - FlowMethodName(str(router_result)) + FlowMethodName( + router_result.value + if isinstance(router_result, enum.Enum) + else str(router_result) + ) if router_result is not None else FlowMethodName("") # Update for next iteration of router chain ) @@ -2528,6 +2640,16 @@ class Flow(Generic[T], metaclass=FlowMeta): - Skips execution if method was already completed (e.g., after reload) - Catches and logs any exceptions during execution, preventing individual listener failures from breaking the entire flow """ + count = self._method_call_counts.get(listener_name, 0) + 1 + if count > self._max_method_calls: + raise RecursionError( + f"Method '{listener_name}' has been called {self._max_method_calls} times in " + f"this flow execution, which indicates an infinite loop. " + f"This commonly happens when a @listen label matches the " + f"method's own name." 
+ ) + self._method_call_counts[listener_name] = count + if listener_name in self._completed_methods: if self._is_execution_resuming: # During resumption, skip execution but continue listeners @@ -2696,7 +2818,10 @@ class Flow(Generic[T], metaclass=FlowMeta): return topic ``` """ - from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError + from concurrent.futures import ( + ThreadPoolExecutor, + TimeoutError as FuturesTimeoutError, + ) from datetime import datetime from crewai.events.types.flow_events import ( @@ -2765,14 +2890,16 @@ class Flow(Generic[T], metaclass=FlowMeta): response = None # Record in history - self._input_history.append({ - "message": message, - "response": response, - "method_name": method_name, - "timestamp": datetime.now(), - "metadata": metadata, - "response_metadata": response_metadata, - }) + self._input_history.append( + { + "message": message, + "response": response, + "method_name": method_name, + "timestamp": datetime.now(), + "metadata": metadata, + "response_metadata": response_metadata, + } + ) # Emit input received event crewai_event_bus.emit( diff --git a/lib/crewai/src/crewai/flow/human_feedback.py b/lib/crewai/src/crewai/flow/human_feedback.py index 4a191da99..096687d7a 100644 --- a/lib/crewai/src/crewai/flow/human_feedback.py +++ b/lib/crewai/src/crewai/flow/human_feedback.py @@ -408,7 +408,7 @@ def human_feedback( emit=list(emit) if emit else None, default_outcome=default_outcome, metadata=metadata or {}, - llm=llm if isinstance(llm, str) else None, + llm=llm if isinstance(llm, str) else getattr(llm, "model", None), ) # Determine effective provider: diff --git a/lib/crewai/src/crewai/lite_agent.py b/lib/crewai/src/crewai/lite_agent.py index 7a7097bf2..4e7d22280 100644 --- a/lib/crewai/src/crewai/lite_agent.py +++ b/lib/crewai/src/crewai/lite_agent.py @@ -2,10 +2,10 @@ from __future__ import annotations import asyncio from collections.abc import Callable -import time from functools import wraps 
import inspect import json +import time from types import MethodType from typing import ( TYPE_CHECKING, @@ -49,15 +49,20 @@ from crewai.events.types.agent_events import ( LiteAgentExecutionErrorEvent, LiteAgentExecutionStartedEvent, ) +from crewai.events.types.logging_events import AgentLogsExecutionEvent from crewai.events.types.memory_events import ( MemoryRetrievalCompletedEvent, MemoryRetrievalFailedEvent, MemoryRetrievalStartedEvent, ) -from crewai.events.types.logging_events import AgentLogsExecutionEvent from crewai.flow.flow_trackable import FlowTrackable from crewai.hooks.llm_hooks import get_after_llm_call_hooks, get_before_llm_call_hooks -from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType +from crewai.hooks.types import ( + AfterLLMCallHookCallable, + AfterLLMCallHookType, + BeforeLLMCallHookCallable, + BeforeLLMCallHookType, +) from crewai.lite_agent_output import LiteAgentOutput from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM @@ -270,11 +275,11 @@ class LiteAgent(FlowTrackable, BaseModel): _guardrail: GuardrailCallable | None = PrivateAttr(default=None) _guardrail_retry_count: int = PrivateAttr(default=0) _callbacks: list[TokenCalcHandler] = PrivateAttr(default_factory=list) - _before_llm_call_hooks: list[BeforeLLMCallHookType] = PrivateAttr( - default_factory=get_before_llm_call_hooks + _before_llm_call_hooks: list[BeforeLLMCallHookType | BeforeLLMCallHookCallable] = ( + PrivateAttr(default_factory=get_before_llm_call_hooks) ) - _after_llm_call_hooks: list[AfterLLMCallHookType] = PrivateAttr( - default_factory=get_after_llm_call_hooks + _after_llm_call_hooks: list[AfterLLMCallHookType | AfterLLMCallHookCallable] = ( + PrivateAttr(default_factory=get_after_llm_call_hooks) ) _memory: Any = PrivateAttr(default=None) @@ -440,12 +445,16 @@ class LiteAgent(FlowTrackable, BaseModel): return self.role @property - def before_llm_call_hooks(self) -> list[BeforeLLMCallHookType]: + def before_llm_call_hooks( + self, + 
) -> list[BeforeLLMCallHookType | BeforeLLMCallHookCallable]: """Get the before_llm_call hooks for this agent.""" return self._before_llm_call_hooks @property - def after_llm_call_hooks(self) -> list[AfterLLMCallHookType]: + def after_llm_call_hooks( + self, + ) -> list[AfterLLMCallHookType | AfterLLMCallHookCallable]: """Get the after_llm_call hooks for this agent.""" return self._after_llm_call_hooks @@ -482,11 +491,12 @@ class LiteAgent(FlowTrackable, BaseModel): # Inject memory tools once if memory is configured (mirrors Agent._prepare_kickoff) if self._memory is not None: from crewai.tools.memory_tools import create_memory_tools - from crewai.utilities.agent_utils import sanitize_tool_name + from crewai.utilities.string_utils import sanitize_tool_name existing_names = {sanitize_tool_name(t.name) for t in self._parsed_tools} memory_tools = [ - mt for mt in create_memory_tools(self._memory) + mt + for mt in create_memory_tools(self._memory) if sanitize_tool_name(mt.name) not in existing_names ] if memory_tools: @@ -565,9 +575,10 @@ class LiteAgent(FlowTrackable, BaseModel): if memory_block: formatted = self.i18n.slice("memory").format(memory=memory_block) if self._messages and self._messages[0].get("role") == "system": - self._messages[0]["content"] = ( - self._messages[0].get("content", "") + "\n\n" + formatted - ) + existing_content = self._messages[0].get("content", "") + if not isinstance(existing_content, str): + existing_content = "" + self._messages[0]["content"] = existing_content + "\n\n" + formatted crewai_event_bus.emit( self, event=MemoryRetrievalCompletedEvent( @@ -588,16 +599,12 @@ class LiteAgent(FlowTrackable, BaseModel): ) def _save_to_memory(self, output_text: str) -> None: - """Extract discrete memories from the run and remember each. No-op if _memory is None.""" - if self._memory is None: + """Extract discrete memories from the run and remember each. 
No-op if _memory is None or read-only.""" + if self._memory is None or self._memory.read_only: return input_str = self._get_last_user_content() or "User request" try: - raw = ( - f"Input: {input_str}\n" - f"Agent: {self.role}\n" - f"Result: {output_text}" - ) + raw = f"Input: {input_str}\nAgent: {self.role}\nResult: {output_text}" extracted = self._memory.extract_memories(raw) if extracted: self._memory.remember_many(extracted, agent_role=self.role) @@ -622,13 +629,20 @@ class LiteAgent(FlowTrackable, BaseModel): ) # Execute the agent using invoke loop - agent_finish = self._invoke_loop() + active_response_format = response_format or self.response_format + agent_finish = self._invoke_loop(response_model=active_response_format) if self._memory is not None: - self._save_to_memory(agent_finish.output) + output_text = ( + agent_finish.output.model_dump_json() + if isinstance(agent_finish.output, BaseModel) + else agent_finish.output + ) + self._save_to_memory(output_text) formatted_result: BaseModel | None = None - active_response_format = response_format or self.response_format - if active_response_format: + if isinstance(agent_finish.output, BaseModel): + formatted_result = agent_finish.output + elif active_response_format: try: model_schema = generate_model_description(active_response_format) schema = json.dumps(model_schema, indent=2) @@ -660,8 +674,13 @@ class LiteAgent(FlowTrackable, BaseModel): usage_metrics = self._token_process.get_summary() # Create output + raw_output = ( + agent_finish.output.model_dump_json() + if isinstance(agent_finish.output, BaseModel) + else agent_finish.output + ) output = LiteAgentOutput( - raw=agent_finish.output, + raw=raw_output, pydantic=formatted_result, agent_role=self.role, usage_metrics=usage_metrics.model_dump() if usage_metrics else None, @@ -838,10 +857,15 @@ class LiteAgent(FlowTrackable, BaseModel): return formatted_messages - def _invoke_loop(self) -> AgentFinish: + def _invoke_loop( + self, response_model: 
type[BaseModel] | None = None + ) -> AgentFinish: """ Run the agent's thought process until it reaches a conclusion or max iterations. + Args: + response_model: Optional Pydantic model for native structured output. + Returns: AgentFinish: The final result of the agent execution. """ @@ -870,12 +894,19 @@ class LiteAgent(FlowTrackable, BaseModel): printer=self._printer, from_agent=self, executor_context=self, + response_model=response_model, verbose=self.verbose, ) except Exception as e: raise e + if isinstance(answer, BaseModel): + formatted_answer = AgentFinish( + thought="", output=answer, text=answer.model_dump_json() + ) + break + formatted_answer = process_llm_response( cast(str, answer), self.use_stop_words ) @@ -901,7 +932,7 @@ class LiteAgent(FlowTrackable, BaseModel): ) self._append_message(formatted_answer.text, role="assistant") - except OutputParserError as e: # noqa: PERF203 + except OutputParserError as e: if self.verbose: self._printer.print( content="Failed to parse LLM output. Retrying...", diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm.py index 20a0373cb..8a4ac2edd 100644 --- a/lib/crewai/src/crewai/llm.py +++ b/lib/crewai/src/crewai/llm.py @@ -427,7 +427,7 @@ class LLM(BaseLLM): f"installed.\n\n" f"To fix this, either:\n" f" 1. 
Install LiteLLM for broad model support: " - f"uv add litellm\n" + f"uv add 'crewai[litellm]'\n" f"or\n" f"pip install litellm\n\n" f"For more details, see: " diff --git a/lib/crewai/src/crewai/llms/base_llm.py b/lib/crewai/src/crewai/llms/base_llm.py index dcb261fd7..1ab710706 100644 --- a/lib/crewai/src/crewai/llms/base_llm.py +++ b/lib/crewai/src/crewai/llms/base_llm.py @@ -26,6 +26,7 @@ from crewai.events.types.llm_events import ( LLMCallStartedEvent, LLMCallType, LLMStreamChunkEvent, + LLMThinkingChunkEvent, ) from crewai.events.types.tool_usage_events import ( ToolUsageErrorEvent, @@ -368,9 +369,6 @@ class BaseLLM(ABC): """Emit LLM call started event.""" from crewai.utilities.serialization import to_serializable - if not hasattr(crewai_event_bus, "emit"): - raise ValueError("crewai_event_bus does not have an emit method") from None - crewai_event_bus.emit( self, event=LLMCallStartedEvent( @@ -416,9 +414,6 @@ class BaseLLM(ABC): from_agent: Agent | None = None, ) -> None: """Emit LLM call failed event.""" - if not hasattr(crewai_event_bus, "emit"): - raise ValueError("crewai_event_bus does not have an emit method") from None - crewai_event_bus.emit( self, event=LLMCallFailedEvent( @@ -449,9 +444,6 @@ class BaseLLM(ABC): call_type: The type of LLM call (LLM_CALL or TOOL_CALL). response_id: Unique ID for a particular LLM response, chunks have same response_id. """ - if not hasattr(crewai_event_bus, "emit"): - raise ValueError("crewai_event_bus does not have an emit method") from None - crewai_event_bus.emit( self, event=LLMStreamChunkEvent( @@ -465,6 +457,32 @@ class BaseLLM(ABC): ), ) + def _emit_thinking_chunk_event( + self, + chunk: str, + from_task: Task | None = None, + from_agent: Agent | None = None, + response_id: str | None = None, + ) -> None: + """Emit thinking/reasoning chunk event from a thinking model. + + Args: + chunk: The thinking text content. + from_task: The task that initiated the call. + from_agent: The agent that initiated the call. 
+ response_id: Unique ID for a particular LLM response. + """ + crewai_event_bus.emit( + self, + event=LLMThinkingChunkEvent( + chunk=chunk, + from_task=from_task, + from_agent=from_agent, + response_id=response_id, + call_id=get_current_call_id(), + ), + ) + def _handle_tool_execution( self, function_name: str, diff --git a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py b/lib/crewai/src/crewai/llms/providers/bedrock/completion.py index 47946d949..c707be3af 100644 --- a/lib/crewai/src/crewai/llms/providers/bedrock/completion.py +++ b/lib/crewai/src/crewai/llms/providers/bedrock/completion.py @@ -234,7 +234,7 @@ class BedrockCompletion(BaseLLM): aws_access_key_id: str | None = None, aws_secret_access_key: str | None = None, aws_session_token: str | None = None, - region_name: str = "us-east-1", + region_name: str | None = None, temperature: float | None = None, max_tokens: int | None = None, top_p: float | None = None, @@ -287,15 +287,6 @@ class BedrockCompletion(BaseLLM): **kwargs, ) - # Initialize Bedrock client with proper configuration - session = Session( - aws_access_key_id=aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID"), - aws_secret_access_key=aws_secret_access_key - or os.getenv("AWS_SECRET_ACCESS_KEY"), - aws_session_token=aws_session_token or os.getenv("AWS_SESSION_TOKEN"), - region_name=region_name, - ) - # Configure client with timeouts and retries following AWS best practices config = Config( read_timeout=300, @@ -306,8 +297,12 @@ class BedrockCompletion(BaseLLM): tcp_keepalive=True, ) - self.client = session.client("bedrock-runtime", config=config) - self.region_name = region_name + self.region_name = ( + region_name + or os.getenv("AWS_DEFAULT_REGION") + or os.getenv("AWS_REGION_NAME") + or "us-east-1" + ) self.aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID") self.aws_secret_access_key = aws_secret_access_key or os.getenv( @@ -315,6 +310,16 @@ class BedrockCompletion(BaseLLM): ) self.aws_session_token = 
aws_session_token or os.getenv("AWS_SESSION_TOKEN") + # Initialize Bedrock client with proper configuration + session = Session( + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key, + aws_session_token=self.aws_session_token, + region_name=self.region_name, + ) + + self.client = session.client("bedrock-runtime", config=config) + self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None self._async_client_initialized = False diff --git a/lib/crewai/src/crewai/llms/providers/gemini/completion.py b/lib/crewai/src/crewai/llms/providers/gemini/completion.py index 14603b7d2..fd0530abe 100644 --- a/lib/crewai/src/crewai/llms/providers/gemini/completion.py +++ b/lib/crewai/src/crewai/llms/providers/gemini/completion.py @@ -61,6 +61,7 @@ class GeminiCompletion(BaseLLM): interceptor: BaseInterceptor[Any, Any] | None = None, use_vertexai: bool | None = None, response_format: type[BaseModel] | None = None, + thinking_config: types.ThinkingConfig | None = None, **kwargs: Any, ): """Initialize Google Gemini chat completion client. @@ -93,6 +94,10 @@ class GeminiCompletion(BaseLLM): api_version="v1" is automatically configured. response_format: Pydantic model for structured output. Used as default when response_model is not passed to call()/acall() methods. + thinking_config: ThinkingConfig for thinking models (gemini-2.5+, gemini-3+). + Controls thought output via include_thoughts, thinking_budget, + and thinking_level. When None, thinking models automatically + get include_thoughts=True so thought content is surfaced. 
**kwargs: Additional parameters """ if interceptor is not None: @@ -139,6 +144,14 @@ class GeminiCompletion(BaseLLM): version_match and float(version_match.group(1)) >= 2.0 ) + self.thinking_config = thinking_config + if ( + self.thinking_config is None + and version_match + and float(version_match.group(1)) >= 2.5 + ): + self.thinking_config = types.ThinkingConfig(include_thoughts=True) + @property def stop(self) -> list[str]: """Get stop sequences sent to the API.""" @@ -520,6 +533,9 @@ class GeminiCompletion(BaseLLM): if self.safety_settings: config_params["safety_settings"] = self.safety_settings + if self.thinking_config is not None: + config_params["thinking_config"] = self.thinking_config + return types.GenerateContentConfig(**config_params) def _convert_tools_for_interference( # type: ignore[override] @@ -618,9 +634,17 @@ class GeminiCompletion(BaseLLM): function_response_part = types.Part.from_function_response( name=tool_name, response=response_data ) - contents.append( - types.Content(role="user", parts=[function_response_part]) - ) + if ( + contents + and contents[-1].role == "user" + and contents[-1].parts + and contents[-1].parts[-1].function_response is not None + ): + contents[-1].parts.append(function_response_part) + else: + contents.append( + types.Content(role="user", parts=[function_response_part]) + ) elif role == "assistant" and message.get("tool_calls"): raw_parts: list[Any] | None = message.get("raw_tool_call_parts") if raw_parts and all(isinstance(p, types.Part) for p in raw_parts): @@ -894,7 +918,7 @@ class GeminiCompletion(BaseLLM): content = self._extract_text_from_response(response) effective_response_model = None if self.tools else response_model - if not effective_response_model: + if not response_model: content = self._apply_stop_words(content) return self._finalize_completion_response( @@ -931,15 +955,6 @@ class GeminiCompletion(BaseLLM): if chunk.usage_metadata: usage_data = self._extract_token_usage(chunk) - if chunk.text: - 
full_response += chunk.text - self._emit_stream_chunk_event( - chunk=chunk.text, - from_task=from_task, - from_agent=from_agent, - response_id=response_id, - ) - if chunk.candidates: candidate = chunk.candidates[0] if candidate.content and candidate.content.parts: @@ -976,6 +991,21 @@ class GeminiCompletion(BaseLLM): call_type=LLMCallType.TOOL_CALL, response_id=response_id, ) + elif part.thought and part.text: + self._emit_thinking_chunk_event( + chunk=part.text, + from_task=from_task, + from_agent=from_agent, + response_id=response_id, + ) + elif part.text: + full_response += part.text + self._emit_stream_chunk_event( + chunk=part.text, + from_task=from_task, + from_agent=from_agent, + response_id=response_id, + ) return full_response, function_calls, usage_data @@ -1329,7 +1359,7 @@ class GeminiCompletion(BaseLLM): text_parts = [ part.text for part in candidate.content.parts - if hasattr(part, "text") and part.text + if part.text and not part.thought ] return "".join(text_parts) diff --git a/lib/crewai/src/crewai/mcp/__init__.py b/lib/crewai/src/crewai/mcp/__init__.py index 282cb1f56..e078919fd 100644 --- a/lib/crewai/src/crewai/mcp/__init__.py +++ b/lib/crewai/src/crewai/mcp/__init__.py @@ -18,6 +18,7 @@ from crewai.mcp.filters import ( create_dynamic_tool_filter, create_static_tool_filter, ) +from crewai.mcp.tool_resolver import MCPToolResolver from crewai.mcp.transports.base import BaseTransport, TransportType @@ -28,6 +29,7 @@ __all__ = [ "MCPServerHTTP", "MCPServerSSE", "MCPServerStdio", + "MCPToolResolver", "StaticToolFilter", "ToolFilter", "ToolFilterContext", diff --git a/lib/crewai/src/crewai/mcp/client.py b/lib/crewai/src/crewai/mcp/client.py index f608933f6..2b5d75371 100644 --- a/lib/crewai/src/crewai/mcp/client.py +++ b/lib/crewai/src/crewai/mcp/client.py @@ -6,7 +6,7 @@ from contextlib import AsyncExitStack from datetime import datetime import logging import time -from typing import Any +from typing import Any, NamedTuple from typing_extensions 
import Self @@ -34,6 +34,13 @@ from crewai.mcp.transports.stdio import StdioTransport from crewai.utilities.string_utils import sanitize_tool_name +class _MCPToolResult(NamedTuple): + """Internal result from an MCP tool call, carrying the ``isError`` flag.""" + + content: str + is_error: bool + + # MCP Connection timeout constants (in seconds) MCP_CONNECTION_TIMEOUT = 30 # Increased for slow servers MCP_TOOL_EXECUTION_TIMEOUT = 30 @@ -420,6 +427,7 @@ class MCPClient: return [ { "name": sanitize_tool_name(tool.name), + "original_name": tool.name, "description": getattr(tool, "description", ""), "inputSchema": getattr(tool, "inputSchema", {}), } @@ -461,29 +469,46 @@ class MCPClient: ) try: - result = await self._retry_operation( + tool_result: _MCPToolResult = await self._retry_operation( lambda: self._call_tool_impl(tool_name, cleaned_arguments), timeout=self.execution_timeout, ) - completed_at = datetime.now() - execution_duration_ms = (completed_at - started_at).total_seconds() * 1000 - crewai_event_bus.emit( - self, - MCPToolExecutionCompletedEvent( - server_name=server_name, - server_url=server_url, - transport_type=transport_type, - tool_name=tool_name, - tool_args=cleaned_arguments, - result=result, - started_at=started_at, - completed_at=completed_at, - execution_duration_ms=execution_duration_ms, - ), - ) + finished_at = datetime.now() + execution_duration_ms = (finished_at - started_at).total_seconds() * 1000 - return result + if tool_result.is_error: + crewai_event_bus.emit( + self, + MCPToolExecutionFailedEvent( + server_name=server_name, + server_url=server_url, + transport_type=transport_type, + tool_name=tool_name, + tool_args=cleaned_arguments, + error=tool_result.content, + error_type="tool_error", + started_at=started_at, + failed_at=finished_at, + ), + ) + else: + crewai_event_bus.emit( + self, + MCPToolExecutionCompletedEvent( + server_name=server_name, + server_url=server_url, + transport_type=transport_type, + tool_name=tool_name, + 
tool_args=cleaned_arguments, + result=tool_result.content, + started_at=started_at, + completed_at=finished_at, + execution_duration_ms=execution_duration_ms, + ), + ) + + return tool_result.content except Exception as e: failed_at = datetime.now() error_type = ( @@ -564,23 +589,27 @@ class MCPClient: return cleaned - async def _call_tool_impl(self, tool_name: str, arguments: dict[str, Any]) -> Any: + async def _call_tool_impl( + self, tool_name: str, arguments: dict[str, Any] + ) -> _MCPToolResult: """Internal implementation of call_tool.""" result = await asyncio.wait_for( self.session.call_tool(tool_name, arguments), timeout=self.execution_timeout, ) + is_error = getattr(result, "isError", False) or False + # Extract result content if hasattr(result, "content") and result.content: if isinstance(result.content, list) and len(result.content) > 0: content_item = result.content[0] if hasattr(content_item, "text"): - return str(content_item.text) - return str(content_item) - return str(result.content) + return _MCPToolResult(str(content_item.text), is_error) + return _MCPToolResult(str(content_item), is_error) + return _MCPToolResult(str(result.content), is_error) - return str(result) + return _MCPToolResult(str(result), is_error) async def list_prompts(self) -> list[dict[str, Any]]: """List available prompts from MCP server. diff --git a/lib/crewai/src/crewai/mcp/tool_resolver.py b/lib/crewai/src/crewai/mcp/tool_resolver.py new file mode 100644 index 000000000..34af189f2 --- /dev/null +++ b/lib/crewai/src/crewai/mcp/tool_resolver.py @@ -0,0 +1,592 @@ +"""MCP tool resolution for CrewAI agents. + +This module extracts all MCP-related tool resolution logic from the Agent class +into a standalone MCPToolResolver. It handles three flavours of MCP reference: + + 1. Native configs: MCPServerStdio / MCPServerHTTP / MCPServerSSE objects. + 2. HTTPS URLs: e.g. "https://mcp.example.com/api" + 3. AMP references: e.g. 
"notion" or "notion#search" (legacy "crewai-amp:" prefix also works) +""" + +from __future__ import annotations + +import asyncio +import time +from typing import TYPE_CHECKING, Any, Final, cast +from urllib.parse import urlparse + +from crewai.mcp.client import MCPClient +from crewai.mcp.config import ( + MCPServerConfig, + MCPServerHTTP, + MCPServerSSE, + MCPServerStdio, +) +from crewai.mcp.transports.http import HTTPTransport +from crewai.mcp.transports.sse import SSETransport +from crewai.mcp.transports.stdio import StdioTransport + + +if TYPE_CHECKING: + from crewai.tools.base_tool import BaseTool + from crewai.utilities.logger import Logger + +MCP_CONNECTION_TIMEOUT: Final[int] = 10 +MCP_TOOL_EXECUTION_TIMEOUT: Final[int] = 30 +MCP_DISCOVERY_TIMEOUT: Final[int] = 15 +MCP_MAX_RETRIES: Final[int] = 3 + +_mcp_schema_cache: dict[str, Any] = {} +_cache_ttl: Final[int] = 300 # 5 minutes + + +class MCPToolResolver: + """Resolves MCP server references / configs into CrewAI ``BaseTool`` instances. + + Typical lifecycle:: + + resolver = MCPToolResolver(agent=my_agent, logger=my_agent._logger) + tools = resolver.resolve(my_agent.mcps) + # … agent executes tasks using *tools* … + resolver.cleanup() + + The resolver owns the MCP client connections it creates and is responsible + for tearing them down via :meth:`cleanup`. 
+ """ + + def __init__(self, agent: Any, logger: Logger) -> None: + self._agent = agent + self._logger = logger + self._clients: list[Any] = [] + + @property + def clients(self) -> list[Any]: + return list(self._clients) + + def resolve(self, mcps: list[str | MCPServerConfig]) -> list[BaseTool]: + """Convert MCP server references/configs to CrewAI tools.""" + all_tools: list[BaseTool] = [] + amp_refs: list[tuple[str, str | None]] = [] + + for mcp_config in mcps: + if isinstance(mcp_config, str) and mcp_config.startswith("https://"): + all_tools.extend(self._resolve_external(mcp_config)) + elif isinstance(mcp_config, str): + amp_refs.append(self._parse_amp_ref(mcp_config)) + else: + tools, client = self._resolve_native(mcp_config) + all_tools.extend(tools) + if client: + self._clients.append(client) + + if amp_refs: + tools, clients = self._resolve_amp(amp_refs) + all_tools.extend(tools) + self._clients.extend(clients) + + return all_tools + + def cleanup(self) -> None: + """Disconnect all MCP client connections.""" + if not self._clients: + return + + async def _disconnect_all() -> None: + for client in self._clients: + if client and hasattr(client, "connected") and client.connected: + await client.disconnect() + + try: + asyncio.run(_disconnect_all()) + except Exception as e: + self._logger.log("error", f"Error during MCP client cleanup: {e}") + finally: + self._clients.clear() + + @staticmethod + def _parse_amp_ref(mcp_config: str) -> tuple[str, str | None]: + """Parse an AMP reference into *(slug, optional tool name)*. + + Accepts both bare slugs (``"notion"``, ``"notion#search"``) and the + legacy ``"crewai-amp:notion"`` form. + """ + bare = mcp_config.removeprefix("crewai-amp:") + slug, _, specific_tool = bare.partition("#") + return slug, specific_tool or None + + def _resolve_amp( + self, amp_refs: list[tuple[str, str | None]] + ) -> tuple[list[BaseTool], list[Any]]: + """Fetch AMP configs in bulk and return their tools and clients. 
+ + Resolves each unique slug only once (single connection per server), + then applies per-ref tool filters to select specific tools. + """ + from crewai.events.event_bus import crewai_event_bus + from crewai.events.types.mcp_events import MCPConfigFetchFailedEvent + + unique_slugs = list(dict.fromkeys(slug for slug, _ in amp_refs)) + amp_configs_map = self._fetch_amp_mcp_configs(unique_slugs) + + all_tools: list[BaseTool] = [] + all_clients: list[Any] = [] + + resolved_cache: dict[str, tuple[list[BaseTool], Any | None]] = {} + + for slug in unique_slugs: + config_dict = amp_configs_map.get(slug) + if not config_dict: + crewai_event_bus.emit( + self, + MCPConfigFetchFailedEvent( + slug=slug, + error=f"Config for '{slug}' not found. Make sure it is connected in your account.", + error_type="not_connected", + ), + ) + continue + + mcp_server_config = self._build_mcp_config_from_dict(config_dict) + + try: + tools, client = self._resolve_native(mcp_server_config) + resolved_cache[slug] = (tools, client) + if client: + all_clients.append(client) + except Exception as e: + crewai_event_bus.emit( + self, + MCPConfigFetchFailedEvent( + slug=slug, + error=str(e), + error_type="connection_failed", + ), + ) + + for slug, specific_tool in amp_refs: + cached = resolved_cache.get(slug) + if not cached: + continue + + slug_tools, _ = cached + if specific_tool: + all_tools.extend( + t for t in slug_tools if t.name.endswith(f"_{specific_tool}") + ) + else: + all_tools.extend(slug_tools) + + return all_tools, all_clients + + def _fetch_amp_mcp_configs(self, slugs: list[str]) -> dict[str, dict[str, Any]]: + """Fetch MCP server configurations via CrewAI+ API. + + Sends a GET request to the CrewAI+ mcps/configs endpoint with + comma-separated slugs. CrewAI+ proxies the request to crewai-oauth. + + API-level failures return ``{}``; individual slugs will then + surface as ``MCPConfigFetchFailedEvent`` in :meth:`_resolve_amp`. 
+ """ + import httpx + + try: + from crewai_tools.tools.crewai_platform_tools.misc import ( + get_platform_integration_token, + ) + + from crewai.cli.plus_api import PlusAPI + + plus_api = PlusAPI(api_key=get_platform_integration_token()) + response = plus_api.get_mcp_configs(slugs) + + if response.status_code == 200: + configs: dict[str, dict[str, Any]] = response.json().get("configs", {}) + return configs + + self._logger.log( + "debug", + f"Failed to fetch MCP configs: HTTP {response.status_code}", + ) + return {} + + except httpx.HTTPError as e: + self._logger.log("debug", f"Failed to fetch MCP configs: {e}") + return {} + except Exception as e: + self._logger.log("debug", f"Cannot fetch AMP MCP configs: {e}") + return {} + + def _resolve_external(self, mcp_ref: str) -> list[BaseTool]: + """Resolve an HTTPS MCP server URL into tools.""" + from crewai.tools.mcp_tool_wrapper import MCPToolWrapper + + if "#" in mcp_ref: + server_url, specific_tool = mcp_ref.split("#", 1) + else: + server_url, specific_tool = mcp_ref, None + + server_params = {"url": server_url} + server_name = self._extract_server_name(server_url) + + try: + tool_schemas = self._get_mcp_tool_schemas(server_params) + + if not tool_schemas: + self._logger.log( + "warning", f"No tools discovered from MCP server: {server_url}" + ) + return [] + + tools = [] + for tool_name, schema in tool_schemas.items(): + if specific_tool and tool_name != specific_tool: + continue + + try: + wrapper = MCPToolWrapper( + mcp_server_params=server_params, + tool_name=tool_name, + tool_schema=schema, + server_name=server_name, + ) + tools.append(wrapper) + except Exception as e: + self._logger.log( + "warning", + f"Failed to create MCP tool wrapper for {tool_name}: {e}", + ) + continue + + if specific_tool and not tools: + self._logger.log( + "warning", + f"Specific tool '{specific_tool}' not found on MCP server: {server_url}", + ) + + return cast(list[BaseTool], tools) + + except Exception as e: + self._logger.log( + 
"warning", f"Failed to connect to MCP server {server_url}: {e}" + ) + return [] + + def _resolve_native( + self, mcp_config: MCPServerConfig + ) -> tuple[list[BaseTool], Any | None]: + """Resolve an ``MCPServerConfig`` into tools, returning the client for cleanup.""" + from crewai.tools.base_tool import BaseTool + from crewai.tools.mcp_native_tool import MCPNativeTool + + transport: StdioTransport | HTTPTransport | SSETransport + if isinstance(mcp_config, MCPServerStdio): + transport = StdioTransport( + command=mcp_config.command, + args=mcp_config.args, + env=mcp_config.env, + ) + server_name = f"{mcp_config.command}_{'_'.join(mcp_config.args)}" + elif isinstance(mcp_config, MCPServerHTTP): + transport = HTTPTransport( + url=mcp_config.url, + headers=mcp_config.headers, + streamable=mcp_config.streamable, + ) + server_name = self._extract_server_name(mcp_config.url) + elif isinstance(mcp_config, MCPServerSSE): + transport = SSETransport( + url=mcp_config.url, + headers=mcp_config.headers, + ) + server_name = self._extract_server_name(mcp_config.url) + else: + raise ValueError(f"Unsupported MCP server config type: {type(mcp_config)}") + + client = MCPClient( + transport=transport, + cache_tools_list=mcp_config.cache_tools_list, + ) + + async def _setup_client_and_list_tools() -> list[dict[str, Any]]: + try: + if not client.connected: + await client.connect() + + tools_list = await client.list_tools() + + try: + await client.disconnect() + await asyncio.sleep(0.1) + except Exception as e: + self._logger.log("error", f"Error during disconnect: {e}") + + return tools_list + except Exception as e: + if client.connected: + await client.disconnect() + await asyncio.sleep(0.1) + raise RuntimeError( + f"Error during setup client and list tools: {e}" + ) from e + + try: + try: + asyncio.get_running_loop() + import concurrent.futures + + with concurrent.futures.ThreadPoolExecutor() as executor: + future = executor.submit( + asyncio.run, _setup_client_and_list_tools() + ) + 
tools_list = future.result() + except RuntimeError: + try: + tools_list = asyncio.run(_setup_client_and_list_tools()) + except RuntimeError as e: + error_msg = str(e).lower() + if "cancel scope" in error_msg or "task" in error_msg: + raise ConnectionError( + "MCP connection failed due to event loop cleanup issues. " + "This may be due to authentication errors or server unavailability." + ) from e + except asyncio.CancelledError as e: + raise ConnectionError( + "MCP connection was cancelled. This may indicate an authentication " + "error or server unavailability." + ) from e + + if mcp_config.tool_filter: + filtered_tools = [] + for tool in tools_list: + if callable(mcp_config.tool_filter): + try: + from crewai.mcp.filters import ToolFilterContext + + context = ToolFilterContext( + agent=self._agent, + server_name=server_name, + run_context=None, + ) + if mcp_config.tool_filter(context, tool): # type: ignore[call-arg, arg-type] + filtered_tools.append(tool) + except (TypeError, AttributeError): + if mcp_config.tool_filter(tool): # type: ignore[call-arg, arg-type] + filtered_tools.append(tool) + else: + filtered_tools.append(tool) + tools_list = filtered_tools + + tools = [] + for tool_def in tools_list: + tool_name = tool_def.get("name", "") + original_tool_name = tool_def.get("original_name", tool_name) + if not tool_name: + continue + + args_schema = None + if tool_def.get("inputSchema"): + args_schema = self._json_schema_to_pydantic( + tool_name, tool_def["inputSchema"] + ) + + tool_schema = { + "description": tool_def.get("description", ""), + "args_schema": args_schema, + } + + try: + native_tool = MCPNativeTool( + mcp_client=client, + tool_name=tool_name, + tool_schema=tool_schema, + server_name=server_name, + original_tool_name=original_tool_name, + ) + tools.append(native_tool) + except Exception as e: + self._logger.log("error", f"Failed to create native MCP tool: {e}") + continue + + return cast(list[BaseTool], tools), client + except Exception as e: + if 
client.connected: + asyncio.run(client.disconnect()) + + raise RuntimeError(f"Failed to get native MCP tools: {e}") from e + + @staticmethod + def _build_mcp_config_from_dict( + config_dict: dict[str, Any], + ) -> MCPServerConfig: + """Convert a config dict from crewai-oauth into an MCPServerConfig.""" + config_type = config_dict.get("type", "http") + + if config_type == "sse": + return MCPServerSSE( + url=config_dict["url"], + headers=config_dict.get("headers"), + cache_tools_list=config_dict.get("cache_tools_list", False), + ) + + return MCPServerHTTP( + url=config_dict["url"], + headers=config_dict.get("headers"), + streamable=config_dict.get("streamable", True), + cache_tools_list=config_dict.get("cache_tools_list", False), + ) + + @staticmethod + def _extract_server_name(server_url: str) -> str: + """Extract clean server name from URL for tool prefixing.""" + parsed = urlparse(server_url) + domain = parsed.netloc.replace(".", "_") + path = parsed.path.replace("/", "_").strip("_") + return f"{domain}_{path}" if path else domain + + def _get_mcp_tool_schemas( + self, server_params: dict[str, Any] + ) -> dict[str, dict[str, Any]]: + """Get tool schemas from MCP server with caching.""" + server_url = server_params["url"] + + cache_key = server_url + current_time = time.time() + + if cache_key in _mcp_schema_cache: + cached_data, cache_time = _mcp_schema_cache[cache_key] + if current_time - cache_time < _cache_ttl: + self._logger.log( + "debug", f"Using cached MCP tool schemas for {server_url}" + ) + return cached_data # type: ignore[no-any-return] + + try: + schemas = asyncio.run(self._get_mcp_tool_schemas_async(server_params)) + _mcp_schema_cache[cache_key] = (schemas, current_time) + return schemas + except Exception as e: + self._logger.log( + "warning", f"Failed to get MCP tool schemas from {server_url}: {e}" + ) + return {} + + async def _get_mcp_tool_schemas_async( + self, server_params: dict[str, Any] + ) -> dict[str, dict[str, Any]]: + """Async 
implementation of MCP tool schema retrieval.""" + server_url = server_params["url"] + return await self._retry_mcp_discovery( + self._discover_mcp_tools_with_timeout, server_url + ) + + async def _retry_mcp_discovery( + self, operation_func: Any, server_url: str + ) -> dict[str, dict[str, Any]]: + """Retry MCP discovery with exponential backoff.""" + last_error = None + + for attempt in range(MCP_MAX_RETRIES): + result, error, should_retry = await self._attempt_mcp_discovery( + operation_func, server_url + ) + + if result is not None: + return result + + if not should_retry: + raise RuntimeError(error) + + last_error = error + if attempt < MCP_MAX_RETRIES - 1: + wait_time = 2**attempt + await asyncio.sleep(wait_time) + + raise RuntimeError( + f"Failed to discover MCP tools after {MCP_MAX_RETRIES} attempts: {last_error}" + ) + + @staticmethod + async def _attempt_mcp_discovery( + operation_func: Any, server_url: str + ) -> tuple[dict[str, dict[str, Any]] | None, str, bool]: + """Attempt single MCP discovery; returns *(result, error_message, should_retry)*.""" + try: + result = await operation_func(server_url) + return result, "", False + + except ImportError: + return ( + None, + "MCP library not available. 
Please install with: pip install mcp", + False, + ) + + except asyncio.TimeoutError: + return ( + None, + f"MCP discovery timed out after {MCP_DISCOVERY_TIMEOUT} seconds", + True, + ) + + except Exception as e: + error_str = str(e).lower() + + if "authentication" in error_str or "unauthorized" in error_str: + return None, f"Authentication failed for MCP server: {e!s}", False + if "connection" in error_str or "network" in error_str: + return None, f"Network connection failed: {e!s}", True + if "json" in error_str or "parsing" in error_str: + return None, f"Server response parsing error: {e!s}", True + return None, f"MCP discovery error: {e!s}", False + + async def _discover_mcp_tools_with_timeout( + self, server_url: str + ) -> dict[str, dict[str, Any]]: + """Discover MCP tools with timeout wrapper.""" + return await asyncio.wait_for( + self._discover_mcp_tools(server_url), timeout=MCP_DISCOVERY_TIMEOUT + ) + + async def _discover_mcp_tools(self, server_url: str) -> dict[str, dict[str, Any]]: + """Discover tools from an MCP server (HTTPS / streamable-HTTP path).""" + from mcp import ClientSession + from mcp.client.streamable_http import streamablehttp_client + + from crewai.utilities.string_utils import sanitize_tool_name + + async with streamablehttp_client(server_url) as (read, write, _): + async with ClientSession(read, write) as session: + await asyncio.wait_for( + session.initialize(), timeout=MCP_CONNECTION_TIMEOUT + ) + + tools_result = await asyncio.wait_for( + session.list_tools(), + timeout=MCP_DISCOVERY_TIMEOUT - MCP_CONNECTION_TIMEOUT, + ) + + schemas = {} + for tool in tools_result.tools: + args_schema = None + if hasattr(tool, "inputSchema") and tool.inputSchema: + args_schema = self._json_schema_to_pydantic( + sanitize_tool_name(tool.name), tool.inputSchema + ) + + schemas[sanitize_tool_name(tool.name)] = { + "description": getattr(tool, "description", ""), + "args_schema": args_schema, + } + return schemas + + @staticmethod + def 
_json_schema_to_pydantic(tool_name: str, json_schema: dict[str, Any]) -> type: + """Convert JSON Schema to a Pydantic model for tool arguments.""" + from crewai.utilities.pydantic_schema_utils import create_model_from_schema + + model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema" + return create_model_from_schema( + json_schema, + model_name=model_name, + enrich_descriptions=True, + ) diff --git a/lib/crewai/src/crewai/memory/__init__.py b/lib/crewai/src/crewai/memory/__init__.py index 084a57a87..eb7b140b9 100644 --- a/lib/crewai/src/crewai/memory/__init__.py +++ b/lib/crewai/src/crewai/memory/__init__.py @@ -1,6 +1,14 @@ -"""Memory module: unified Memory with LLM analysis and pluggable storage.""" +"""Memory module: unified Memory with LLM analysis and pluggable storage. + +Heavy dependencies are lazily imported so that +``import crewai`` does not initialise at runtime — critical for +Celery pre-fork and similar deployment patterns. +""" + +from __future__ import annotations + +from typing import Any -from crewai.memory.encoding_flow import EncodingFlow from crewai.memory.memory_scope import MemoryScope, MemorySlice from crewai.memory.types import ( MemoryMatch, @@ -10,7 +18,25 @@ from crewai.memory.types import ( embed_text, embed_texts, ) -from crewai.memory.unified_memory import Memory + + +_LAZY_IMPORTS: dict[str, tuple[str, str]] = { + "Memory": ("crewai.memory.unified_memory", "Memory"), + "EncodingFlow": ("crewai.memory.encoding_flow", "EncodingFlow"), +} + + +def __getattr__(name: str) -> Any: + """Lazily import Memory / EncodingFlow to avoid pulling in lancedb at import time.""" + if name in _LAZY_IMPORTS: + import importlib + + module_path, attr = _LAZY_IMPORTS[name] + mod = importlib.import_module(module_path) + val = getattr(mod, attr) + globals()[name] = val + return val + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") __all__ = [ diff --git a/lib/crewai/src/crewai/memory/memory_scope.py 
b/lib/crewai/src/crewai/memory/memory_scope.py index b828e3faf..6c252f9f2 100644 --- a/lib/crewai/src/crewai/memory/memory_scope.py +++ b/lib/crewai/src/crewai/memory/memory_scope.py @@ -3,11 +3,9 @@ from __future__ import annotations from datetime import datetime -from typing import TYPE_CHECKING, Any +from typing import Any, Literal - -if TYPE_CHECKING: - from crewai.memory.unified_memory import Memory +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator from crewai.memory.types import ( _RECALL_OVERSAMPLE_FACTOR, @@ -15,22 +13,38 @@ from crewai.memory.types import ( MemoryRecord, ScopeInfo, ) +from crewai.memory.unified_memory import Memory -class MemoryScope: +class MemoryScope(BaseModel): """View of Memory restricted to a root path. All operations are scoped under that path.""" - def __init__(self, memory: Memory, root_path: str) -> None: - """Initialize scope. + model_config = ConfigDict(arbitrary_types_allowed=True) - Args: - memory: The underlying Memory instance. - root_path: Root path for this scope (e.g. /agent/1). 
- """ - self._memory = memory - self._root = root_path.rstrip("/") or "" - if self._root and not self._root.startswith("/"): - self._root = "/" + self._root + root_path: str = Field(default="/") + + _memory: Memory = PrivateAttr() + _root: str = PrivateAttr() + + @model_validator(mode="wrap") + @classmethod + def _accept_memory(cls, data: Any, handler: Any) -> MemoryScope: + """Extract memory dependency and normalize root path before validation.""" + if isinstance(data, MemoryScope): + return data + memory = data.pop("memory") + instance: MemoryScope = handler(data) + instance._memory = memory + root = instance.root_path.rstrip("/") or "" + if root and not root.startswith("/"): + root = "/" + root + instance._root = root + return instance + + @property + def read_only(self) -> bool: + """Whether the underlying memory is read-only.""" + return self._memory.read_only def _scope_path(self, scope: str | None) -> str: if not scope or scope == "/": @@ -52,7 +66,7 @@ class MemoryScope: importance: float | None = None, source: str | None = None, private: bool = False, - ) -> MemoryRecord: + ) -> MemoryRecord | None: """Remember content; scope is relative to this scope's root.""" path = self._scope_path(scope) return self._memory.remember( @@ -71,7 +85,7 @@ class MemoryScope: scope: str | None = None, categories: list[str] | None = None, limit: int = 10, - depth: str = "deep", + depth: Literal["shallow", "deep"] = "deep", source: str | None = None, include_private: bool = False, ) -> list[MemoryMatch]: @@ -138,34 +152,34 @@ class MemoryScope: """Return a narrower scope under this scope.""" child = path.strip("/") if not child: - return MemoryScope(self._memory, self._root or "/") + return MemoryScope(memory=self._memory, root_path=self._root or "/") base = self._root.rstrip("/") or "" new_root = f"{base}/{child}" if base else f"/{child}" - return MemoryScope(self._memory, new_root) + return MemoryScope(memory=self._memory, root_path=new_root) -class MemorySlice: - """View 
over multiple scopes: recall searches all, remember requires explicit scope unless read_only.""" +class MemorySlice(BaseModel): + """View over multiple scopes: recall searches all, remember is a no-op when read_only.""" - def __init__( - self, - memory: Memory, - scopes: list[str], - categories: list[str] | None = None, - read_only: bool = True, - ) -> None: - """Initialize slice. + model_config = ConfigDict(arbitrary_types_allowed=True) - Args: - memory: The underlying Memory instance. - scopes: List of scope paths to include. - categories: Optional category filter for recall. - read_only: If True, remember() raises PermissionError. - """ - self._memory = memory - self._scopes = [s.rstrip("/") or "/" for s in scopes] - self._categories = categories - self._read_only = read_only + scopes: list[str] = Field(default_factory=list) + categories: list[str] | None = Field(default=None) + read_only: bool = Field(default=True) + + _memory: Memory = PrivateAttr() + + @model_validator(mode="wrap") + @classmethod + def _accept_memory(cls, data: Any, handler: Any) -> MemorySlice: + """Extract memory dependency and normalize scopes before validation.""" + if isinstance(data, MemorySlice): + return data + memory = data.pop("memory") + data["scopes"] = [s.rstrip("/") or "/" for s in data.get("scopes", [])] + instance: MemorySlice = handler(data) + instance._memory = memory + return instance def remember( self, @@ -176,10 +190,10 @@ class MemorySlice: importance: float | None = None, source: str | None = None, private: bool = False, - ) -> MemoryRecord: - """Remember into an explicit scope. Required when read_only=False.""" - if self._read_only: - raise PermissionError("This MemorySlice is read-only") + ) -> MemoryRecord | None: + """Remember into an explicit scope. 
No-op when read_only=True.""" + if self.read_only: + return None return self._memory.remember( content, scope=scope, @@ -196,14 +210,14 @@ class MemorySlice: scope: str | None = None, categories: list[str] | None = None, limit: int = 10, - depth: str = "deep", + depth: Literal["shallow", "deep"] = "deep", source: str | None = None, include_private: bool = False, ) -> list[MemoryMatch]: """Recall across all slice scopes; results merged and re-ranked.""" - cats = categories or self._categories + cats = categories or self.categories all_matches: list[MemoryMatch] = [] - for sc in self._scopes: + for sc in self.scopes: matches = self._memory.recall( query, scope=sc, @@ -231,7 +245,7 @@ class MemorySlice: def list_scopes(self, path: str = "/") -> list[str]: """List scopes across all slice roots.""" out: list[str] = [] - for sc in self._scopes: + for sc in self.scopes: full = f"{sc.rstrip('/')}{path}" if sc != "/" else path out.extend(self._memory.list_scopes(full)) return sorted(set(out)) @@ -243,15 +257,23 @@ class MemorySlice: oldest: datetime | None = None newest: datetime | None = None children: list[str] = [] - for sc in self._scopes: + for sc in self.scopes: full = f"{sc.rstrip('/')}{path}" if sc != "/" else path inf = self._memory.info(full) total_records += inf.record_count all_categories.update(inf.categories) if inf.oldest_record: - oldest = inf.oldest_record if oldest is None else min(oldest, inf.oldest_record) + oldest = ( + inf.oldest_record + if oldest is None + else min(oldest, inf.oldest_record) + ) if inf.newest_record: - newest = inf.newest_record if newest is None else max(newest, inf.newest_record) + newest = ( + inf.newest_record + if newest is None + else max(newest, inf.newest_record) + ) children.extend(inf.child_scopes) return ScopeInfo( path=path, @@ -265,7 +287,7 @@ class MemorySlice: def list_categories(self, path: str | None = None) -> dict[str, int]: """Categories and counts across slice scopes.""" counts: dict[str, int] = {} - for sc in 
self._scopes: + for sc in self.scopes: full = (f"{sc.rstrip('/')}{path}" if sc != "/" else path) if path else sc for k, v in self._memory.list_categories(full).items(): counts[k] = counts.get(k, 0) + v diff --git a/lib/crewai/src/crewai/memory/recall_flow.py b/lib/crewai/src/crewai/memory/recall_flow.py index 053eb8d97..e0f238861 100644 --- a/lib/crewai/src/crewai/memory/recall_flow.py +++ b/lib/crewai/src/crewai/memory/recall_flow.py @@ -2,7 +2,6 @@ Implements adaptive-depth retrieval with: - LLM query distillation into targeted sub-queries -- Keyword-driven category filtering - Time-based filtering from temporal hints - Parallel multi-query, multi-scope search - Confidence-based routing with iterative deepening (budget loop) @@ -37,7 +36,6 @@ class RecallState(BaseModel): query: str = "" scope: str | None = None categories: list[str] | None = None - inferred_categories: list[str] = Field(default_factory=list) time_cutoff: datetime | None = None source: str | None = None include_private: bool = False @@ -82,11 +80,8 @@ class RecallFlow(Flow[RecallState]): # ------------------------------------------------------------------ def _merged_categories(self) -> list[str] | None: - """Merge caller-supplied and LLM-inferred categories.""" - merged = list( - set((self.state.categories or []) + self.state.inferred_categories) - ) - return merged or None + """Return caller-supplied categories, or None if empty.""" + return self.state.categories or None def _do_search(self) -> list[dict[str, Any]]: """Run parallel search across (embeddings x scopes) with filters. 
@@ -212,10 +207,6 @@ class RecallFlow(Flow[RecallState]): ) self.state.query_analysis = analysis - # Wire keywords -> category filter - if analysis.keywords: - self.state.inferred_categories = analysis.keywords - # Parse time_filter into a datetime cutoff if analysis.time_filter: try: diff --git a/lib/crewai/src/crewai/memory/storage/lancedb_storage.py b/lib/crewai/src/crewai/memory/storage/lancedb_storage.py index d40999985..e514edcac 100644 --- a/lib/crewai/src/crewai/memory/storage/lancedb_storage.py +++ b/lib/crewai/src/crewai/memory/storage/lancedb_storage.py @@ -53,6 +53,7 @@ class LanceDBStorage: path: str | Path | None = None, table_name: str = "memories", vector_dim: int | None = None, + compact_every: int = 100, ) -> None: """Initialize LanceDB storage. @@ -64,6 +65,10 @@ class LanceDBStorage: vector_dim: Dimensionality of the embedding vector. When ``None`` (default), the dimension is auto-detected from the existing table schema or from the first saved embedding. + compact_every: Number of ``save()`` calls between automatic + background compactions. Each ``save()`` creates one new + fragment file; compaction merges them, keeping query + performance consistent. Set to 0 to disable. """ if path is None: storage_dir = os.environ.get("CREWAI_STORAGE_DIR") @@ -78,6 +83,22 @@ class LanceDBStorage: self._table_name = table_name self._db = lancedb.connect(str(self._path)) + # On macOS and Linux the default per-process open-file limit is 256. + # A LanceDB table stores one file per fragment (one fragment per save() + # call by default). With hundreds of fragments, a single full-table + # scan opens all of them simultaneously, exhausting the limit. + # Raise it proactively so scans on large tables never hit OS error 24. 
+ try: + import resource + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + if soft < 4096: + resource.setrlimit(resource.RLIMIT_NOFILE, (min(hard, 4096), hard)) + except Exception: # noqa: S110 + pass # Windows or already at the max hard limit — safe to ignore + + self._compact_every = compact_every + self._save_count = 0 + # Get or create a shared write lock for this database path. resolved = str(self._path.resolve()) with LanceDBStorage._path_locks_guard: @@ -91,6 +112,11 @@ class LanceDBStorage: try: self._table: lancedb.table.Table | None = self._db.open_table(self._table_name) self._vector_dim: int = self._infer_dim_from_table(self._table) + # Best-effort: create the scope index if it doesn't exist yet. + self._ensure_scope_index() + # Compact in the background if the table has accumulated many + # fragments from previous runs (each save() creates one). + self._compact_if_needed() except Exception: self._table = None self._vector_dim = vector_dim or 0 # 0 = not yet known @@ -178,6 +204,56 @@ class LanceDBStorage: table.delete("id = '__schema_placeholder__'") return table + def _ensure_scope_index(self) -> None: + """Create a BTREE scalar index on the ``scope`` column if not present. + + A scalar index lets LanceDB skip a full table scan when filtering by + scope prefix, which is the hot path for ``list_records``, + ``get_scope_info``, and ``list_scopes``. The call is best-effort: + if the table is empty or the index already exists the exception is + swallowed silently. 
+ """ + if self._table is None: + return + try: + self._table.create_scalar_index("scope", index_type="BTREE", replace=False) + except Exception: # noqa: S110 + pass # index already exists, table empty, or unsupported version + + # ------------------------------------------------------------------ + # Automatic background compaction + # ------------------------------------------------------------------ + + def _compact_if_needed(self) -> None: + """Spawn a background compaction on startup. + + Called whenever an existing table is opened so that fragments + accumulated in previous sessions are silently merged before the + first query. ``optimize()`` returns quickly when the table is + already compact, so the cost is negligible in the common case. + """ + if self._table is None or self._compact_every <= 0: + return + self._compact_async() + + def _compact_async(self) -> None: + """Fire-and-forget: compact the table in a daemon background thread.""" + threading.Thread( + target=self._compact_safe, + daemon=True, + name="lancedb-compact", + ).start() + + def _compact_safe(self) -> None: + """Run ``table.optimize()`` in a background thread, absorbing errors.""" + try: + if self._table is not None: + self._table.optimize() + # Refresh the scope index so new fragments are covered. + self._ensure_scope_index() + except Exception: + _logger.debug("LanceDB background compaction failed", exc_info=True) + def _ensure_table(self, vector_dim: int | None = None) -> lancedb.table.Table: """Return the table, creating it lazily if needed. 
@@ -239,6 +315,7 @@ class LanceDBStorage: if r.embedding and len(r.embedding) > 0: dim = len(r.embedding) break + is_new_table = self._table is None with self._write_lock: self._ensure_table(vector_dim=dim) rows = [self._record_to_row(r) for r in records] @@ -246,6 +323,13 @@ class LanceDBStorage: if r["vector"] is None or len(r["vector"]) != self._vector_dim: r["vector"] = [0.0] * self._vector_dim self._retry_write("add", rows) + # Create the scope index on the first save so it covers the initial dataset. + if is_new_table: + self._ensure_scope_index() + # Auto-compact every N saves so fragment files don't pile up. + self._save_count += 1 + if self._compact_every > 0 and self._save_count % self._compact_every == 0: + self._compact_async() def update(self, record: MemoryRecord) -> None: """Update a record by ID. Preserves created_at, updates last_accessed.""" @@ -261,6 +345,10 @@ class LanceDBStorage: def touch_records(self, record_ids: list[str]) -> None: """Update last_accessed to now for the given record IDs. + Uses a single batch ``table.update()`` call instead of N + delete-and-re-add cycles, which is both faster and avoids + unnecessary write amplification. + Args: record_ids: IDs of records to touch. 
""" @@ -268,25 +356,20 @@ class LanceDBStorage: return with self._write_lock: now = datetime.utcnow().isoformat() - for rid in record_ids: - safe_id = str(rid).replace("'", "''") - rows = ( - self._table.search([0.0] * self._vector_dim) - .where(f"id = '{safe_id}'") - .limit(1) - .to_list() - ) - if rows: - rows[0]["last_accessed"] = now - self._retry_write("delete", f"id = '{safe_id}'") - self._retry_write("add", [rows[0]]) + safe_ids = [str(rid).replace("'", "''") for rid in record_ids] + ids_expr = ", ".join(f"'{rid}'" for rid in safe_ids) + self._retry_write( + "update", + where=f"id IN ({ids_expr})", + values={"last_accessed": now}, + ) def get_record(self, record_id: str) -> MemoryRecord | None: """Return a single record by ID, or None if not found.""" if self._table is None: return None safe_id = str(record_id).replace("'", "''") - rows = self._table.search([0.0] * self._vector_dim).where(f"id = '{safe_id}'").limit(1).to_list() + rows = self._table.search().where(f"id = '{safe_id}'").limit(1).to_list() if not rows: return None return self._row_to_record(rows[0]) @@ -374,13 +457,31 @@ class LanceDBStorage: self._retry_write("delete", where_expr) return before - self._table.count_rows() - def _scan_rows(self, scope_prefix: str | None = None, limit: int = _SCAN_ROWS_LIMIT) -> list[dict[str, Any]]: - """Scan rows optionally filtered by scope prefix.""" + def _scan_rows( + self, + scope_prefix: str | None = None, + limit: int = _SCAN_ROWS_LIMIT, + columns: list[str] | None = None, + ) -> list[dict[str, Any]]: + """Scan rows optionally filtered by scope prefix. + + Uses a full table scan (no vector query) so the limit is applied after + the scope filter, not to ANN candidates before filtering. + + Args: + scope_prefix: Optional scope path prefix to filter by. + limit: Maximum number of rows to return (applied after filtering). + columns: Optional list of column names to fetch. 
Pass only the + columns you need for metadata operations to avoid reading the + heavy ``vector`` column unnecessarily. + """ if self._table is None: return [] - q = self._table.search([0.0] * self._vector_dim) + q = self._table.search() if scope_prefix is not None and scope_prefix.strip("/"): q = q.where(f"scope LIKE '{scope_prefix.rstrip('/')}%'") + if columns is not None: + q = q.select(columns) return q.limit(limit).to_list() def list_records( @@ -406,7 +507,10 @@ class LanceDBStorage: prefix = scope if scope != "/" else "" if prefix and not prefix.startswith("/"): prefix = "/" + prefix - rows = self._scan_rows(prefix or None) + rows = self._scan_rows( + prefix or None, + columns=["scope", "categories_str", "created_at"], + ) if not rows: return ScopeInfo( path=scope or "/", @@ -453,7 +557,7 @@ class LanceDBStorage: def list_scopes(self, parent: str = "/") -> list[str]: parent = parent.rstrip("/") or "" prefix = (parent + "/") if parent else "/" - rows = self._scan_rows(prefix if prefix != "/" else None) + rows = self._scan_rows(prefix if prefix != "/" else None, columns=["scope"]) children: set[str] = set() for row in rows: sc = str(row.get("scope", "")) @@ -465,7 +569,7 @@ class LanceDBStorage: return sorted(children) def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]: - rows = self._scan_rows(scope_prefix) + rows = self._scan_rows(scope_prefix, columns=["categories_str"]) counts: dict[str, int] = {} for row in rows: cat_str = row.get("categories_str") or "[]" @@ -498,6 +602,21 @@ class LanceDBStorage: if prefix: self._table.delete(f"scope >= '{prefix}' AND scope < '{prefix}/\uFFFF'") + def optimize(self) -> None: + """Compact the table synchronously and refresh the scope index. + + Under normal usage this is called automatically in the background + (every ``compact_every`` saves and on startup when the table is + fragmented). 
Call this explicitly only when you need the compaction + to be complete before the next operation — for example immediately + after a large bulk import, before a latency-sensitive recall. + It is a no-op if the table does not exist. + """ + if self._table is None: + return + self._table.optimize() + self._ensure_scope_index() + async def asave(self, records: list[MemoryRecord]) -> None: self.save(records) diff --git a/lib/crewai/src/crewai/memory/types.py b/lib/crewai/src/crewai/memory/types.py index e67ad163f..929e10092 100644 --- a/lib/crewai/src/crewai/memory/types.py +++ b/lib/crewai/src/crewai/memory/types.py @@ -87,6 +87,22 @@ class MemoryMatch(BaseModel): description="Information the system looked for but could not find.", ) + def format(self) -> str: + """Format this match as a human-readable string including metadata. + + Returns: + A multi-line string with score, content, categories, and non-empty + metadata fields. + """ + lines = [f"- (score={self.score:.2f}) {self.record.content}"] + if self.record.categories: + lines.append(f" categories: {', '.join(self.record.categories)}") + if self.record.metadata: + for key, value in self.record.metadata.items(): + if value is not None: + lines.append(f" {key}: {value}") + return "\n".join(lines) + class ScopeInfo(BaseModel): """Information about a scope in the memory hierarchy.""" @@ -291,7 +307,7 @@ def embed_text(embedder: Any, text: str) -> list[float]: return [] first = result[0] if hasattr(first, "tolist"): - return first.tolist() + return list(first.tolist()) if isinstance(first, list): return [float(x) for x in first] return list(first) diff --git a/lib/crewai/src/crewai/memory/unified_memory.py b/lib/crewai/src/crewai/memory/unified_memory.py index a15f77afd..cb4954c39 100644 --- a/lib/crewai/src/crewai/memory/unified_memory.py +++ b/lib/crewai/src/crewai/memory/unified_memory.py @@ -6,7 +6,9 @@ from concurrent.futures import Future, ThreadPoolExecutor from datetime import datetime import threading 
import time -from typing import Any, Literal +from typing import TYPE_CHECKING, Annotated, Any, Literal + +from pydantic import BaseModel, ConfigDict, Field, PlainValidator, PrivateAttr from crewai.events.event_bus import crewai_event_bus from crewai.events.types.memory_events import ( @@ -21,7 +23,6 @@ from crewai.llms.base_llm import BaseLLM from crewai.memory.analyze import extract_memories_from_content from crewai.memory.recall_flow import RecallFlow from crewai.memory.storage.backend import StorageBackend -from crewai.memory.storage.lancedb_storage import LanceDBStorage from crewai.memory.types import ( MemoryConfig, MemoryMatch, @@ -30,16 +31,28 @@ from crewai.memory.types import ( compute_composite_score, embed_text, ) +from crewai.rag.embeddings.factory import build_embedder +from crewai.rag.embeddings.providers.openai.types import OpenAIProviderSpec -def _default_embedder() -> Any: +if TYPE_CHECKING: + from chromadb.utils.embedding_functions.openai_embedding_function import ( + OpenAIEmbeddingFunction, + ) + + +def _passthrough(v: Any) -> Any: + """PlainValidator that accepts any value, bypassing strict union discrimination.""" + return v + + +def _default_embedder() -> OpenAIEmbeddingFunction: """Build default OpenAI embedder for memory.""" - from crewai.rag.embeddings.factory import build_embedder - - return build_embedder({"provider": "openai", "config": {}}) + spec: OpenAIProviderSpec = {"provider": "openai", "config": {}} + return build_embedder(spec) -class Memory: +class Memory(BaseModel): """Unified memory: standalone, LLM-analyzed, with intelligent recall flow. Works without agent/crew. Uses LLM to infer scope, categories, importance on save. @@ -47,109 +60,119 @@ class Memory: pluggable storage (LanceDB default). """ - def __init__( - self, - llm: BaseLLM | str = "gpt-4o-mini", - storage: StorageBackend | str = "lancedb", - embedder: Any = None, - # -- Scoring weights -- - # These three weights control how recall results are ranked. 
- # The composite score is: semantic_weight * similarity + recency_weight * decay + importance_weight * importance. - # They should sum to ~1.0 for intuitive scoring. - recency_weight: float = 0.3, - semantic_weight: float = 0.5, - importance_weight: float = 0.2, - # How quickly old memories lose relevance. The recency score halves every - # N days (exponential decay). Lower = faster forgetting; higher = longer relevance. - recency_half_life_days: int = 30, - # -- Consolidation -- - # When remembering new content, if an existing record has similarity >= this - # threshold, the LLM is asked to merge/update/delete. Set to 1.0 to disable. - consolidation_threshold: float = 0.85, - # Max existing records to compare against when checking for consolidation. - consolidation_limit: int = 5, - # -- Save defaults -- - # Importance assigned to new memories when no explicit value is given and - # the LLM analysis path is skipped (all fields provided by the caller). - default_importance: float = 0.5, - # -- Recall depth control -- - # These thresholds govern the RecallFlow router that decides between - # returning results immediately ("synthesize") vs. doing an extra - # LLM-driven exploration round ("explore_deeper"). - # confidence >= confidence_threshold_high => always synthesize - # confidence < confidence_threshold_low => explore deeper (if budget > 0) - # complex query + confidence < complex_query_threshold => explore deeper - confidence_threshold_high: float = 0.8, - confidence_threshold_low: float = 0.5, - complex_query_threshold: float = 0.7, - # How many LLM-driven exploration rounds the RecallFlow is allowed to run. - # 0 = always shallow (vector search only); higher = more thorough but slower. - exploration_budget: int = 1, - # Queries shorter than this skip LLM analysis (saving ~1-3s). - # Longer queries (full task descriptions) benefit from LLM distillation. - query_analysis_threshold: int = 200, - ) -> None: - """Initialize Memory. 
+ model_config = ConfigDict(arbitrary_types_allowed=True) - Args: - llm: LLM for analysis (model name or BaseLLM instance). - storage: Backend: "lancedb" or a StorageBackend instance. - embedder: Embedding callable, provider config dict, or None (default OpenAI). - recency_weight: Weight for recency in the composite relevance score. - semantic_weight: Weight for semantic similarity in the composite relevance score. - importance_weight: Weight for importance in the composite relevance score. - recency_half_life_days: Recency score halves every N days (exponential decay). - consolidation_threshold: Similarity above which consolidation is triggered on save. - consolidation_limit: Max existing records to compare during consolidation. - default_importance: Default importance when not provided or inferred. - confidence_threshold_high: Recall confidence above which results are returned directly. - confidence_threshold_low: Recall confidence below which deeper exploration is triggered. - complex_query_threshold: For complex queries, explore deeper below this confidence. - exploration_budget: Number of LLM-driven exploration rounds during deep recall. - query_analysis_threshold: Queries shorter than this skip LLM analysis during deep recall. 
- """ - self._config = MemoryConfig( - recency_weight=recency_weight, - semantic_weight=semantic_weight, - importance_weight=importance_weight, - recency_half_life_days=recency_half_life_days, - consolidation_threshold=consolidation_threshold, - consolidation_limit=consolidation_limit, - default_importance=default_importance, - confidence_threshold_high=confidence_threshold_high, - confidence_threshold_low=confidence_threshold_low, - complex_query_threshold=complex_query_threshold, - exploration_budget=exploration_budget, - query_analysis_threshold=query_analysis_threshold, - ) + llm: Annotated[BaseLLM | str, PlainValidator(_passthrough)] = Field( + default="gpt-4o-mini", + description="LLM for analysis (model name or BaseLLM instance).", + ) + storage: Annotated[StorageBackend | str, PlainValidator(_passthrough)] = Field( + default="lancedb", + description="Storage backend instance or path string.", + ) + embedder: Any = Field( + default=None, + description="Embedding callable, provider config dict, or None for default OpenAI.", + ) + recency_weight: float = Field( + default=0.3, + description="Weight for recency in the composite relevance score.", + ) + semantic_weight: float = Field( + default=0.5, + description="Weight for semantic similarity in the composite relevance score.", + ) + importance_weight: float = Field( + default=0.2, + description="Weight for importance in the composite relevance score.", + ) + recency_half_life_days: int = Field( + default=30, + description="Recency score halves every N days (exponential decay).", + ) + consolidation_threshold: float = Field( + default=0.85, + description="Similarity above which consolidation is triggered on save.", + ) + consolidation_limit: int = Field( + default=5, + description="Max existing records to compare during consolidation.", + ) + default_importance: float = Field( + default=0.5, + description="Default importance when not provided or inferred.", + ) + confidence_threshold_high: float = Field( + 
default=0.8, + description="Recall confidence above which results are returned directly.", + ) + confidence_threshold_low: float = Field( + default=0.5, + description="Recall confidence below which deeper exploration is triggered.", + ) + complex_query_threshold: float = Field( + default=0.7, + description="For complex queries, explore deeper below this confidence.", + ) + exploration_budget: int = Field( + default=1, + description="Number of LLM-driven exploration rounds during deep recall.", + ) + query_analysis_threshold: int = Field( + default=200, + description="Queries shorter than this skip LLM analysis during deep recall.", + ) + read_only: bool = Field( + default=False, + description="If True, remember() and remember_many() are silent no-ops.", + ) - # Store raw config for lazy initialization. LLM and embedder are only - # built on first access so that Memory() never fails at construction - # time (e.g. when auto-created by Flow without an API key set). - self._llm_config: BaseLLM | str = llm - self._llm_instance: BaseLLM | None = None if isinstance(llm, str) else llm - self._embedder_config: Any = embedder - self._embedder_instance: Any = ( - embedder if (embedder is not None and not isinstance(embedder, dict)) else None - ) - - # Storage is initialized eagerly (local, no API key needed). - if storage == "lancedb": - self._storage = LanceDBStorage() - elif isinstance(storage, str): - self._storage = LanceDBStorage(path=storage) - else: - self._storage = storage - - # Background save queue. max_workers=1 serializes saves to avoid - # concurrent storage mutations (two saves finding the same similar - # record and both trying to update/delete it). Within each save, - # the parallel LLM calls still run on their own thread pool. 
- self._save_pool = ThreadPoolExecutor( + _config: MemoryConfig = PrivateAttr() + _llm_instance: BaseLLM | None = PrivateAttr(default=None) + _embedder_instance: Any = PrivateAttr(default=None) + _storage: StorageBackend = PrivateAttr() + _save_pool: ThreadPoolExecutor = PrivateAttr( + default_factory=lambda: ThreadPoolExecutor( max_workers=1, thread_name_prefix="memory-save" ) - self._pending_saves: list[Future[Any]] = [] - self._pending_lock = threading.Lock() + ) + _pending_saves: list[Future[Any]] = PrivateAttr(default_factory=list) + _pending_lock: threading.Lock = PrivateAttr(default_factory=threading.Lock) + + def model_post_init(self, __context: Any) -> None: + """Initialize runtime state from field values.""" + self._config = MemoryConfig( + recency_weight=self.recency_weight, + semantic_weight=self.semantic_weight, + importance_weight=self.importance_weight, + recency_half_life_days=self.recency_half_life_days, + consolidation_threshold=self.consolidation_threshold, + consolidation_limit=self.consolidation_limit, + default_importance=self.default_importance, + confidence_threshold_high=self.confidence_threshold_high, + confidence_threshold_low=self.confidence_threshold_low, + complex_query_threshold=self.complex_query_threshold, + exploration_budget=self.exploration_budget, + query_analysis_threshold=self.query_analysis_threshold, + ) + + self._llm_instance = None if isinstance(self.llm, str) else self.llm + self._embedder_instance = ( + self.embedder + if (self.embedder is not None and not isinstance(self.embedder, dict)) + else None + ) + + if isinstance(self.storage, str): + from crewai.memory.storage.lancedb_storage import LanceDBStorage + + self._storage = ( + LanceDBStorage() + if self.storage == "lancedb" + else LanceDBStorage(path=self.storage) + ) + else: + self._storage = self.storage _MEMORY_DOCS_URL = "https://docs.crewai.com/concepts/memory" @@ -160,12 +183,13 @@ class Memory: from crewai.llm import LLM try: - self._llm_instance = 
LLM(model=self._llm_config) + model_name = self.llm if isinstance(self.llm, str) else str(self.llm) + self._llm_instance = LLM(model=model_name) except Exception as e: raise RuntimeError( f"Memory requires an LLM for analysis but initialization failed: {e}\n\n" "To fix this, do one of the following:\n" - ' - Set OPENAI_API_KEY for the default model (gpt-4o-mini)\n' + " - Set OPENAI_API_KEY for the default model (gpt-4o-mini)\n" ' - Pass a different model: Memory(llm="anthropic/claude-3-haiku-20240307")\n' ' - Pass any LLM instance: Memory(llm=LLM(model="your-model"))\n' " - To skip LLM analysis, pass all fields explicitly to remember()\n" @@ -179,10 +203,8 @@ class Memory: """Lazy embedder initialization -- only created when first needed.""" if self._embedder_instance is None: try: - if isinstance(self._embedder_config, dict): - from crewai.rag.embeddings.factory import build_embedder - - self._embedder_instance = build_embedder(self._embedder_config) + if isinstance(self.embedder, dict): + self._embedder_instance = build_embedder(self.embedder) else: self._embedder_instance = _default_embedder() except Exception as e: @@ -317,7 +339,7 @@ class Memory: source: str | None = None, private: bool = False, agent_role: str | None = None, - ) -> MemoryRecord: + ) -> MemoryRecord | None: """Store a single item in memory (synchronous). Routes through the same serialized save pool as ``remember_many`` @@ -335,11 +357,13 @@ class Memory: agent_role: Optional agent role for event metadata. Returns: - The created MemoryRecord. + The created MemoryRecord, or None if this memory is read-only. Raises: Exception: On save failure (events emitted). """ + if self.read_only: + return None _source_type = "unified_memory" try: crewai_event_bus.emit( @@ -356,7 +380,13 @@ class Memory: # then immediately wait for the result. 
future = self._submit_save( self._encode_batch, - [content], scope, categories, metadata, importance, source, private, + [content], + scope, + categories, + metadata, + importance, + source, + private, ) records = future.result() record = records[0] if records else None @@ -420,13 +450,19 @@ class Memory: Returns: Empty list (records are not available until the background save completes). """ - if not contents: + if not contents or self.read_only: return [] self._submit_save( self._background_encode_batch, - contents, scope, categories, metadata, - importance, source, private, agent_role, + contents, + scope, + categories, + metadata, + importance, + source, + private, + agent_role, ) return [] @@ -566,14 +602,13 @@ class Memory: # Privacy filter if not include_private: raw = [ - (r, s) for r, s in raw + (r, s) + for r, s in raw if not r.private or r.source == source ] results = [] for r, s in raw: - composite, reasons = compute_composite_score( - r, s, self._config - ) + composite, reasons = compute_composite_score(r, s, self._config) results.append( MemoryMatch( record=r, @@ -739,7 +774,9 @@ class Memory: limit: Maximum number of records to return. offset: Number of records to skip (for pagination). 
""" - return self._storage.list_records(scope_prefix=scope, limit=limit, offset=offset) + return self._storage.list_records( + scope_prefix=scope, limit=limit, offset=offset + ) def info(self, path: str = "/") -> ScopeInfo: """Return scope info for path.""" @@ -781,7 +818,7 @@ class Memory: importance: float | None = None, source: str | None = None, private: bool = False, - ) -> MemoryRecord: + ) -> MemoryRecord | None: """Async remember: delegates to sync for now.""" return self.remember( content, diff --git a/lib/crewai/src/crewai/rag/embeddings/factory.py b/lib/crewai/src/crewai/rag/embeddings/factory.py index 41a9233da..802779320 100644 --- a/lib/crewai/src/crewai/rag/embeddings/factory.py +++ b/lib/crewai/src/crewai/rag/embeddings/factory.py @@ -216,6 +216,10 @@ def build_embedder_from_dict( def build_embedder_from_dict(spec: ONNXProviderSpec) -> ONNXMiniLM_L6_V2: ... +@overload +def build_embedder_from_dict(spec: dict[str, Any]) -> EmbeddingFunction[Any]: ... + + def build_embedder_from_dict(spec): # type: ignore[no-untyped-def] """Build an embedding function instance from a dictionary specification. @@ -341,6 +345,10 @@ def build_embedder(spec: Text2VecProviderSpec) -> Text2VecEmbeddingFunction: ... def build_embedder(spec: ONNXProviderSpec) -> ONNXMiniLM_L6_V2: ... +@overload +def build_embedder(spec: dict[str, Any]) -> EmbeddingFunction[Any]: ... + + def build_embedder(spec): # type: ignore[no-untyped-def] """Build an embedding function from either a provider spec or a provider instance. 
diff --git a/lib/crewai/src/crewai/task.py b/lib/crewai/src/crewai/task.py index eac42f956..cfcb01799 100644 --- a/lib/crewai/src/crewai/task.py +++ b/lib/crewai/src/crewai/task.py @@ -1,5 +1,6 @@ from __future__ import annotations +import asyncio from concurrent.futures import Future from copy import copy as shallow_copy import datetime @@ -585,16 +586,29 @@ class Task(BaseModel): self._post_agent_execution(agent) - if not self._guardrails and not self._guardrail: + if isinstance(result, BaseModel): + raw = result.model_dump_json() + if self.output_pydantic: + pydantic_output = result + json_output = None + elif self.output_json: + pydantic_output = None + json_output = result.model_dump() + else: + pydantic_output = None + json_output = None + elif not self._guardrails and not self._guardrail: + raw = result pydantic_output, json_output = self._export_output(result) else: + raw = result pydantic_output, json_output = None, None task_output = TaskOutput( name=self.name or self.description, description=self.description, expected_output=self.expected_output, - raw=result, + raw=raw, pydantic=pydantic_output, json_dict=json_output, agent=agent.role, @@ -624,11 +638,15 @@ class Task(BaseModel): self.end_time = datetime.datetime.now() if self.callback: - self.callback(self.output) + cb_result = self.callback(self.output) + if inspect.isawaitable(cb_result): + await cb_result crew = self.agent.crew # type: ignore[union-attr] if crew and crew.task_callback and crew.task_callback != self.callback: - crew.task_callback(self.output) + cb_result = crew.task_callback(self.output) + if inspect.isawaitable(cb_result): + await cb_result if self.output_file: content = ( @@ -682,16 +700,29 @@ class Task(BaseModel): self._post_agent_execution(agent) - if not self._guardrails and not self._guardrail: + if isinstance(result, BaseModel): + raw = result.model_dump_json() + if self.output_pydantic: + pydantic_output = result + json_output = None + elif self.output_json: + 
pydantic_output = None + json_output = result.model_dump() + else: + pydantic_output = None + json_output = None + elif not self._guardrails and not self._guardrail: + raw = result pydantic_output, json_output = self._export_output(result) else: + raw = result pydantic_output, json_output = None, None task_output = TaskOutput( name=self.name or self.description, description=self.description, expected_output=self.expected_output, - raw=result, + raw=raw, pydantic=pydantic_output, json_dict=json_output, agent=agent.role, @@ -722,11 +753,15 @@ class Task(BaseModel): self.end_time = datetime.datetime.now() if self.callback: - self.callback(self.output) + cb_result = self.callback(self.output) + if inspect.iscoroutine(cb_result): + asyncio.run(cb_result) crew = self.agent.crew # type: ignore[union-attr] if crew and crew.task_callback and crew.task_callback != self.callback: - crew.task_callback(self.output) + cb_result = crew.task_callback(self.output) + if inspect.iscoroutine(cb_result): + asyncio.run(cb_result) if self.output_file: content = ( diff --git a/lib/crewai/src/crewai/telemetry/__init__.py b/lib/crewai/src/crewai/telemetry/__init__.py index 38739d88a..b927aa02e 100644 --- a/lib/crewai/src/crewai/telemetry/__init__.py +++ b/lib/crewai/src/crewai/telemetry/__init__.py @@ -1,5 +1,4 @@ from crewai.telemetry.telemetry import Telemetry - __all__ = ["Telemetry"] diff --git a/lib/crewai/src/crewai/telemetry/telemetry.py b/lib/crewai/src/crewai/telemetry/telemetry.py index 04303fc3d..136a7d7d0 100644 --- a/lib/crewai/src/crewai/telemetry/telemetry.py +++ b/lib/crewai/src/crewai/telemetry/telemetry.py @@ -173,6 +173,12 @@ class Telemetry: self._original_handlers: dict[int, Any] = {} + if threading.current_thread() is not threading.main_thread(): + logger.debug( + "Skipping signal handler registration: not running in main thread" + ) + return + self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True) self._register_signal_handler(signal.SIGINT, 
SigIntEvent, shutdown=True) if hasattr(signal, "SIGHUP"): diff --git a/lib/crewai/src/crewai/tools/__init__.py b/lib/crewai/src/crewai/tools/__init__.py index ef698c90a..a2415b1b2 100644 --- a/lib/crewai/src/crewai/tools/__init__.py +++ b/lib/crewai/src/crewai/tools/__init__.py @@ -1,7 +1,6 @@ from crewai.tools.base_tool import BaseTool, EnvVar, tool - __all__ = [ "BaseTool", "EnvVar", diff --git a/lib/crewai/src/crewai/tools/base_tool.py b/lib/crewai/src/crewai/tools/base_tool.py index 8a10cdfa3..07fa61b07 100644 --- a/lib/crewai/src/crewai/tools/base_tool.py +++ b/lib/crewai/src/crewai/tools/base_tool.py @@ -23,7 +23,7 @@ from pydantic import ( ) from typing_extensions import TypeIs -from crewai.tools.structured_tool import CrewStructuredTool +from crewai.tools.structured_tool import CrewStructuredTool, build_schema_hint from crewai.utilities.printer import Printer from crewai.utilities.pydantic_schema_utils import generate_model_description from crewai.utilities.string_utils import sanitize_tool_name @@ -150,14 +150,39 @@ class BaseTool(BaseModel, ABC): super().model_post_init(__context) + def _validate_kwargs(self, kwargs: dict[str, Any]) -> dict[str, Any]: + """Validate keyword arguments against args_schema if present. + + Args: + kwargs: The keyword arguments to validate. + + Returns: + Validated (and possibly coerced) keyword arguments. + + Raises: + ValueError: If validation against args_schema fails. 
+ """ + if self.args_schema is not None and self.args_schema.model_fields: + try: + validated = self.args_schema.model_validate(kwargs) + return validated.model_dump() + except Exception as e: + hint = build_schema_hint(self.args_schema) + raise ValueError( + f"Tool '{self.name}' arguments validation failed: {e}{hint}" + ) from e + return kwargs + def run( self, *args: Any, **kwargs: Any, ) -> Any: + if not args: + kwargs = self._validate_kwargs(kwargs) + result = self._run(*args, **kwargs) - # If _run is async, we safely run it if asyncio.iscoroutine(result): result = asyncio.run(result) @@ -179,6 +204,8 @@ class BaseTool(BaseModel, ABC): Returns: The result of the tool execution. """ + if not args: + kwargs = self._validate_kwargs(kwargs) result = await self._arun(*args, **kwargs) self.current_usage_count += 1 return result @@ -331,6 +358,9 @@ class Tool(BaseTool, Generic[P, R]): Returns: The result of the tool execution. """ + if not args: + kwargs = self._validate_kwargs(kwargs) # type: ignore[assignment] + result = self.func(*args, **kwargs) if asyncio.iscoroutine(result): @@ -361,6 +391,8 @@ class Tool(BaseTool, Generic[P, R]): Returns: The result of the tool execution. """ + if not args: + kwargs = self._validate_kwargs(kwargs) # type: ignore[assignment] result = await self._arun(*args, **kwargs) self.current_usage_count += 1 return result diff --git a/lib/crewai/src/crewai/tools/mcp_native_tool.py b/lib/crewai/src/crewai/tools/mcp_native_tool.py index f25b2f4d7..d14c26a5a 100644 --- a/lib/crewai/src/crewai/tools/mcp_native_tool.py +++ b/lib/crewai/src/crewai/tools/mcp_native_tool.py @@ -27,14 +27,16 @@ class MCPNativeTool(BaseTool): tool_name: str, tool_schema: dict[str, Any], server_name: str, + original_tool_name: str | None = None, ) -> None: """Initialize native MCP tool. Args: mcp_client: MCPClient instance with active session. - tool_name: Original name of the tool on the MCP server. + tool_name: Name of the tool (may be prefixed). 
tool_schema: Schema information for the tool. server_name: Name of the MCP server for prefixing. + original_tool_name: Original name of the tool on the MCP server. """ # Create tool name with server prefix to avoid conflicts prefixed_name = f"{server_name}_{tool_name}" @@ -57,7 +59,7 @@ class MCPNativeTool(BaseTool): # Set instance attributes after super().__init__ self._mcp_client = mcp_client - self._original_tool_name = tool_name + self._original_tool_name = original_tool_name or tool_name self._server_name = server_name # self._logger = logging.getLogger(__name__) diff --git a/lib/crewai/src/crewai/tools/memory_tools.py b/lib/crewai/src/crewai/tools/memory_tools.py index 5c98a9892..c1874a532 100644 --- a/lib/crewai/src/crewai/tools/memory_tools.py +++ b/lib/crewai/src/crewai/tools/memory_tools.py @@ -20,14 +20,6 @@ class RecallMemorySchema(BaseModel): "or multiple items to search for several things at once." ), ) - scope: str | None = Field( - default=None, - description="Optional scope to narrow the search (e.g. /project/alpha)", - ) - depth: str = Field( - default="shallow", - description="'shallow' for fast vector search, 'deep' for LLM-analyzed retrieval", - ) class RecallMemoryTool(BaseTool): @@ -41,32 +33,27 @@ class RecallMemoryTool(BaseTool): def _run( self, queries: list[str] | str, - scope: str | None = None, - depth: str = "shallow", **kwargs: Any, ) -> str: """Search memory for relevant information. Args: queries: One or more search queries (string or list of strings). - scope: Optional scope prefix to narrow the search. - depth: "shallow" for fast vector search, "deep" for LLM-analyzed retrieval. Returns: Formatted string of matching memories, or a message if none found. 
""" if isinstance(queries, str): queries = [queries] - actual_depth = depth if depth in ("shallow", "deep") else "shallow" all_lines: list[str] = [] seen_ids: set[str] = set() for query in queries: - matches = self.memory.recall(query, scope=scope, limit=5, depth=actual_depth) + matches = self.memory.recall(query, limit=20) for m in matches: if m.record.id not in seen_ids: seen_ids.add(m.record.id) - all_lines.append(f"- (score={m.score:.2f}) {m.record.content}") + all_lines.append(m.format()) if not all_lines: return "No relevant memories found." @@ -117,20 +104,28 @@ class RememberTool(BaseTool): def create_memory_tools(memory: Any) -> list[BaseTool]: """Create Recall and Remember tools for the given memory instance. + When memory is read-only (``_read_only=True``), only the RecallMemoryTool + is returned — the RememberTool is omitted so agents are never offered a + save capability they cannot use. + Args: memory: A Memory, MemoryScope, or MemorySlice instance. Returns: - List containing a RecallMemoryTool and a RememberTool. + List containing a RecallMemoryTool and, if not read-only, a RememberTool. """ i18n = get_i18n() - return [ + tools: list[BaseTool] = [ RecallMemoryTool( memory=memory, description=i18n.tools("recall_memory"), ), - RememberTool( - memory=memory, - description=i18n.tools("save_to_memory"), - ), ] + if not memory.read_only: + tools.append( + RememberTool( + memory=memory, + description=i18n.tools("save_to_memory"), + ) + ) + return tools diff --git a/lib/crewai/src/crewai/tools/structured_tool.py b/lib/crewai/src/crewai/tools/structured_tool.py index 44f0af2d9..4b95caeb7 100644 --- a/lib/crewai/src/crewai/tools/structured_tool.py +++ b/lib/crewai/src/crewai/tools/structured_tool.py @@ -17,6 +17,27 @@ if TYPE_CHECKING: from crewai.tools.base_tool import BaseTool +def build_schema_hint(args_schema: type[BaseModel]) -> str: + """Build a human-readable hint from a Pydantic model's JSON schema. 
+ + Args: + args_schema: The Pydantic model class to extract schema from. + + Returns: + A formatted string with expected arguments and required fields, + or empty string if schema extraction fails. + """ + try: + schema = args_schema.model_json_schema() + return ( + f"\nExpected arguments: " + f"{json.dumps(schema.get('properties', {}))}" + f"\nRequired: {json.dumps(schema.get('required', []))}" + ) + except Exception: + return "" + + class ToolUsageLimitExceededError(Exception): """Exception raised when a tool has reached its maximum usage limit.""" @@ -208,7 +229,8 @@ class CrewStructuredTool: validated_args = self.args_schema.model_validate(raw_args) return validated_args.model_dump() except Exception as e: - raise ValueError(f"Arguments validation failed: {e}") from e + hint = build_schema_hint(self.args_schema) + raise ValueError(f"Arguments validation failed: {e}{hint}") from e async def ainvoke( self, diff --git a/lib/crewai/src/crewai/translations/en.json b/lib/crewai/src/crewai/translations/en.json index 1eb02c746..833f6e9e7 100644 --- a/lib/crewai/src/crewai/translations/en.json +++ b/lib/crewai/src/crewai/translations/en.json @@ -7,7 +7,7 @@ "slices": { "observation": "\nObservation:", "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:", - "memory": "\n\n# Useful context: \n{memory}", + "memory": "\n\n# Memories from past conversations:\n{memory}\n\nIMPORTANT: The memories above are an automatic selection and may be INCOMPLETE. If the task involves counting, listing, or summing items (e.g. 'how many', 'total', 'list all'), you MUST use the Search memory tool with several different queries before answering — do NOT rely solely on the memories shown above. Enumerate each distinct item you find before giving a final count.", "role_playing": "You are {role}. 
{backstory}\nYour personal goal is: {goal}", "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```", "no_tools": "", @@ -60,12 +60,12 @@ "description": "See image to understand its content, you can optionally ask a question about the image", "default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe." }, - "recall_memory": "Search through the team's shared memory for relevant information. Pass one or more queries to search for multiple things at once. Use this when you need to find facts, decisions, preferences, or past results that may have been stored previously.", + "recall_memory": "Search through the team's shared memory for relevant information. Pass one or more queries to search for multiple things at once. Use this when you need to find facts, decisions, preferences, or past results that may have been stored previously. IMPORTANT: For questions that require counting, summing, or listing items across multiple conversations (e.g. 'how many X', 'total Y', 'list all Z'), you MUST search multiple times with different phrasings to ensure you find ALL relevant items before giving a final count or total. 
Do not rely on a single search — items may be described differently across conversations.", "save_to_memory": "Store one or more important facts, decisions, observations, or lessons in memory so they can be recalled later by you or other agents. Pass multiple items at once when you have several things worth remembering." }, "memory": { "query_system": "You analyze a query for searching memory.\nGiven the query and available scopes, output:\n1. keywords: Key entities or keywords that can be used to filter by category.\n2. suggested_scopes: Which available scopes are most relevant (empty for all).\n3. complexity: 'simple' or 'complex'.\n4. recall_queries: 1-3 short, targeted search phrases distilled from the query. Each should be a concise phrase optimized for semantic vector search. If the query is already short and focused, return it as-is in a single-item list. For long task descriptions, extract the distinct things worth searching for.\n5. time_filter: If the query references a time period (like 'last week', 'yesterday', 'in January'), return an ISO 8601 date string for the earliest relevant date (e.g. '2026-02-01'). Return null if no time constraint is implied.", - "extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nIf there is nothing worth remembering (e.g. 
empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.", + "extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result, or a conversation between a user and an assistant).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nWhen the content is a conversation, pay special attention to facts stated by the user (first-person statements). These personal facts are HIGH PRIORITY and must always be extracted:\n- What the user did, bought, made, visited, attended, or completed\n- Names of people, pets, places, brands, and specific items the user mentions\n- Quantities, durations, dates, and measurements the user states\n- Subordinate clauses and casual asides often contain important personal details (e.g. \"by the way, it took me 4 hours\" or \"my Golden Retriever Max\")\n\nPreserve exact names and numbers — never generalize (e.g. keep \"lavender gin fizz\" not just \"cocktail\", keep \"12 largemouth bass\" not just \"fish caught\", keep \"Golden Retriever\" not just \"dog\").\n\nAdditional extraction rules:\n- Presupposed facts: When the user reveals a fact indirectly in a question (e.g. \"What collar suits a Golden Retriever like Max?\" presupposes Max is a Golden Retriever), extract that fact as a separate memory.\n- Date precision: Always preserve the full date including day-of-month when stated (e.g. 
\"February 14th\" not just \"February\", \"March 5\" not just \"March\").\n- Life events in passing: When the user mentions a life event (birth, wedding, graduation, move, adoption) while discussing something else, extract the life event as its own memory (e.g. \"my friend David had a baby boy named Jasper\" is a birth fact, even if mentioned while planning to send congratulations).\n\nIf there is nothing worth remembering (e.g. empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.", "extract_memories_user": "Content:\n{content}\n\nExtract memory statements as described. Return structured output.", "query_user": "Query: {query}\n\nAvailable scopes: {available_scopes}\n{scope_desc}\n\nReturn the analysis as structured output.", "save_system": "You analyze content to be stored in a hierarchical memory system.\nGiven the content and the existing scopes and categories, output:\n1. suggested_scope: The best matching existing scope path, or a new path if none fit (use / for root).\n2. categories: A list of categories (reuse existing when relevant, add new ones if needed).\n3. importance: A number from 0.0 to 1.0 indicating how significant this memory is.\n4. 
extracted_metadata: A JSON object with any entities, dates, or topics you can extract.", diff --git a/lib/crewai/src/crewai/utilities/agent_utils.py b/lib/crewai/src/crewai/utilities/agent_utils.py index 22b498541..e4f3d3fee 100644 --- a/lib/crewai/src/crewai/utilities/agent_utils.py +++ b/lib/crewai/src/crewai/utilities/agent_utils.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio from collections.abc import Callable, Sequence import concurrent.futures +import inspect import json import re from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict @@ -138,7 +139,11 @@ def render_text_description_and_args( def convert_tools_to_openai_schema( tools: Sequence[BaseTool | CrewStructuredTool], -) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]: +) -> tuple[ + list[dict[str, Any]], + dict[str, Callable[..., Any]], + dict[str, BaseTool | CrewStructuredTool], +]: """Convert CrewAI tools to OpenAI function calling format. This function converts CrewAI BaseTool and CrewStructuredTool objects @@ -151,23 +156,21 @@ def convert_tools_to_openai_schema( Returns: Tuple containing: - List of OpenAI-format tool schema dictionaries - - Dict mapping tool names to their callable run() methods - - Example: - >>> tools = [CalculatorTool(), SearchTool()] - >>> schemas, functions = convert_tools_to_openai_schema(tools) - >>> # schemas can be passed to llm.call(tools=schemas) - >>> # functions can be passed to llm.call(available_functions=functions) + - Dict mapping sanitized tool names to their callable run() methods + - Dict mapping sanitized tool names to their original tool objects """ openai_tools: list[dict[str, Any]] = [] available_functions: dict[str, Callable[..., Any]] = {} + tool_name_mapping: dict[str, BaseTool | CrewStructuredTool] = {} for tool in tools: # Get the JSON schema for tool parameters parameters: dict[str, Any] = {} if hasattr(tool, "args_schema") and tool.args_schema is not None: try: - schema_output = 
generate_model_description(tool.args_schema) + schema_output = generate_model_description( + tool.args_schema, strip_null_types=False + ) parameters = schema_output.get("json_schema", {}).get("schema", {}) # Remove title and description from schema root as they're redundant parameters.pop("title", None) @@ -183,6 +186,14 @@ def convert_tools_to_openai_schema( sanitized_name = sanitize_tool_name(tool.name) + if sanitized_name in available_functions: + counter = 2 + candidate = sanitize_tool_name(f"{sanitized_name}_{counter}") + while candidate in available_functions: + counter += 1 + candidate = sanitize_tool_name(f"{sanitized_name}_{counter}") + sanitized_name = candidate + schema: dict[str, Any] = { "type": "function", "function": { @@ -194,8 +205,9 @@ def convert_tools_to_openai_schema( } openai_tools.append(schema) available_functions[sanitized_name] = tool.run # type: ignore[union-attr] + tool_name_mapping[sanitized_name] = tool - return openai_tools, available_functions + return openai_tools, available_functions, tool_name_mapping def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool: @@ -501,7 +513,9 @@ def handle_agent_action_core( - TODO: Remove messages parameter and its usage. """ if step_callback: - step_callback(tool_result) + cb_result = step_callback(tool_result) + if inspect.iscoroutine(cb_result): + asyncio.run(cb_result) formatted_answer.text += f"\nObservation: {tool_result.result}" formatted_answer.result = tool_result.result @@ -1143,6 +1157,36 @@ def extract_tool_call_info( return None +def parse_tool_call_args( + func_args: dict[str, Any] | str, + func_name: str, + call_id: str, + original_tool: Any = None, +) -> tuple[dict[str, Any], None] | tuple[None, dict[str, Any]]: + """Parse tool call arguments from a JSON string or dict. 
+ + Returns: + ``(args_dict, None)`` on success, or ``(None, error_result)`` on + JSON parse failure where ``error_result`` is a ready-to-return dict + with the same shape as ``_execute_single_native_tool_call`` return values. + """ + if isinstance(func_args, str): + try: + return json.loads(func_args), None + except json.JSONDecodeError as e: + return None, { + "call_id": call_id, + "func_name": func_name, + "result": ( + f"Error: Failed to parse tool arguments as JSON: {e}. " + f"Please provide valid JSON arguments for the '{func_name}' tool." + ), + "from_cache": False, + "original_tool": original_tool, + } + return func_args, None + + def _setup_before_llm_call_hooks( executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None, printer: Printer, diff --git a/lib/crewai/src/crewai/utilities/llm_utils.py b/lib/crewai/src/crewai/utilities/llm_utils.py index 129f064d5..55a42968a 100644 --- a/lib/crewai/src/crewai/utilities/llm_utils.py +++ b/lib/crewai/src/crewai/utilities/llm_utils.py @@ -69,7 +69,7 @@ def create_llm( UNACCEPTED_ATTRIBUTES: Final[list[str]] = [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", - "AWS_REGION_NAME", + "AWS_DEFAULT_REGION", ] @@ -146,7 +146,7 @@ def _llm_via_environment_or_fallback() -> LLM | None: unaccepted_attributes = [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", - "AWS_REGION_NAME", + "AWS_DEFAULT_REGION", ] set_provider = model_name.partition("/")[0] if "/" in model_name else "openai" diff --git a/lib/crewai/src/crewai/utilities/pydantic_schema_utils.py b/lib/crewai/src/crewai/utilities/pydantic_schema_utils.py index 191f38c35..87d80da81 100644 --- a/lib/crewai/src/crewai/utilities/pydantic_schema_utils.py +++ b/lib/crewai/src/crewai/utilities/pydantic_schema_utils.py @@ -417,7 +417,11 @@ def strip_null_from_types(schema: dict[str, Any]) -> dict[str, Any]: return schema -def generate_model_description(model: type[BaseModel]) -> ModelDescription: +def generate_model_description( + model: type[BaseModel], + *, + 
strip_null_types: bool = True, +) -> ModelDescription: """Generate JSON schema description of a Pydantic model. This function takes a Pydantic model class and returns its JSON schema, @@ -426,6 +430,9 @@ def generate_model_description(model: type[BaseModel]) -> ModelDescription: Args: model: A Pydantic model class. + strip_null_types: When ``True`` (default), remove ``null`` from + ``anyOf`` / ``type`` arrays. Set to ``False`` to allow sending ``null`` for + optional fields. Returns: A ModelDescription with JSON schema representation of the model. @@ -442,7 +449,9 @@ def generate_model_description(model: type[BaseModel]) -> ModelDescription: json_schema = fix_discriminator_mappings(json_schema) json_schema = convert_oneof_to_anyof(json_schema) json_schema = ensure_all_properties_required(json_schema) - json_schema = strip_null_from_types(json_schema) + + if strip_null_types: + json_schema = strip_null_from_types(json_schema) return { "type": "json_schema", @@ -482,10 +491,66 @@ FORMAT_TYPE_MAP: dict[str, type[Any]] = { } +def build_rich_field_description(prop_schema: dict[str, Any]) -> str: + """Build a comprehensive field description including constraints. + + Embeds format, enum, pattern, min/max, and example constraints into the + description text so that LLMs can understand tool parameter requirements + without inspecting the raw JSON Schema. + + Args: + prop_schema: Property schema with description and constraints. + + Returns: + Enhanced description with format, enum, and other constraints. 
+ """ + parts: list[str] = [] + + description = prop_schema.get("description", "") + if description: + parts.append(description) + + format_type = prop_schema.get("format") + if format_type: + parts.append(f"Format: {format_type}") + + enum_values = prop_schema.get("enum") + if enum_values: + enum_str = ", ".join(repr(v) for v in enum_values) + parts.append(f"Allowed values: [{enum_str}]") + + pattern = prop_schema.get("pattern") + if pattern: + parts.append(f"Pattern: {pattern}") + + minimum = prop_schema.get("minimum") + maximum = prop_schema.get("maximum") + if minimum is not None: + parts.append(f"Minimum: {minimum}") + if maximum is not None: + parts.append(f"Maximum: {maximum}") + + min_length = prop_schema.get("minLength") + max_length = prop_schema.get("maxLength") + if min_length is not None: + parts.append(f"Min length: {min_length}") + if max_length is not None: + parts.append(f"Max length: {max_length}") + + examples = prop_schema.get("examples") + if examples: + examples_str = ", ".join(repr(e) for e in examples[:3]) + parts.append(f"Examples: {examples_str}") + + return ". ".join(parts) if parts else "" + + def create_model_from_schema( # type: ignore[no-any-unimported] json_schema: dict[str, Any], *, root_schema: dict[str, Any] | None = None, + model_name: str | None = None, + enrich_descriptions: bool = False, __config__: ConfigDict | None = None, __base__: type[BaseModel] | None = None, __module__: str = __name__, @@ -503,6 +568,13 @@ def create_model_from_schema( # type: ignore[no-any-unimported] json_schema: A dictionary representing the JSON schema. root_schema: The root schema containing $defs. If not provided, the current schema is treated as the root schema. + model_name: Override for the model name. If not provided, the schema + ``title`` field is used, falling back to ``"DynamicModel"``. 
+ enrich_descriptions: When True, augment field descriptions with + constraint info (format, enum, pattern, min/max, examples) via + :func:`build_rich_field_description`. Useful for LLM-facing tool + schemas where constraints in the description help the model + understand parameter requirements. __config__: Pydantic configuration for the generated model. __base__: Base class for the generated model. Defaults to BaseModel. __module__: Module name for the generated model class. @@ -539,10 +611,14 @@ def create_model_from_schema( # type: ignore[no-any-unimported] if "title" not in json_schema and "title" in (root_schema or {}): json_schema["title"] = (root_schema or {}).get("title") - model_name = json_schema.get("title") or "DynamicModel" + effective_name = model_name or json_schema.get("title") or "DynamicModel" field_definitions = { name: _json_schema_to_pydantic_field( - name, prop, json_schema.get("required", []), effective_root + name, + prop, + json_schema.get("required", []), + effective_root, + enrich_descriptions=enrich_descriptions, ) for name, prop in (json_schema.get("properties", {}) or {}).items() } @@ -550,7 +626,7 @@ def create_model_from_schema( # type: ignore[no-any-unimported] effective_config = __config__ or ConfigDict(extra="forbid") return create_model_base( - model_name, + effective_name, __config__=effective_config, __base__=__base__, __module__=__module__, @@ -565,6 +641,8 @@ def _json_schema_to_pydantic_field( json_schema: dict[str, Any], required: list[str], root_schema: dict[str, Any], + *, + enrich_descriptions: bool = False, ) -> Any: """Convert a JSON schema property to a Pydantic field definition. @@ -573,20 +651,29 @@ def _json_schema_to_pydantic_field( json_schema: The JSON schema for this field. required: List of required field names. root_schema: The root schema for resolving $ref. + enrich_descriptions: When True, embed constraints in the description. Returns: A tuple of (type, Field) for use with create_model. 
""" - type_ = _json_schema_to_pydantic_type(json_schema, root_schema, name_=name.title()) - description = json_schema.get("description") - examples = json_schema.get("examples") + type_ = _json_schema_to_pydantic_type( + json_schema, root_schema, name_=name.title(), enrich_descriptions=enrich_descriptions + ) is_required = name in required field_params: dict[str, Any] = {} schema_extra: dict[str, Any] = {} - if description: - field_params["description"] = description + if enrich_descriptions: + rich_desc = build_rich_field_description(json_schema) + if rich_desc: + field_params["description"] = rich_desc + else: + description = json_schema.get("description") + if description: + field_params["description"] = description + + examples = json_schema.get("examples") if examples: schema_extra["examples"] = examples @@ -702,6 +789,7 @@ def _json_schema_to_pydantic_type( root_schema: dict[str, Any], *, name_: str | None = None, + enrich_descriptions: bool = False, ) -> Any: """Convert a JSON schema to a Python/Pydantic type. @@ -709,6 +797,7 @@ def _json_schema_to_pydantic_type( json_schema: The JSON schema to convert. root_schema: The root schema for resolving $ref. name_: Optional name for nested models. + enrich_descriptions: Propagated to nested model creation. Returns: A Python type corresponding to the JSON schema. 
@@ -716,7 +805,9 @@ def _json_schema_to_pydantic_type( ref = json_schema.get("$ref") if ref: ref_schema = _resolve_ref(ref, root_schema) - return _json_schema_to_pydantic_type(ref_schema, root_schema, name_=name_) + return _json_schema_to_pydantic_type( + ref_schema, root_schema, name_=name_, enrich_descriptions=enrich_descriptions + ) enum_values = json_schema.get("enum") if enum_values: @@ -731,7 +822,10 @@ def _json_schema_to_pydantic_type( if any_of_schemas: any_of_types = [ _json_schema_to_pydantic_type( - schema, root_schema, name_=f"{name_ or 'Union'}Option{i}" + schema, + root_schema, + name_=f"{name_ or 'Union'}Option{i}", + enrich_descriptions=enrich_descriptions, ) for i, schema in enumerate(any_of_schemas) ] @@ -741,10 +835,14 @@ def _json_schema_to_pydantic_type( if all_of_schemas: if len(all_of_schemas) == 1: return _json_schema_to_pydantic_type( - all_of_schemas[0], root_schema, name_=name_ + all_of_schemas[0], root_schema, name_=name_, + enrich_descriptions=enrich_descriptions, ) merged = _merge_all_of_schemas(all_of_schemas, root_schema) - return _json_schema_to_pydantic_type(merged, root_schema, name_=name_) + return _json_schema_to_pydantic_type( + merged, root_schema, name_=name_, + enrich_descriptions=enrich_descriptions, + ) type_ = json_schema.get("type") @@ -760,7 +858,8 @@ def _json_schema_to_pydantic_type( items_schema = json_schema.get("items") if items_schema: item_type = _json_schema_to_pydantic_type( - items_schema, root_schema, name_=name_ + items_schema, root_schema, name_=name_, + enrich_descriptions=enrich_descriptions, ) return list[item_type] # type: ignore[valid-type] return list @@ -770,7 +869,10 @@ def _json_schema_to_pydantic_type( json_schema_ = json_schema.copy() if json_schema_.get("title") is None: json_schema_["title"] = name_ or "DynamicModel" - return create_model_from_schema(json_schema_, root_schema=root_schema) + return create_model_from_schema( + json_schema_, root_schema=root_schema, + 
enrich_descriptions=enrich_descriptions, + ) return dict if type_ == "null": return None diff --git a/lib/crewai/src/crewai/utilities/string_utils.py b/lib/crewai/src/crewai/utilities/string_utils.py index 8834c2e38..98735b3ea 100644 --- a/lib/crewai/src/crewai/utilities/string_utils.py +++ b/lib/crewai/src/crewai/utilities/string_utils.py @@ -2,6 +2,7 @@ # https://github.com/un33k/python-slugify # MIT License +import hashlib import re from typing import Any, Final import unicodedata @@ -40,7 +41,9 @@ def sanitize_tool_name(name: str, max_length: int = _MAX_TOOL_NAME_LENGTH) -> st name = name.strip("_") if len(name) > max_length: - name = name[:max_length].rstrip("_") + name_hash = hashlib.sha256(name.encode()).hexdigest()[:8] + suffix = f"_{name_hash}" + name = name[: max_length - len(suffix)].rstrip("_") + suffix return name diff --git a/lib/crewai/tests/agents/test_agent_executor.py b/lib/crewai/tests/agents/test_agent_executor.py index 4163f0693..ab886ff38 100644 --- a/lib/crewai/tests/agents/test_agent_executor.py +++ b/lib/crewai/tests/agents/test_agent_executor.py @@ -4,6 +4,7 @@ Tests the Flow-based agent executor implementation including state management, flow methods, routing logic, and error handling. 
""" +import time from unittest.mock import Mock, patch import pytest @@ -122,7 +123,7 @@ class TestAgentExecutor: executor.state.iterations = 10 result = executor.check_max_iterations() - assert result == "force_final_answer" + assert result == "max_iterations_exceeded" def test_route_by_answer_type_action(self, mock_dependencies): """Test routing for AgentAction.""" @@ -462,3 +463,176 @@ class TestFlowInvoke: assert result == {"output": "Done"} assert len(executor.state.messages) >= 2 + + +class TestNativeToolExecution: + """Test native tool execution behavior.""" + + @pytest.fixture + def mock_dependencies(self): + llm = Mock() + llm.supports_stop_words.return_value = True + + task = Mock() + task.name = "Test Task" + task.description = "Test" + task.human_input = False + task.response_model = None + + crew = Mock() + crew._memory = None + crew.verbose = False + crew._train = False + + agent = Mock() + agent.id = "test-agent-id" + agent.role = "Test Agent" + agent.verbose = False + agent.key = "test-key" + + prompt = {"prompt": "Test {input} {tool_names} {tools}"} + + tools_handler = Mock() + tools_handler.cache = None + + return { + "llm": llm, + "task": task, + "crew": crew, + "agent": agent, + "prompt": prompt, + "max_iter": 10, + "tools": [], + "tools_names": "", + "stop_words": [], + "tools_description": "", + "tools_handler": tools_handler, + } + + def test_execute_native_tool_runs_parallel_for_multiple_calls( + self, mock_dependencies + ): + executor = AgentExecutor(**mock_dependencies) + + def slow_one() -> str: + time.sleep(0.2) + return "one" + + def slow_two() -> str: + time.sleep(0.2) + return "two" + + executor._available_functions = {"slow_one": slow_one, "slow_two": slow_two} + executor.state.pending_tool_calls = [ + { + "id": "call_1", + "function": {"name": "slow_one", "arguments": "{}"}, + }, + { + "id": "call_2", + "function": {"name": "slow_two", "arguments": "{}"}, + }, + ] + + started = time.perf_counter() + result = 
executor.execute_native_tool() + elapsed = time.perf_counter() - started + + assert result == "native_tool_completed" + assert elapsed < 0.5 + tool_messages = [m for m in executor.state.messages if m.get("role") == "tool"] + assert len(tool_messages) == 2 + assert tool_messages[0]["tool_call_id"] == "call_1" + assert tool_messages[1]["tool_call_id"] == "call_2" + + def test_execute_native_tool_falls_back_to_sequential_for_result_as_answer( + self, mock_dependencies + ): + executor = AgentExecutor(**mock_dependencies) + + def slow_one() -> str: + time.sleep(0.2) + return "one" + + def slow_two() -> str: + time.sleep(0.2) + return "two" + + result_tool = Mock() + result_tool.name = "slow_one" + result_tool.result_as_answer = True + result_tool.max_usage_count = None + result_tool.current_usage_count = 0 + + executor.original_tools = [result_tool] + executor._available_functions = {"slow_one": slow_one, "slow_two": slow_two} + executor.state.pending_tool_calls = [ + { + "id": "call_1", + "function": {"name": "slow_one", "arguments": "{}"}, + }, + { + "id": "call_2", + "function": {"name": "slow_two", "arguments": "{}"}, + }, + ] + + started = time.perf_counter() + result = executor.execute_native_tool() + elapsed = time.perf_counter() - started + + assert result == "tool_result_is_final" + assert elapsed >= 0.2 + assert elapsed < 0.8 + assert isinstance(executor.state.current_answer, AgentFinish) + assert executor.state.current_answer.output == "one" + + def test_execute_native_tool_result_as_answer_short_circuits_remaining_calls( + self, mock_dependencies + ): + executor = AgentExecutor(**mock_dependencies) + call_counts = {"slow_one": 0, "slow_two": 0} + + def slow_one() -> str: + call_counts["slow_one"] += 1 + time.sleep(0.2) + return "one" + + def slow_two() -> str: + call_counts["slow_two"] += 1 + time.sleep(0.2) + return "two" + + result_tool = Mock() + result_tool.name = "slow_one" + result_tool.result_as_answer = True + result_tool.max_usage_count = None + 
result_tool.current_usage_count = 0 + + executor.original_tools = [result_tool] + executor._available_functions = {"slow_one": slow_one, "slow_two": slow_two} + executor.state.pending_tool_calls = [ + { + "id": "call_1", + "function": {"name": "slow_one", "arguments": "{}"}, + }, + { + "id": "call_2", + "function": {"name": "slow_two", "arguments": "{}"}, + }, + ] + + started = time.perf_counter() + result = executor.execute_native_tool() + elapsed = time.perf_counter() - started + + assert result == "tool_result_is_final" + assert isinstance(executor.state.current_answer, AgentFinish) + assert executor.state.current_answer.output == "one" + assert call_counts["slow_one"] == 1 + assert call_counts["slow_two"] == 0 + assert elapsed < 0.5 + + tool_messages = [m for m in executor.state.messages if m.get("role") == "tool"] + assert len(tool_messages) == 1 + assert tool_messages[0]["tool_call_id"] == "call_1" diff --git a/lib/crewai/tests/agents/test_async_agent_executor.py b/lib/crewai/tests/agents/test_async_agent_executor.py index b696c5227..01297bdcc 100644 --- a/lib/crewai/tests/agents/test_async_agent_executor.py +++ b/lib/crewai/tests/agents/test_async_agent_executor.py @@ -2,7 +2,7 @@ import asyncio from typing import Any -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock, Mock, patch import pytest @@ -291,6 +291,46 @@ class TestAsyncAgentExecutor: assert max_concurrent > 1, f"Expected concurrent execution, max concurrent was {max_concurrent}" +class TestInvokeStepCallback: + """Tests for _invoke_step_callback with sync and async callbacks.""" + + def test_invoke_step_callback_with_sync_callback( + self, executor: CrewAgentExecutor + ) -> None: + """Test that a sync step callback is called normally.""" + callback = Mock() + executor.step_callback = callback + answer = AgentFinish(thought="thinking", output="test", text="final") + + executor._invoke_step_callback(answer) + + 
callback.assert_called_once_with(answer) + + def test_invoke_step_callback_with_async_callback( + self, executor: CrewAgentExecutor + ) -> None: + """Test that an async step callback is awaited via asyncio.run.""" + async_callback = AsyncMock() + executor.step_callback = async_callback + answer = AgentFinish(thought="thinking", output="test", text="final") + + with patch("crewai.agents.crew_agent_executor.asyncio.run") as mock_run: + executor._invoke_step_callback(answer) + + async_callback.assert_called_once_with(answer) + mock_run.assert_called_once() + + def test_invoke_step_callback_with_none( + self, executor: CrewAgentExecutor + ) -> None: + """Test that no error is raised when step_callback is None.""" + executor.step_callback = None + answer = AgentFinish(thought="thinking", output="test", text="final") + + # Should not raise + executor._invoke_step_callback(answer) + + class TestAsyncLLMResponseHelper: """Tests for aget_llm_response helper function.""" diff --git a/lib/crewai/tests/agents/test_lite_agent.py b/lib/crewai/tests/agents/test_lite_agent.py index 761a12b23..0d7093f82 100644 --- a/lib/crewai/tests/agents/test_lite_agent.py +++ b/lib/crewai/tests/agents/test_lite_agent.py @@ -659,7 +659,7 @@ def test_agent_kickoff_with_platform_tools(mock_get, mock_post): @patch.dict("os.environ", {"EXA_API_KEY": "test_exa_key"}) -@patch("crewai.agent.Agent._get_external_mcp_tools") +@patch("crewai.agent.Agent.get_mcp_tools") @pytest.mark.vcr() def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools): """Test that Agent.kickoff() properly integrates MCP tools with LiteAgent""" @@ -691,7 +691,7 @@ def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools): assert result.raw is not None # Verify MCP tools were retrieved - mock_get_mcp_tools.assert_called_once_with("https://mcp.exa.ai/mcp?api_key=test_exa_key&profile=research") + mock_get_mcp_tools.assert_called_once_with(["https://mcp.exa.ai/mcp?api_key=test_exa_key&profile=research"]) # 
============================================================================ @@ -1136,6 +1136,7 @@ def test_lite_agent_memory_instance_recall_and_save_called(): successful_requests=1, ) mock_memory = Mock() + mock_memory.read_only = False mock_memory.recall.return_value = [] mock_memory.extract_memories.return_value = ["Fact one.", "Fact two."] diff --git a/lib/crewai/tests/agents/test_native_tool_calling.py b/lib/crewai/tests/agents/test_native_tool_calling.py index fde883df9..73a2c5156 100644 --- a/lib/crewai/tests/agents/test_native_tool_calling.py +++ b/lib/crewai/tests/agents/test_native_tool_calling.py @@ -6,13 +6,20 @@ when the LLM supports it, across multiple providers. from __future__ import annotations +from collections.abc import Generator import os -from unittest.mock import patch +import threading +import time +from collections import Counter +from unittest.mock import Mock, patch import pytest from pydantic import BaseModel, Field from crewai import Agent, Crew, Task +from crewai.events import crewai_event_bus +from crewai.hooks import register_after_tool_call_hook, register_before_tool_call_hook +from crewai.hooks.tool_hooks import ToolCallHookContext from crewai.llm import LLM from crewai.tools.base_tool import BaseTool @@ -64,6 +71,73 @@ class FailingTool(BaseTool): def _run(self) -> str: raise Exception("This tool always fails") + +class LocalSearchInput(BaseModel): + query: str = Field(description="Search query") + + +class ParallelProbe: + """Thread-safe in-memory recorder for tool execution windows.""" + + _lock = threading.Lock() + _windows: list[tuple[str, float, float]] = [] + + @classmethod + def reset(cls) -> None: + with cls._lock: + cls._windows = [] + + @classmethod + def record(cls, tool_name: str, start: float, end: float) -> None: + with cls._lock: + cls._windows.append((tool_name, start, end)) + + @classmethod + def windows(cls) -> list[tuple[str, float, float]]: + with cls._lock: + return list(cls._windows) + + +def 
_parallel_prompt() -> str: + return ( + "This is a tool-calling compliance test. " + "In your next assistant turn, emit exactly 3 tool calls in the same response (parallel tool calls), in this order: " + "1) parallel_local_search_one(query='latest OpenAI model release notes'), " + "2) parallel_local_search_two(query='latest Anthropic model release notes'), " + "3) parallel_local_search_three(query='latest Gemini model release notes'). " + "Do not call any other tools and do not answer before those 3 tool calls are emitted. " + "After the tool results return, provide a one paragraph summary." + ) + + +def _max_concurrency(windows: list[tuple[str, float, float]]) -> int: + points: list[tuple[float, int]] = [] + for _, start, end in windows: + points.append((start, 1)) + points.append((end, -1)) + points.sort(key=lambda p: (p[0], p[1])) + + current = 0 + maximum = 0 + for _, delta in points: + current += delta + if current > maximum: + maximum = current + return maximum + + +def _assert_tools_overlapped() -> None: + windows = ParallelProbe.windows() + local_windows = [ + w + for w in windows + if w[0].startswith("parallel_local_search_") + ] + + assert len(local_windows) >= 3, f"Expected at least 3 local tool calls, got {len(local_windows)}" + assert _max_concurrency(local_windows) >= 2, "Expected overlapping local tool executions" + + @pytest.fixture def calculator_tool() -> CalculatorTool: """Create a calculator tool for testing.""" @@ -82,6 +156,65 @@ def failing_tool() -> BaseTool: ) + +@pytest.fixture +def parallel_tools() -> list[BaseTool]: + """Create local tools used to verify native parallel execution deterministically.""" + + class ParallelLocalSearchOne(BaseTool): + name: str = "parallel_local_search_one" + description: str = "Local search tool #1 for concurrency testing." 
+ args_schema: type[BaseModel] = LocalSearchInput + + def _run(self, query: str) -> str: + start = time.perf_counter() + time.sleep(1.0) + end = time.perf_counter() + ParallelProbe.record(self.name, start, end) + return f"[one] {query}" + + class ParallelLocalSearchTwo(BaseTool): + name: str = "parallel_local_search_two" + description: str = "Local search tool #2 for concurrency testing." + args_schema: type[BaseModel] = LocalSearchInput + + def _run(self, query: str) -> str: + start = time.perf_counter() + time.sleep(1.0) + end = time.perf_counter() + ParallelProbe.record(self.name, start, end) + return f"[two] {query}" + + class ParallelLocalSearchThree(BaseTool): + name: str = "parallel_local_search_three" + description: str = "Local search tool #3 for concurrency testing." + args_schema: type[BaseModel] = LocalSearchInput + + def _run(self, query: str) -> str: + start = time.perf_counter() + time.sleep(1.0) + end = time.perf_counter() + ParallelProbe.record(self.name, start, end) + return f"[three] {query}" + + return [ + ParallelLocalSearchOne(), + ParallelLocalSearchTwo(), + ParallelLocalSearchThree(), + ] + + +def _attach_parallel_probe_handler() -> None: + @crewai_event_bus.on(ToolUsageFinishedEvent) + def _capture_tool_window(_source, event: ToolUsageFinishedEvent): + if not event.tool_name.startswith("parallel_local_search_"): + return + ParallelProbe.record( + event.tool_name, + event.started_at.timestamp(), + event.finished_at.timestamp(), + ) + # ============================================================================= # OpenAI Provider Tests # ============================================================================= @@ -122,7 +255,7 @@ class TestOpenAINativeToolCalling: self, calculator_tool: CalculatorTool ) -> None: """Test OpenAI agent kickoff with mocked LLM call.""" - llm = LLM(model="gpt-4o-mini") + llm = LLM(model="gpt-5-nano") with patch.object(llm, "call", return_value="The answer is 120.") as mock_call: agent = Agent( @@ -146,6 
+279,174 @@ class TestOpenAINativeToolCalling: assert mock_call.called assert result is not None + @pytest.mark.vcr() + @pytest.mark.timeout(180) + def test_openai_parallel_native_tool_calling_test_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gpt-5-nano", temperature=1), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + assert result is not None + _assert_tools_overlapped() + + @pytest.mark.vcr() + @pytest.mark.timeout(180) + def test_openai_parallel_native_tool_calling_test_agent_kickoff( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gpt-4o-mini"), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + assert result is not None + _assert_tools_overlapped() + + @pytest.mark.vcr() + @pytest.mark.timeout(180) + def test_openai_parallel_native_tool_calling_tool_hook_parity_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + hook_calls: dict[str, list[dict[str, str]]] = {"before": [], "after": []} + + def before_hook(context: ToolCallHookContext) -> bool | None: + if context.tool_name.startswith("parallel_local_search_"): + hook_calls["before"].append( + { + "tool_name": context.tool_name, + "query": str(context.tool_input.get("query", "")), + } + ) + return None + + def after_hook(context: ToolCallHookContext) -> str | None: + if context.tool_name.startswith("parallel_local_search_"): + hook_calls["after"].append( + { + "tool_name": context.tool_name, + 
"query": str(context.tool_input.get("query", "")), + } + ) + return None + + register_before_tool_call_hook(before_hook) + register_after_tool_call_hook(after_hook) + + try: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gpt-5-nano", temperature=1), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + + assert result is not None + _assert_tools_overlapped() + + before_names = [call["tool_name"] for call in hook_calls["before"]] + after_names = [call["tool_name"] for call in hook_calls["after"]] + assert len(before_names) >= 3, "Expected before hooks for all parallel calls" + assert Counter(before_names) == Counter(after_names) + assert all(call["query"] for call in hook_calls["before"]) + assert all(call["query"] for call in hook_calls["after"]) + finally: + from crewai.hooks import ( + unregister_after_tool_call_hook, + unregister_before_tool_call_hook, + ) + + unregister_before_tool_call_hook(before_hook) + unregister_after_tool_call_hook(after_hook) + + @pytest.mark.vcr() + @pytest.mark.timeout(180) + def test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff( + self, parallel_tools: list[BaseTool] + ) -> None: + hook_calls: dict[str, list[dict[str, str]]] = {"before": [], "after": []} + + def before_hook(context: ToolCallHookContext) -> bool | None: + if context.tool_name.startswith("parallel_local_search_"): + hook_calls["before"].append( + { + "tool_name": context.tool_name, + "query": str(context.tool_input.get("query", "")), + } + ) + return None + + def after_hook(context: ToolCallHookContext) -> str | None: + if context.tool_name.startswith("parallel_local_search_"): + hook_calls["after"].append( + { + "tool_name": 
context.tool_name, + "query": str(context.tool_input.get("query", "")), + } + ) + return None + + register_before_tool_call_hook(before_hook) + register_after_tool_call_hook(after_hook) + + try: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gpt-5-nano", temperature=1), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + + assert result is not None + _assert_tools_overlapped() + + before_names = [call["tool_name"] for call in hook_calls["before"]] + after_names = [call["tool_name"] for call in hook_calls["after"]] + assert len(before_names) >= 3, "Expected before hooks for all parallel calls" + assert Counter(before_names) == Counter(after_names) + assert all(call["query"] for call in hook_calls["before"]) + assert all(call["query"] for call in hook_calls["after"]) + finally: + from crewai.hooks import ( + unregister_after_tool_call_hook, + unregister_before_tool_call_hook, + ) + + unregister_before_tool_call_hook(before_hook) + unregister_after_tool_call_hook(after_hook) + # ============================================================================= # Anthropic Provider Tests @@ -217,6 +518,46 @@ class TestAnthropicNativeToolCalling: assert mock_call.called assert result is not None + @pytest.mark.vcr() + def test_anthropic_parallel_native_tool_calling_test_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="anthropic/claude-sonnet-4-6"), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + assert result is not None 
+ _assert_tools_overlapped() + + @pytest.mark.vcr() + def test_anthropic_parallel_native_tool_calling_test_agent_kickoff( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="anthropic/claude-sonnet-4-6"), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + assert result is not None + _assert_tools_overlapped() + # ============================================================================= # Google/Gemini Provider Tests @@ -247,7 +588,7 @@ class TestGeminiNativeToolCalling: goal="Help users with mathematical calculations", backstory="You are a helpful math assistant.", tools=[calculator_tool], - llm=LLM(model="gemini/gemini-2.0-flash-exp"), + llm=LLM(model="gemini/gemini-2.5-flash"), ) task = Task( @@ -266,7 +607,7 @@ class TestGeminiNativeToolCalling: self, calculator_tool: CalculatorTool ) -> None: """Test Gemini agent kickoff with mocked LLM call.""" - llm = LLM(model="gemini/gemini-2.0-flash-001") + llm = LLM(model="gemini/gemini-2.5-flash") with patch.object(llm, "call", return_value="The answer is 120.") as mock_call: agent = Agent( @@ -290,6 +631,46 @@ class TestGeminiNativeToolCalling: assert mock_call.called assert result is not None + @pytest.mark.vcr() + def test_gemini_parallel_native_tool_calling_test_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gemini/gemini-2.5-flash"), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + assert result is not None + 
_assert_tools_overlapped() + + @pytest.mark.vcr() + def test_gemini_parallel_native_tool_calling_test_agent_kickoff( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="gemini/gemini-2.5-flash"), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + assert result is not None + _assert_tools_overlapped() + # ============================================================================= # Azure Provider Tests @@ -324,7 +705,7 @@ class TestAzureNativeToolCalling: goal="Help users with mathematical calculations", backstory="You are a helpful math assistant.", tools=[calculator_tool], - llm=LLM(model="azure/gpt-4o-mini"), + llm=LLM(model="azure/gpt-5-nano"), verbose=False, max_iter=3, ) @@ -347,7 +728,7 @@ class TestAzureNativeToolCalling: ) -> None: """Test Azure agent kickoff with mocked LLM call.""" llm = LLM( - model="azure/gpt-4o-mini", + model="azure/gpt-5-nano", api_key="test-key", base_url="https://test.openai.azure.com", ) @@ -374,6 +755,46 @@ class TestAzureNativeToolCalling: assert mock_call.called assert result is not None + @pytest.mark.vcr() + def test_azure_parallel_native_tool_calling_test_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="azure/gpt-5-nano"), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + assert result is not None + _assert_tools_overlapped() + + @pytest.mark.vcr() + def test_azure_parallel_native_tool_calling_test_agent_kickoff( + self, 
parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="azure/gpt-5-nano"), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + assert result is not None + _assert_tools_overlapped() + # ============================================================================= # Bedrock Provider Tests @@ -384,18 +805,30 @@ class TestBedrockNativeToolCalling: """Tests for native tool calling with AWS Bedrock models.""" @pytest.fixture(autouse=True) - def mock_aws_env(self): - """Mock AWS environment variables for tests.""" - env_vars = { - "AWS_ACCESS_KEY_ID": "test-key", - "AWS_SECRET_ACCESS_KEY": "test-secret", - "AWS_REGION": "us-east-1", - } - if "AWS_ACCESS_KEY_ID" not in os.environ: - with patch.dict(os.environ, env_vars): - yield - else: - yield + def validate_bedrock_credentials_for_live_recording(self): + """Run Bedrock tests only when explicitly enabled.""" + run_live_bedrock = os.getenv("RUN_BEDROCK_LIVE_TESTS", "false").lower() == "true" + + if not run_live_bedrock: + pytest.skip( + "Skipping Bedrock tests by default. " + "Set RUN_BEDROCK_LIVE_TESTS=true with valid AWS credentials to enable." + ) + + access_key = os.getenv("AWS_ACCESS_KEY_ID", "") + secret_key = os.getenv("AWS_SECRET_ACCESS_KEY", "") + if ( + not access_key + or not secret_key + or access_key.startswith(("fake-", "test-")) + or secret_key.startswith(("fake-", "test-")) + ): + pytest.skip( + "Skipping Bedrock tests: valid AWS credentials are required when " + "RUN_BEDROCK_LIVE_TESTS=true." 
+ ) + + yield @pytest.mark.vcr() def test_bedrock_agent_kickoff_with_tools_mocked( @@ -427,6 +860,46 @@ class TestBedrockNativeToolCalling: assert result.raw is not None assert "120" in str(result.raw) + @pytest.mark.vcr() + def test_bedrock_parallel_native_tool_calling_test_crew( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0"), + verbose=False, + max_iter=3, + ) + task = Task( + description=_parallel_prompt(), + expected_output="A one sentence summary of both tool outputs", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + assert result is not None + _assert_tools_overlapped() + + @pytest.mark.vcr() + def test_bedrock_parallel_native_tool_calling_test_agent_kickoff( + self, parallel_tools: list[BaseTool] + ) -> None: + agent = Agent( + role="Parallel Tool Agent", + goal="Use both tools exactly as instructed", + backstory="You follow tool instructions precisely.", + tools=parallel_tools, + llm=LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0"), + verbose=False, + max_iter=3, + ) + result = agent.kickoff(_parallel_prompt()) + assert result is not None + _assert_tools_overlapped() + # ============================================================================= # Cross-Provider Native Tool Calling Behavior Tests @@ -439,7 +912,7 @@ class TestNativeToolCallingBehavior: def test_supports_function_calling_check(self) -> None: """Test that supports_function_calling() is properly checked.""" # OpenAI should support function calling - openai_llm = LLM(model="gpt-4o-mini") + openai_llm = LLM(model="gpt-5-nano") assert hasattr(openai_llm, "supports_function_calling") assert openai_llm.supports_function_calling() is True @@ -475,7 +948,7 @@ class TestNativeToolCallingTokenUsage: 
goal="Perform calculations efficiently", backstory="You calculate things.", tools=[calculator_tool], - llm=LLM(model="gpt-4o-mini"), + llm=LLM(model="gpt-5-nano"), verbose=False, max_iter=3, ) @@ -519,7 +992,7 @@ def test_native_tool_calling_error_handling(failing_tool: FailingTool): goal="Perform calculations efficiently", backstory="You calculate things.", tools=[failing_tool], - llm=LLM(model="gpt-4o-mini"), + llm=LLM(model="gpt-5-nano"), verbose=False, max_iter=3, ) @@ -578,7 +1051,7 @@ class TestMaxUsageCountWithNativeToolCalling: goal="Call the counting tool multiple times", backstory="You are an agent that counts things.", tools=[tool], - llm=LLM(model="gpt-4o-mini"), + llm=LLM(model="gpt-5-nano"), verbose=False, max_iter=5, ) @@ -606,7 +1079,7 @@ class TestMaxUsageCountWithNativeToolCalling: goal="Use the counting tool as many times as requested", backstory="You are an agent that counts things. You must try to use the tool for each value requested.", tools=[tool], - llm=LLM(model="gpt-4o-mini"), + llm=LLM(model="gpt-5-nano"), verbose=False, max_iter=5, ) @@ -638,7 +1111,7 @@ class TestMaxUsageCountWithNativeToolCalling: goal="Use the counting tool exactly as requested", backstory="You are an agent that counts things precisely.", tools=[tool], - llm=LLM(model="gpt-4o-mini"), + llm=LLM(model="gpt-5-nano"), verbose=False, max_iter=5, ) @@ -653,5 +1126,153 @@ class TestMaxUsageCountWithNativeToolCalling: result = crew.kickoff() assert result is not None - # Verify usage count was incremented for each successful call - assert tool.current_usage_count == 2 + # Verify the requested calls occurred while keeping usage bounded. 
+ assert tool.current_usage_count >= 2 + assert tool.current_usage_count <= tool.max_usage_count + + +# ============================================================================= +# JSON Parse Error Handling Tests +# ============================================================================= + + +class TestNativeToolCallingJsonParseError: + """Tests that malformed JSON tool arguments produce clear errors + instead of silently dropping all arguments.""" + + def _make_executor(self, tools: list[BaseTool]) -> "CrewAgentExecutor": + """Create a minimal CrewAgentExecutor with mocked dependencies.""" + from crewai.agents.crew_agent_executor import CrewAgentExecutor + from crewai.tools.base_tool import to_langchain + + structured_tools = to_langchain(tools) + mock_agent = Mock() + mock_agent.key = "test_agent" + mock_agent.role = "tester" + mock_agent.verbose = False + mock_agent.fingerprint = None + mock_agent.tools_results = [] + + mock_task = Mock() + mock_task.name = "test" + mock_task.description = "test" + mock_task.id = "test-id" + + executor = object.__new__(CrewAgentExecutor) + executor.agent = mock_agent + executor.task = mock_task + executor.crew = Mock() + executor.tools = structured_tools + executor.original_tools = tools + executor.tools_handler = None + executor._printer = Mock() + executor.messages = [] + + return executor + + def test_malformed_json_returns_parse_error(self) -> None: + """Malformed JSON args must return a descriptive error, not silently become {}.""" + + class CodeTool(BaseTool): + name: str = "execute_code" + description: str = "Run code" + + def _run(self, code: str) -> str: + return f"ran: {code}" + + tool = CodeTool() + executor = self._make_executor([tool]) + + from crewai.utilities.agent_utils import convert_tools_to_openai_schema + _, available_functions, _ = convert_tools_to_openai_schema([tool]) + + malformed_json = '{"code": "print("hello")"}' + + result = executor._execute_single_native_tool_call( + call_id="call_123", + 
func_name="execute_code", + func_args=malformed_json, + available_functions=available_functions, + ) + + assert "Failed to parse tool arguments as JSON" in result["result"] + assert tool.current_usage_count == 0 + + def test_valid_json_still_executes_normally(self) -> None: + """Valid JSON args should execute the tool as before.""" + + class CodeTool(BaseTool): + name: str = "execute_code" + description: str = "Run code" + + def _run(self, code: str) -> str: + return f"ran: {code}" + + tool = CodeTool() + executor = self._make_executor([tool]) + + from crewai.utilities.agent_utils import convert_tools_to_openai_schema + _, available_functions, _ = convert_tools_to_openai_schema([tool]) + + valid_json = '{"code": "print(1)"}' + + result = executor._execute_single_native_tool_call( + call_id="call_456", + func_name="execute_code", + func_args=valid_json, + available_functions=available_functions, + ) + + assert result["result"] == "ran: print(1)" + + def test_dict_args_bypass_json_parsing(self) -> None: + """When func_args is already a dict, no JSON parsing occurs.""" + + class CodeTool(BaseTool): + name: str = "execute_code" + description: str = "Run code" + + def _run(self, code: str) -> str: + return f"ran: {code}" + + tool = CodeTool() + executor = self._make_executor([tool]) + + from crewai.utilities.agent_utils import convert_tools_to_openai_schema + _, available_functions, _ = convert_tools_to_openai_schema([tool]) + + result = executor._execute_single_native_tool_call( + call_id="call_789", + func_name="execute_code", + func_args={"code": "x = 42"}, + available_functions=available_functions, + ) + + assert result["result"] == "ran: x = 42" + + def test_schema_validation_catches_missing_args_on_native_path(self) -> None: + """The native function calling path should now enforce args_schema, + catching missing required fields before _run is called.""" + + class StrictTool(BaseTool): + name: str = "strict_tool" + description: str = "A tool with required args" + + 
def _run(self, code: str, language: str) -> str: + return f"{language}: {code}" + + tool = StrictTool() + executor = self._make_executor([tool]) + + from crewai.utilities.agent_utils import convert_tools_to_openai_schema + _, available_functions, _ = convert_tools_to_openai_schema([tool]) + + result = executor._execute_single_native_tool_call( + call_id="call_schema", + func_name="strict_tool", + func_args={"code": "print(1)"}, + available_functions=available_functions, + ) + + assert "Error" in result["result"] + assert "validation failed" in result["result"].lower() or "missing" in result["result"].lower() diff --git a/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_agent_kickoff.yaml b/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_agent_kickoff.yaml new file mode 100644 index 000000000..c35e40c57 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_agent_kickoff.yaml @@ -0,0 +1,247 @@ +interactions: +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task: + This is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}],"model":"claude-sonnet-4-6","stop_sequences":["\nObservation:"],"stream":false,"system":"You + are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour personal + goal is: Use both tools exactly as instructed","tools":[{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '1639' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-6","id":"msg_01XeN1XTXZgmPyLMMGjivabb","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll + execute all 3 parallel searches simultaneously right now!"},{"type":"tool_use","id":"toolu_01NwzvrxEz6tvT3A8ydvMtHu","name":"parallel_local_search_one","input":{"query":"latest 
+ OpenAI model release notes"},"caller":{"type":"direct"}},{"type":"tool_use","id":"toolu_01YCxzSB1suk9uPVC1uwfHz9","name":"parallel_local_search_two","input":{"query":"latest + Anthropic model release notes"},"caller":{"type":"direct"}},{"type":"tool_use","id":"toolu_01Mauvxzv58eDY7pUt9HMKGy","name":"parallel_local_search_three","input":{"query":"latest + Gemini model release notes"},"caller":{"type":"direct"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":914,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":169,"service_tier":"standard","inference_geo":"global"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:54:43 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-02-18T23:54:41Z' + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - 
ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '2099' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task: + This is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."},{"role":"assistant","content":[{"type":"tool_use","id":"toolu_01NwzvrxEz6tvT3A8ydvMtHu","name":"parallel_local_search_one","input":{"query":"latest + OpenAI model release notes"}},{"type":"tool_use","id":"toolu_01YCxzSB1suk9uPVC1uwfHz9","name":"parallel_local_search_two","input":{"query":"latest + Anthropic model release notes"}},{"type":"tool_use","id":"toolu_01Mauvxzv58eDY7pUt9HMKGy","name":"parallel_local_search_three","input":{"query":"latest + Gemini model release notes"}}]},{"role":"user","content":[{"type":"tool_result","tool_use_id":"toolu_01NwzvrxEz6tvT3A8ydvMtHu","content":"[one] + latest OpenAI model release notes"},{"type":"tool_result","tool_use_id":"toolu_01YCxzSB1suk9uPVC1uwfHz9","content":"[two] + latest Anthropic model release notes"},{"type":"tool_result","tool_use_id":"toolu_01Mauvxzv58eDY7pUt9HMKGy","content":"[three] + latest Gemini model release notes"}]}],"model":"claude-sonnet-4-6","stop_sequences":["\nObservation:"],"stream":false,"system":"You + are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour personal + goal is: Use both tools exactly as instructed","tools":[{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '2517' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: "{\"model\":\"claude-sonnet-4-6\",\"id\":\"msg_01PFXqwwdwwHWadPdtNU5tUZ\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"The + three parallel searches were executed successfully, each targeting the latest + release notes for the leading AI model families. 
The search results confirm + that queries were dispatched simultaneously to retrieve the most recent developments + from **OpenAI** (via tool one), **Anthropic** (via tool two), and **Google's + Gemini** (via tool three). While the local search tools returned placeholder + outputs in this test environment rather than detailed release notes, the structure + of the test validates that all three parallel tool calls were emitted correctly + and in the specified order \u2014 demonstrating proper concurrent tool-call + behavior with no dependencies between the three independent searches.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":1197,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":131,\"service_tier\":\"standard\",\"inference_geo\":\"global\"}}" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:54:49 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-02-18T23:54:44Z' + 
anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '4092' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_crew.yaml b/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_crew.yaml new file mode 100644 index 000000000..cff5647fd --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestAnthropicNativeToolCalling.test_anthropic_parallel_native_tool_calling_test_crew.yaml @@ -0,0 +1,254 @@ +interactions: +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task: + This is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}],"model":"claude-sonnet-4-6","stop_sequences":["\nObservation:"],"stream":false,"system":"You + are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour personal + goal is: Use both tools exactly as instructed","tools":[{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '1820' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: '{"model":"claude-sonnet-4-6","id":"msg_01RJ4CphwpmkmsJFJjeCNvXz","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll + execute all 3 parallel tool calls simultaneously right 
away!"},{"type":"tool_use","id":"toolu_01YWY3cSomRuv4USmq55Prk3","name":"parallel_local_search_one","input":{"query":"latest + OpenAI model release notes"},"caller":{"type":"direct"}},{"type":"tool_use","id":"toolu_01Aaqj3LMXksE1nB3pscRhV5","name":"parallel_local_search_two","input":{"query":"latest + Anthropic model release notes"},"caller":{"type":"direct"}},{"type":"tool_use","id":"toolu_01AcYxQvy8aYmAoUg9zx9qfq","name":"parallel_local_search_three","input":{"query":"latest + Gemini model release notes"},"caller":{"type":"direct"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":951,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":170,"service_tier":"standard","inference_geo":"global"}}' + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:54:51 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-02-18T23:54:49Z' + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + 
anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '1967' + status: + code: 200 + message: OK +- request: + body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task: + This is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."},{"role":"assistant","content":[{"type":"tool_use","id":"toolu_01YWY3cSomRuv4USmq55Prk3","name":"parallel_local_search_one","input":{"query":"latest + OpenAI model release notes"}},{"type":"tool_use","id":"toolu_01Aaqj3LMXksE1nB3pscRhV5","name":"parallel_local_search_two","input":{"query":"latest + Anthropic model release notes"}},{"type":"tool_use","id":"toolu_01AcYxQvy8aYmAoUg9zx9qfq","name":"parallel_local_search_three","input":{"query":"latest + Gemini model release notes"}}]},{"role":"user","content":[{"type":"tool_result","tool_use_id":"toolu_01YWY3cSomRuv4USmq55Prk3","content":"[one] + latest OpenAI model release notes"},{"type":"tool_result","tool_use_id":"toolu_01Aaqj3LMXksE1nB3pscRhV5","content":"[two] + latest Anthropic model release 
notes"},{"type":"tool_result","tool_use_id":"toolu_01AcYxQvy8aYmAoUg9zx9qfq","content":"[three] + latest Gemini model release notes"}]},{"role":"user","content":"Analyze the + tool result. If requirements are met, provide the Final Answer. Otherwise, call + the next tool. Deliver only the answer without meta-commentary."}],"model":"claude-sonnet-4-6","stop_sequences":["\nObservation:"],"stream":false,"system":"You + are Parallel Tool Agent. You follow tool instructions precisely.\nYour personal + goal is: Use both tools exactly as instructed","tools":[{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}},{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","input_schema":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + anthropic-version: + - '2023-06-01' + connection: + - keep-alive + content-length: + - '2882' + content-type: + - application/json + host: + - api.anthropic.com + x-api-key: + - X-API-KEY-XXX + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 0.73.0 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + 
x-stainless-timeout: + - NOT_GIVEN + method: POST + uri: https://api.anthropic.com/v1/messages + response: + body: + string: "{\"model\":\"claude-sonnet-4-6\",\"id\":\"msg_0143MHUne1az3Tt69EoLjyZd\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"Here + is the complete content returned from all three tool calls:\\n\\n- **parallel_local_search_one** + result: `[one] latest OpenAI model release notes`\\n- **parallel_local_search_two** + result: `[two] latest Anthropic model release notes`\\n- **parallel_local_search_three** + result: `[three] latest Gemini model release notes`\\n\\nAll three parallel + tool calls were executed successfully in the same response turn, returning + their respective outputs: the first tool searched for the latest OpenAI model + release notes, the second tool searched for the latest Anthropic model release + notes, and the third tool searched for the latest Gemini model release notes + \u2014 confirming that all search queries were dispatched concurrently and + their results retrieved as expected.\"}],\"stop_reason\":\"end_turn\",\"stop_sequence\":null,\"usage\":{\"input_tokens\":1272,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":172,\"service_tier\":\"standard\",\"inference_geo\":\"global\"}}" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Security-Policy: + - CSP-FILTERED + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:54:55 GMT + Server: + - cloudflare + Transfer-Encoding: + - chunked + X-Robots-Tag: + - none + anthropic-organization-id: + - ANTHROPIC-ORGANIZATION-ID-XXX + anthropic-ratelimit-input-tokens-limit: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-input-tokens-remaining: + - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-input-tokens-reset: + - 
ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX + anthropic-ratelimit-output-tokens-limit: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX + anthropic-ratelimit-output-tokens-remaining: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX + anthropic-ratelimit-output-tokens-reset: + - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX + anthropic-ratelimit-requests-limit: + - '20000' + anthropic-ratelimit-requests-remaining: + - '19999' + anthropic-ratelimit-requests-reset: + - '2026-02-18T23:54:52Z' + anthropic-ratelimit-tokens-limit: + - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX + anthropic-ratelimit-tokens-remaining: + - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX + anthropic-ratelimit-tokens-reset: + - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX + cf-cache-status: + - DYNAMIC + request-id: + - REQUEST-ID-XXX + strict-transport-security: + - STS-XXX + x-envoy-upstream-service-time: + - '3144' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_agent_with_native_tool_calling.yaml b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_agent_with_native_tool_calling.yaml index cfec2e992..53938dd0e 100644 --- a/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_agent_with_native_tool_calling.yaml +++ b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_agent_with_native_tool_calling.yaml @@ -5,20 +5,19 @@ interactions: calculations"}, {"role": "user", "content": "\nCurrent Task: Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final - answer, not a summary.\n\nThis is VERY important to you, your job depends on - it!"}], "stream": false, "stop": ["\nObservation:"], "tool_choice": "auto", - "tools": [{"function": {"name": "calculator", "description": "Perform mathematical - calculations. 
Use this for any math operations.", "parameters": {"properties": - {"expression": {"description": "Mathematical expression to evaluate", "title": - "Expression", "type": "string"}}, "required": ["expression"], "type": "object"}}, - "type": "function"}]}' + answer, not a summary."}], "stream": false, "tool_choice": "auto", "tools": + [{"function": {"name": "calculator", "description": "Perform mathematical calculations. + Use this for any math operations.", "parameters": {"properties": {"expression": + {"description": "Mathematical expression to evaluate", "title": "Expression", + "type": "string"}}, "required": ["expression"], "type": "object", "additionalProperties": + false}}, "type": "function"}]}' headers: Accept: - application/json Connection: - keep-alive Content-Length: - - '883' + - '828' Content-Type: - application/json User-Agent: @@ -32,20 +31,20 @@ interactions: x-ms-client-request-id: - X-MS-CLIENT-REQUEST-ID-XXX method: POST - uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2024-12-01-preview + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview response: body: string: '{"choices":[{"content_filter_results":{},"finish_reason":"tool_calls","index":0,"logprobs":null,"message":{"annotations":[],"content":null,"refusal":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\"expression\":\"15 - * 
8\"}","name":"calculator"},"id":"call_cJWzKh5LdBpY3Sk8GATS3eRe","type":"function"}]}}],"created":1769122114,"id":"chatcmpl-D0xlavS0V3m00B9Fsjyv39xQWUGFV","model":"gpt-4o-mini-2024-07-18","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":"fp_f97eff32c5","usage":{"completion_tokens":18,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":0,"rejected_prediction_tokens":0},"prompt_tokens":137,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":155}} + * 8\"}","name":"calculator"},"id":"call_Cow46pNllpDx0pxUgZFeqlh1","type":"function"}]}}],"created":1771459544,"id":"chatcmpl-DAlq4osCP9ABJ1HyXFBoYWylMg0bi","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":219,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":192,"rejected_prediction_tokens":0},"prompt_tokens":208,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":427}} ' headers: Content-Length: - - '1058' + - '1049' Content-Type: - application/json Date: - - Thu, 22 Jan 2026 22:48:34 GMT + - Thu, 19 Feb 2026 00:05:45 GMT Strict-Transport-Security: - STS-XXX apim-request-id: @@ -59,7 +58,7 @@ interactions: x-ms-client-request-id: - X-MS-CLIENT-REQUEST-ID-XXX x-ms-deployment-name: - - gpt-4o-mini + - gpt-5-nano 
x-ms-rai-invoked: - 'true' x-ms-region: @@ -83,26 +82,25 @@ interactions: calculations"}, {"role": "user", "content": "\nCurrent Task: Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final - answer, not a summary.\n\nThis is VERY important to you, your job depends on - it!"}, {"role": "assistant", "content": "", "tool_calls": [{"id": "call_cJWzKh5LdBpY3Sk8GATS3eRe", - "type": "function", "function": {"name": "calculator", "arguments": "{\"expression\":\"15 - * 8\"}"}}]}, {"role": "tool", "tool_call_id": "call_cJWzKh5LdBpY3Sk8GATS3eRe", - "content": "The result of 15 * 8 is 120"}, {"role": "user", "content": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}], "stream": - false, "stop": ["\nObservation:"], "tool_choice": "auto", "tools": [{"function": - {"name": "calculator", "description": "Perform mathematical calculations. Use - this for any math operations.", "parameters": {"properties": {"expression": - {"description": "Mathematical expression to evaluate", "title": "Expression", - "type": "string"}}, "required": ["expression"], "type": "object"}}, "type": - "function"}]}' + answer, not a summary."}, {"role": "assistant", "content": "", "tool_calls": + [{"id": "call_Cow46pNllpDx0pxUgZFeqlh1", "type": "function", "function": {"name": + "calculator", "arguments": "{\"expression\":\"15 * 8\"}"}}]}, {"role": "tool", + "tool_call_id": "call_Cow46pNllpDx0pxUgZFeqlh1", "content": "The result of 15 + * 8 is 120"}, {"role": "user", "content": "Analyze the tool result. If requirements + are met, provide the Final Answer. Otherwise, call the next tool. Deliver only + the answer without meta-commentary."}], "stream": false, "tool_choice": "auto", + "tools": [{"function": {"name": "calculator", "description": "Perform mathematical + calculations. 
Use this for any math operations.", "parameters": {"properties": + {"expression": {"description": "Mathematical expression to evaluate", "title": + "Expression", "type": "string"}}, "required": ["expression"], "type": "object", + "additionalProperties": false}}, "type": "function"}]}' headers: Accept: - application/json Connection: - keep-alive Content-Length: - - '1375' + - '1320' Content-Type: - application/json User-Agent: @@ -116,20 +114,19 @@ interactions: x-ms-client-request-id: - X-MS-CLIENT-REQUEST-ID-XXX method: POST - uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2024-12-01-preview + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview response: body: - string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"protected_material_code":{"filtered":false,"detected":false},"protected_material_text":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"annotations":[],"content":"The - result of the calculation is 
120.","refusal":null,"role":"assistant"}}],"created":1769122115,"id":"chatcmpl-D0xlbUNVA7RVkn0GsuBGoNhgQTtac","model":"gpt-4o-mini-2024-07-18","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":"fp_f97eff32c5","usage":{"completion_tokens":11,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":0,"rejected_prediction_tokens":0},"prompt_tokens":207,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":218}} + string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"protected_material_code":{"filtered":false,"detected":false},"protected_material_text":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"annotations":[],"content":"120","refusal":null,"role":"assistant"}}],"created":1771459547,"id":"chatcmpl-DAlq7zJimnIMoXieNww8jY5f2pIPd","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":203,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":192,"rejected_prediction_tokens":0},"prompt_tokens":284,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":487}} ' 
headers: Content-Length: - - '1250' + - '1207' Content-Type: - application/json Date: - - Thu, 22 Jan 2026 22:48:34 GMT + - Thu, 19 Feb 2026 00:05:49 GMT Strict-Transport-Security: - STS-XXX apim-request-id: @@ -143,7 +140,7 @@ interactions: x-ms-client-request-id: - X-MS-CLIENT-REQUEST-ID-XXX x-ms-deployment-name: - - gpt-4o-mini + - gpt-5-nano x-ms-rai-invoked: - 'true' x-ms-region: diff --git a/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_agent_kickoff.yaml b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_agent_kickoff.yaml new file mode 100644 index 000000000..ca3632302 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_agent_kickoff.yaml @@ -0,0 +1,198 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are Parallel Tool Agent. + You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}, {"role": "user", "content": "\nCurrent Task: This is + a tool-calling compliance test. In your next assistant turn, emit exactly 3 + tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary."}], "stream": false, "tool_choice": "auto", "tools": [{"function": + {"name": "parallel_local_search_one", "description": "Local search tool #1 for + concurrency testing.", "parameters": {"properties": {"query": {"description": + "Search query", "title": "Query", "type": "string"}}, "required": ["query"], + "type": "object", "additionalProperties": false}}, "type": "function"}, {"function": + {"name": "parallel_local_search_two", "description": "Local search tool #2 for + concurrency testing.", "parameters": {"properties": {"query": {"description": + "Search query", "title": "Query", "type": "string"}}, "required": ["query"], + "type": "object", "additionalProperties": false}}, "type": "function"}, {"function": + {"name": "parallel_local_search_three", "description": "Local search tool #3 + for concurrency testing.", "parameters": {"properties": {"query": {"description": + "Search query", "title": "Query", "type": "string"}}, "required": ["query"], + "type": "object", "additionalProperties": false}}, "type": "function"}]}' + headers: + Accept: + - application/json + Connection: + - keep-alive + Content-Length: + - '1763' + Content-Type: + - application/json + User-Agent: + - X-USER-AGENT-XXX + accept-encoding: + - ACCEPT-ENCODING-XXX + api-key: + - X-API-KEY-XXX + authorization: + - AUTHORIZATION-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + method: POST + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview + response: + body: + string: '{"choices":[{"content_filter_results":{},"finish_reason":"tool_calls","index":0,"logprobs":null,"message":{"annotations":[],"content":null,"refusal":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\"query\": + \"latest OpenAI model release 
notes\"}","name":"parallel_local_search_one"},"id":"call_emQmocGydKuxvESfQopNngdm","type":"function"},{"function":{"arguments":"{\"query\": + \"latest Anthropic model release notes\"}","name":"parallel_local_search_two"},"id":"call_eNpK9WUYFCX2ZEUPhYCKvdMs","type":"function"},{"function":{"arguments":"{\"query\": + \"latest Gemini model release notes\"}","name":"parallel_local_search_three"},"id":"call_Wdtl6jFxGehSUMn5I1O4Mrdx","type":"function"}]}}],"created":1771459550,"id":"chatcmpl-DAlqAyJGnQKDkNCaTcjU2T8BeJaXM","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":666,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":576,"rejected_prediction_tokens":0},"prompt_tokens":343,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":1009}} + + ' + headers: + Content-Length: + - '1433' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:05:55 GMT + Strict-Transport-Security: + - STS-XXX + apim-request-id: + - APIM-REQUEST-ID-XXX + azureml-model-session: + - AZUREML-MODEL-SESSION-XXX + x-accel-buffering: + - 'no' + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + x-ms-deployment-name: + - gpt-5-nano + x-ms-rai-invoked: + - 'true' + x-ms-region: + - X-MS-REGION-XXX + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + 
status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "system", "content": "You are Parallel Tool Agent. + You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}, {"role": "user", "content": "\nCurrent Task: This is + a tool-calling compliance test. In your next assistant turn, emit exactly 3 + tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}, {"role": "assistant", "content": "", "tool_calls": [{"id": + "call_emQmocGydKuxvESfQopNngdm", "type": "function", "function": {"name": "parallel_local_search_one", + "arguments": "{\"query\": \"latest OpenAI model release notes\"}"}}, {"id": + "call_eNpK9WUYFCX2ZEUPhYCKvdMs", "type": "function", "function": {"name": "parallel_local_search_two", + "arguments": "{\"query\": \"latest Anthropic model release notes\"}"}}, {"id": + "call_Wdtl6jFxGehSUMn5I1O4Mrdx", "type": "function", "function": {"name": "parallel_local_search_three", + "arguments": "{\"query\": \"latest Gemini model release notes\"}"}}]}, {"role": + "tool", "tool_call_id": "call_emQmocGydKuxvESfQopNngdm", "content": "[one] latest + OpenAI model release notes"}, {"role": "tool", "tool_call_id": "call_eNpK9WUYFCX2ZEUPhYCKvdMs", + "content": "[two] latest Anthropic model release notes"}, {"role": "tool", "tool_call_id": + "call_Wdtl6jFxGehSUMn5I1O4Mrdx", "content": "[three] latest Gemini model release + notes"}], "stream": false, "tool_choice": "auto", "tools": [{"function": {"name": + "parallel_local_search_one", "description": "Local search tool #1 for concurrency + testing.", 
"parameters": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}, "type": "function"}, {"function": {"name": + "parallel_local_search_two", "description": "Local search tool #2 for concurrency + testing.", "parameters": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}, "type": "function"}, {"function": {"name": + "parallel_local_search_three", "description": "Local search tool #3 for concurrency + testing.", "parameters": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}, "type": "function"}]}' + headers: + Accept: + - application/json + Connection: + - keep-alive + Content-Length: + - '2727' + Content-Type: + - application/json + User-Agent: + - X-USER-AGENT-XXX + accept-encoding: + - ACCEPT-ENCODING-XXX + api-key: + - X-API-KEY-XXX + authorization: + - AUTHORIZATION-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + method: POST + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview + response: + body: + string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"protected_material_code":{"filtered":false,"detected":false},"protected_material_text":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"annotations":[],"content":"The + latest release notes have been published for the OpenAI, Anthropic, and Gemini + models, signaling concurrent updates across the leading AI model families. 
+ Each set outlines new capabilities and performance improvements, along with + changes to APIs, tooling, and deployment guidelines. Users should review the + individual notes to understand new features, adjustments to tokenization, + latency or throughput, safety and alignment enhancements, pricing or access + changes, and any breaking changes or migration steps required to adopt the + updated models in existing workflows.","refusal":null,"role":"assistant"}}],"created":1771459556,"id":"chatcmpl-DAlqGKWXfGNlTIbDY9F6oHQp6hbxM","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":747,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":640,"rejected_prediction_tokens":0},"prompt_tokens":467,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":1214}} + + ' + headers: + Content-Length: + - '1778' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:06:02 GMT + Strict-Transport-Security: + - STS-XXX + apim-request-id: + - APIM-REQUEST-ID-XXX + azureml-model-session: + - AZUREML-MODEL-SESSION-XXX + x-accel-buffering: + - 'no' + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + x-ms-deployment-name: + - gpt-5-nano + x-ms-rai-invoked: + - 'true' + x-ms-region: + - X-MS-REGION-XXX + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-request-id: + - 
X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_crew.yaml b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_crew.yaml new file mode 100644 index 000000000..db53cf2f4 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestAzureNativeToolCalling.test_azure_parallel_native_tool_calling_test_crew.yaml @@ -0,0 +1,201 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are Parallel Tool Agent. + You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}, {"role": "user", "content": "\nCurrent Task: This is + a tool-calling compliance test. In your next assistant turn, emit exactly 3 + tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}], "stream": false, "tool_choice": + "auto", "tools": [{"function": {"name": "parallel_local_search_one", "description": + "Local search tool #1 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}, {"function": {"name": "parallel_local_search_two", "description": + "Local search tool #2 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}, {"function": {"name": "parallel_local_search_three", "description": + "Local search tool #3 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}]}' + headers: + Accept: + - application/json + Connection: + - keep-alive + Content-Length: + - '1944' + Content-Type: + - application/json + User-Agent: + - X-USER-AGENT-XXX + accept-encoding: + - ACCEPT-ENCODING-XXX + api-key: + - X-API-KEY-XXX + authorization: + - AUTHORIZATION-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + method: POST + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview + response: + body: + string: 
'{"choices":[{"content_filter_results":{},"finish_reason":"tool_calls","index":0,"logprobs":null,"message":{"annotations":[],"content":null,"refusal":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\"query\": + \"latest OpenAI model release notes\"}","name":"parallel_local_search_one"},"id":"call_NEvGoF86nhPQfXRoJd5SOyLd","type":"function"},{"function":{"arguments":"{\"query\": + \"latest Anthropic model release notes\"}","name":"parallel_local_search_two"},"id":"call_q8Q2du4gAMQLrGTgWgfwfbDZ","type":"function"},{"function":{"arguments":"{\"query\": + \"latest Gemini model release notes\"}","name":"parallel_local_search_three"},"id":"call_yTBal9ofZzuo10j0pWqhHCSj","type":"function"}]}}],"created":1771459563,"id":"chatcmpl-DAlqN7kyC5ACI5Yl1Pj63rOH5HIvI","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":2457,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":2368,"rejected_prediction_tokens":0},"prompt_tokens":378,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":2835}} + + ' + headers: + Content-Length: + - '1435' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:06:17 GMT + Strict-Transport-Security: + - STS-XXX + apim-request-id: + - APIM-REQUEST-ID-XXX + azureml-model-session: + - AZUREML-MODEL-SESSION-XXX + x-accel-buffering: + - 'no' + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + x-ms-deployment-name: + - gpt-5-nano + x-ms-rai-invoked: + - 'true' + x-ms-region: + - X-MS-REGION-XXX + x-ratelimit-limit-requests: + - 
X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "system", "content": "You are Parallel Tool Agent. + You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}, {"role": "user", "content": "\nCurrent Task: This is + a tool-calling compliance test. In your next assistant turn, emit exactly 3 + tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}, {"role": "assistant", "content": + "", "tool_calls": [{"id": "call_NEvGoF86nhPQfXRoJd5SOyLd", "type": "function", + "function": {"name": "parallel_local_search_one", "arguments": "{\"query\": + \"latest OpenAI model release notes\"}"}}, {"id": "call_q8Q2du4gAMQLrGTgWgfwfbDZ", + "type": "function", "function": {"name": "parallel_local_search_two", "arguments": + "{\"query\": \"latest Anthropic model release notes\"}"}}, {"id": "call_yTBal9ofZzuo10j0pWqhHCSj", + "type": "function", "function": {"name": "parallel_local_search_three", "arguments": + "{\"query\": \"latest Gemini model release notes\"}"}}]}, {"role": "tool", "tool_call_id": + "call_NEvGoF86nhPQfXRoJd5SOyLd", "content": "[one] latest OpenAI model release + notes"}, {"role": "tool", "tool_call_id": "call_q8Q2du4gAMQLrGTgWgfwfbDZ", "content": + "[two] latest Anthropic model release notes"}, {"role": "tool", "tool_call_id": + "call_yTBal9ofZzuo10j0pWqhHCSj", "content": "[three] latest Gemini model release + notes"}, {"role": "user", "content": "Analyze the tool result. If requirements + are met, provide the Final Answer. Otherwise, call the next tool. 
Deliver only + the answer without meta-commentary."}], "stream": false, "tool_choice": "auto", + "tools": [{"function": {"name": "parallel_local_search_one", "description": + "Local search tool #1 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}, {"function": {"name": "parallel_local_search_two", "description": + "Local search tool #2 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}, {"function": {"name": "parallel_local_search_three", "description": + "Local search tool #3 for concurrency testing.", "parameters": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, "type": + "function"}]}' + headers: + Accept: + - application/json + Connection: + - keep-alive + Content-Length: + - '3096' + Content-Type: + - application/json + User-Agent: + - X-USER-AGENT-XXX + accept-encoding: + - ACCEPT-ENCODING-XXX + api-key: + - X-API-KEY-XXX + authorization: + - AUTHORIZATION-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + method: POST + uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-5-nano/chat/completions?api-version=2024-12-01-preview + response: + body: + string: 
'{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"protected_material_code":{"filtered":false,"detected":false},"protected_material_text":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"annotations":[],"content":"The + three tool results indicate the latest release notes are available for OpenAI + models, Anthropic models, and Gemini models.","refusal":null,"role":"assistant"}}],"created":1771459579,"id":"chatcmpl-DAlqdRtr8EefmFfazuh4jm7KvVxim","model":"gpt-5-nano-2025-08-07","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":null,"usage":{"completion_tokens":1826,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":1792,"rejected_prediction_tokens":0},"prompt_tokens":537,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":2363}} + + ' + headers: + Content-Length: + - '1333' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:06:31 GMT + Strict-Transport-Security: + - STS-XXX + apim-request-id: + - APIM-REQUEST-ID-XXX + azureml-model-session: + - AZUREML-MODEL-SESSION-XXX + x-accel-buffering: + - 'no' + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-ms-client-request-id: + - X-MS-CLIENT-REQUEST-ID-XXX + x-ms-deployment-name: + - gpt-5-nano + x-ms-rai-invoked: + - 'true' + x-ms-region: + - X-MS-REGION-XXX + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + 
x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_agent_kickoff.yaml b/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_agent_kickoff.yaml new file mode 100644 index 000000000..6ffc10e62 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_agent_kickoff.yaml @@ -0,0 +1,63 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "\nCurrent Task: This + is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}]}], "inferenceConfig": {"stopSequences": ["\nObservation:"]}, + "system": [{"text": "You are Parallel Tool Agent. 
You follow tool instructions + precisely.\nYour personal goal is: Use both tools exactly as instructed"}], + "toolConfig": {"tools": [{"toolSpec": {"name": "parallel_local_search_one", + "description": "Local search tool #1 for concurrency testing.", "inputSchema": + {"json": {"properties": {"query": {"description": "Search query", "title": "Query", + "type": "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}}}, {"toolSpec": {"name": "parallel_local_search_two", "description": + "Local search tool #2 for concurrency testing.", "inputSchema": {"json": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}}}, + {"toolSpec": {"name": "parallel_local_search_three", "description": "Local search + tool #3 for concurrency testing.", "inputSchema": {"json": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}}}]}}' + headers: + Content-Length: + - '1773' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - X-USER-AGENT-XXX + amz-sdk-invocation-id: + - AMZ-SDK-INVOCATION-ID-XXX + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - AUTHORIZATION-XXX + x-amz-date: + - X-AMZ-DATE-XXX + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse + response: + body: + string: '{"message":"The security token included in the request is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:00:08 GMT + x-amzn-ErrorType: + - UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/ + x-amzn-RequestId: + - X-AMZN-REQUESTID-XXX + status: + code: 403 + message: Forbidden +version: 1 diff --git 
a/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_crew.yaml b/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_crew.yaml new file mode 100644 index 000000000..00ee01d24 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestBedrockNativeToolCalling.test_bedrock_parallel_native_tool_calling_test_crew.yaml @@ -0,0 +1,226 @@ +interactions: +- request: + body: '{"messages": [{"role": "user", "content": [{"text": "\nCurrent Task: This + is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}]}], "inferenceConfig": {"stopSequences": + ["\nObservation:"]}, "system": [{"text": "You are Parallel Tool Agent. 
You follow + tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"}], "toolConfig": {"tools": [{"toolSpec": {"name": "parallel_local_search_one", + "description": "Local search tool #1 for concurrency testing.", "inputSchema": + {"json": {"properties": {"query": {"description": "Search query", "title": "Query", + "type": "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}}}, {"toolSpec": {"name": "parallel_local_search_two", "description": + "Local search tool #2 for concurrency testing.", "inputSchema": {"json": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}}}, + {"toolSpec": {"name": "parallel_local_search_three", "description": "Local search + tool #3 for concurrency testing.", "inputSchema": {"json": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}}}]}}' + headers: + Content-Length: + - '1954' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - X-USER-AGENT-XXX + amz-sdk-invocation-id: + - AMZ-SDK-INVOCATION-ID-XXX + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - AUTHORIZATION-XXX + x-amz-date: + - X-AMZ-DATE-XXX + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse + response: + body: + string: '{"message":"The security token included in the request is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:00:07 GMT + x-amzn-ErrorType: + - UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/ + x-amzn-RequestId: + - X-AMZN-REQUESTID-XXX + status: + code: 403 + message: Forbidden +- request: + body: 
'{"messages": [{"role": "user", "content": [{"text": "\nCurrent Task: This + is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}]}, {"role": "user", "content": + [{"text": "\nCurrent Task: This is a tool-calling compliance test. In your next + assistant turn, emit exactly 3 tool calls in the same response (parallel tool + calls), in this order: 1) parallel_local_search_one(query=''latest OpenAI model + release notes''), 2) parallel_local_search_two(query=''latest Anthropic model + release notes''), 3) parallel_local_search_three(query=''latest Gemini model + release notes''). Do not call any other tools and do not answer before those + 3 tool calls are emitted. After the tool results return, provide a one paragraph + summary.\n\nThis is the expected criteria for your final answer: A one sentence + summary of both tool outputs\nyou MUST return the actual complete content as + the final answer, not a summary."}]}], "inferenceConfig": {"stopSequences": + ["\nObservation:"]}, "system": [{"text": "You are Parallel Tool Agent. You follow + tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed\n\nYou are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour + personal goal is: Use both tools exactly as instructed"}], "toolConfig": {"tools": + [{"toolSpec": {"name": "parallel_local_search_one", "description": "Local search + tool #1 for concurrency testing.", "inputSchema": {"json": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}}}, {"toolSpec": + {"name": "parallel_local_search_two", "description": "Local search tool #2 for + concurrency testing.", "inputSchema": {"json": {"properties": {"query": {"description": + "Search query", "title": "Query", "type": "string"}}, "required": ["query"], + "type": "object", "additionalProperties": false}}}}, {"toolSpec": {"name": "parallel_local_search_three", + "description": "Local search tool #3 for concurrency testing.", "inputSchema": + {"json": {"properties": {"query": {"description": "Search query", "title": "Query", + "type": "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}}}]}}' + headers: + Content-Length: + - '2855' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - X-USER-AGENT-XXX + amz-sdk-invocation-id: + - AMZ-SDK-INVOCATION-ID-XXX + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - AUTHORIZATION-XXX + x-amz-date: + - X-AMZ-DATE-XXX + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse + response: + body: + string: '{"message":"The security token included in the request is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:00:07 GMT + x-amzn-ErrorType: + - UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/ + x-amzn-RequestId: + - X-AMZN-REQUESTID-XXX + status: + code: 403 + message: Forbidden +- request: + body: 
'{"messages": [{"role": "user", "content": [{"text": "\nCurrent Task: This + is a tool-calling compliance test. In your next assistant turn, emit exactly + 3 tool calls in the same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}]}, {"role": "user", "content": + [{"text": "\nCurrent Task: This is a tool-calling compliance test. In your next + assistant turn, emit exactly 3 tool calls in the same response (parallel tool + calls), in this order: 1) parallel_local_search_one(query=''latest OpenAI model + release notes''), 2) parallel_local_search_two(query=''latest Anthropic model + release notes''), 3) parallel_local_search_three(query=''latest Gemini model + release notes''). Do not call any other tools and do not answer before those + 3 tool calls are emitted. After the tool results return, provide a one paragraph + summary.\n\nThis is the expected criteria for your final answer: A one sentence + summary of both tool outputs\nyou MUST return the actual complete content as + the final answer, not a summary."}]}, {"role": "user", "content": [{"text": + "\nCurrent Task: This is a tool-calling compliance test. 
In your next assistant + turn, emit exactly 3 tool calls in the same response (parallel tool calls), + in this order: 1) parallel_local_search_one(query=''latest OpenAI model release + notes''), 2) parallel_local_search_two(query=''latest Anthropic model release + notes''), 3) parallel_local_search_three(query=''latest Gemini model release + notes''). Do not call any other tools and do not answer before those 3 tool + calls are emitted. After the tool results return, provide a one paragraph summary.\n\nThis + is the expected criteria for your final answer: A one sentence summary of both + tool outputs\nyou MUST return the actual complete content as the final answer, + not a summary."}]}], "inferenceConfig": {"stopSequences": ["\nObservation:"]}, + "system": [{"text": "You are Parallel Tool Agent. You follow tool instructions + precisely.\nYour personal goal is: Use both tools exactly as instructed\n\nYou + are Parallel Tool Agent. You follow tool instructions precisely.\nYour personal + goal is: Use both tools exactly as instructed\n\nYou are Parallel Tool Agent. 
+ You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}], "toolConfig": {"tools": [{"toolSpec": {"name": "parallel_local_search_one", + "description": "Local search tool #1 for concurrency testing.", "inputSchema": + {"json": {"properties": {"query": {"description": "Search query", "title": "Query", + "type": "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}}}, {"toolSpec": {"name": "parallel_local_search_two", "description": + "Local search tool #2 for concurrency testing.", "inputSchema": {"json": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}}}, + {"toolSpec": {"name": "parallel_local_search_three", "description": "Local search + tool #3 for concurrency testing.", "inputSchema": {"json": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}}}]}}' + headers: + Content-Length: + - '3756' + Content-Type: + - !!binary | + YXBwbGljYXRpb24vanNvbg== + User-Agent: + - X-USER-AGENT-XXX + amz-sdk-invocation-id: + - AMZ-SDK-INVOCATION-ID-XXX + amz-sdk-request: + - !!binary | + YXR0ZW1wdD0x + authorization: + - AUTHORIZATION-XXX + x-amz-date: + - X-AMZ-DATE-XXX + method: POST + uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse + response: + body: + string: '{"message":"The security token included in the request is invalid."}' + headers: + Connection: + - keep-alive + Content-Length: + - '68' + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 00:00:07 GMT + x-amzn-ErrorType: + - UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/ + x-amzn-RequestId: + - X-AMZN-REQUESTID-XXX + status: + code: 403 + message: Forbidden +version: 1 diff --git 
a/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_agent_with_native_tool_calling.yaml b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_agent_with_native_tool_calling.yaml index 3682cdf68..da016a4dd 100644 --- a/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_agent_with_native_tool_calling.yaml +++ b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_agent_with_native_tool_calling.yaml @@ -3,14 +3,14 @@ interactions: body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": + not a summary."}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are Math Assistant. You are a helpful math assistant.\nYour personal goal + is: Help users with mathematical calculations"}], "role": "user"}, "tools": + [{"functionDeclarations": [{"description": "Perform mathematical calculations. 
+ Use this for any math operations.", "name": "calculator", "parameters_json_schema": + {"properties": {"expression": {"description": "Mathematical expression to evaluate", + "title": "Expression", "type": "string"}}, "required": ["expression"], "type": + "object", "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' headers: User-Agent: @@ -22,7 +22,7 @@ interactions: connection: - keep-alive content-length: - - '907' + - '892' content-type: - application/json host: @@ -32,31 +32,31 @@ interactions: x-goog-api-key: - X-GOOG-API-KEY-XXX method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent response: body: string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": [\n {\n \"functionCall\": {\n \"name\": \"calculator\",\n \ \"args\": {\n \"expression\": \"15 * 8\"\n }\n - \ }\n }\n ],\n \"role\": \"model\"\n },\n - \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.00062879999833447594\n - \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 103,\n \"candidatesTokenCount\": - 7,\n \"totalTokenCount\": 110,\n \"promptTokensDetails\": [\n {\n - \ \"modality\": \"TEXT\",\n \"tokenCount\": 103\n }\n ],\n - \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n - \ \"responseId\": \"PpByabfUHsih_uMPlu2ysAM\"\n}\n" + \ },\n \"thoughtSignature\": 
\"Cp8DAb4+9vu74rJ0QQNTa6oMMh3QAlvx3cS4TL0I1od7EdQZtMBbsr5viQiTUR/LKj8nwPvtLjZxib5SXqmV0t2B2ZMdq1nqD62vLPD3i7tmUeRoysODfxomRGRhy/CPysMhobt5HWF1W/n6tNiQz3V36f0/dRx5yJeyN4tJL/RZePv77FUqywOfFlYOkOIyAkrE5LT6FicOjhHm/B9bGV/y7TNmN6TtwQDxoE9nU92Q/UNZ7rNyZE7aSR7KPJZuRXrrBBh+akt5dX5n6N9kGWkyRpWVgUox01+b22RSj4S/QY45IvadtmmkFk8DMVAtAnEiK0WazltC+TOdUJHwVgBD494fngoVcHU+R1yIJrVe7h6Ce3Ts5IYLrRCedDU3wW1ghn/hXx1nvTqQumpsGTGtE2v3KjF/7DmQA96WzB1X7+QUOF2J3pK9HemiKxAQl4U9fP2eNN8shvy2YykBlahWDujEwye7ji4wIWtNHbf0t+uFwGTQ3QruAKXvWB04ExjHM2I/8O9U5tOsH0cwPqnpFR2EaTqaPXXUllZ2K+DaaA==\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated + function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 115,\n \"candidatesTokenCount\": 17,\n \"totalTokenCount\": 227,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 115\n + \ }\n ],\n \"thoughtsTokenCount\": 95\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"Y1KWadvNMKz1jMcPiJeJmAI\"\n}\n" headers: Alt-Svc: - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 Content-Type: - application/json; charset=UTF-8 Date: - - Thu, 22 Jan 2026 21:01:50 GMT + - Wed, 18 Feb 2026 23:59:32 GMT Server: - scaffolding on HTTPServer2 Server-Timing: - - gfet4t7; dur=521 + - gfet4t7; dur=956 Transfer-Encoding: - chunked Vary: @@ -76,18 +76,19 @@ interactions: body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": - "The result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. 
Deliver only the answer without meta-commentary."}], "role": - "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": + not a summary."}], "role": "user"}, {"parts": [{"functionCall": {"args": {"expression": + "15 * 8"}, "name": "calculator"}}], "role": "model"}, {"parts": [{"functionResponse": + {"name": "calculator", "response": {"result": "The result of 15 * 8 is 120"}}}], + "role": "user"}, {"parts": [{"text": "Analyze the tool result. If requirements + are met, provide the Final Answer. Otherwise, call the next tool. Deliver only + the answer without meta-commentary."}], "role": "user"}], "systemInstruction": + {"parts": [{"text": "You are Math Assistant. You are a helpful math assistant.\nYour + personal goal is: Help users with mathematical calculations"}], "role": "user"}, + "tools": [{"functionDeclarations": [{"description": "Perform mathematical calculations. 
+ Use this for any math operations.", "name": "calculator", "parameters_json_schema": + {"properties": {"expression": {"description": "Mathematical expression to evaluate", + "title": "Expression", "type": "string"}}, "required": ["expression"], "type": + "object", "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' headers: User-Agent: @@ -99,7 +100,7 @@ interactions: connection: - keep-alive content-length: - - '1219' + - '1326' content-type: - application/json host: @@ -109,378 +110,28 @@ interactions: x-goog-api-key: - X-GOOG-API-KEY-XXX method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent response: body: string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": - [\n {\n \"functionCall\": {\n \"name\": \"calculator\",\n - \ \"args\": {\n \"expression\": \"15 * 8\"\n }\n - \ }\n }\n ],\n \"role\": \"model\"\n },\n - \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.013549212898526872\n - \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 149,\n \"candidatesTokenCount\": - 7,\n \"totalTokenCount\": 156,\n \"promptTokensDetails\": [\n {\n - \ \"modality\": \"TEXT\",\n \"tokenCount\": 149\n }\n ],\n - \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n - \ \"responseId\": \"P5Byadc8kJT-4w_p99XQAQ\"\n}\n" + [\n {\n \"text\": \"The result of 15 * 8 is 120\"\n }\n + \ ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 191,\n \"candidatesTokenCount\": 14,\n \"totalTokenCount\": 205,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 191\n + \ }\n ]\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": + \"ZFKWaf2BMM6MjMcP6P--kQM\"\n}\n" 
headers: Alt-Svc: - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 Content-Type: - application/json; charset=UTF-8 Date: - - Thu, 22 Jan 2026 21:01:51 GMT + - Wed, 18 Feb 2026 23:59:33 GMT Server: - scaffolding on HTTPServer2 Server-Timing: - - gfet4t7; dur=444 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - X-Frame-Options: - - X-FRAME-OPTIONS-XXX - X-XSS-Protection: - - '0' - status: - code: 200 - message: OK -- request: - body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 - * 8\n\nThis is the expected criteria for your final answer: The result of the - calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": - "The result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. 
Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": - ["\nObservation:"]}}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - '*/*' - accept-encoding: - - ACCEPT-ENCODING-XXX - connection: - - keep-alive - content-length: - - '1531' - content-type: - - application/json - host: - - generativelanguage.googleapis.com - x-goog-api-client: - - google-genai-sdk/1.49.0 gl-python/3.13.3 - x-goog-api-key: - - X-GOOG-API-KEY-XXX - method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent - response: - body: - string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": - [\n {\n \"functionCall\": {\n \"name\": \"calculator\",\n - \ \"args\": {\n \"expression\": \"15 * 8\"\n }\n - \ }\n }\n ],\n \"role\": \"model\"\n },\n - \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.0409286447933742\n - \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 195,\n \"candidatesTokenCount\": - 7,\n \"totalTokenCount\": 202,\n \"promptTokensDetails\": [\n {\n - \ \"modality\": \"TEXT\",\n \"tokenCount\": 195\n }\n ],\n - \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n - \ \"responseId\": \"P5Byadn5HOK6_uMPnvmXwAk\"\n}\n" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Content-Type: - - application/json; charset=UTF-8 - Date: - - Thu, 22 Jan 2026 21:01:51 GMT - Server: - - scaffolding on HTTPServer2 - Server-Timing: - - gfet4t7; dur=503 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - X-Frame-Options: - - X-FRAME-OPTIONS-XXX - X-XSS-Protection: - - '0' - status: - code: 200 - 
message: OK -- request: - body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 - * 8\n\nThis is the expected criteria for your final answer: The result of the - calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": - "The result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. 
Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": - ["\nObservation:"]}}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - '*/*' - accept-encoding: - - ACCEPT-ENCODING-XXX - connection: - - keep-alive - content-length: - - '1843' - content-type: - - application/json - host: - - generativelanguage.googleapis.com - x-goog-api-client: - - google-genai-sdk/1.49.0 gl-python/3.13.3 - x-goog-api-key: - - X-GOOG-API-KEY-XXX - method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent - response: - body: - string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": - [\n {\n \"functionCall\": {\n \"name\": \"calculator\",\n - \ \"args\": {\n \"expression\": \"15 * 8\"\n }\n - \ }\n }\n ],\n \"role\": \"model\"\n },\n - \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.018002046006066457\n - \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 241,\n \"candidatesTokenCount\": - 7,\n \"totalTokenCount\": 248,\n \"promptTokensDetails\": [\n {\n - \ \"modality\": \"TEXT\",\n \"tokenCount\": 241\n }\n ],\n - \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n - \ \"responseId\": \"P5Byafi2PKbn_uMPtIbfuQI\"\n}\n" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Content-Type: - - application/json; charset=UTF-8 - Date: - - Thu, 22 Jan 2026 21:01:52 GMT - Server: - - scaffolding on HTTPServer2 - Server-Timing: - - gfet4t7; dur=482 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - X-Frame-Options: - - X-FRAME-OPTIONS-XXX - X-XSS-Protection: - - '0' - status: - code: 200 - 
message: OK -- request: - body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 - * 8\n\nThis is the expected criteria for your final answer: The result of the - calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": - "The result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. 
Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": - ["\nObservation:"]}}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - '*/*' - accept-encoding: - - ACCEPT-ENCODING-XXX - connection: - - keep-alive - content-length: - - '2155' - content-type: - - application/json - host: - - generativelanguage.googleapis.com - x-goog-api-client: - - google-genai-sdk/1.49.0 gl-python/3.13.3 - x-goog-api-key: - - X-GOOG-API-KEY-XXX - method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent - response: - body: - string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": - [\n {\n \"functionCall\": {\n \"name\": \"calculator\",\n - \ \"args\": {\n \"expression\": \"15 * 8\"\n }\n - \ }\n }\n ],\n \"role\": \"model\"\n },\n - \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.10329001290457589\n - \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 287,\n \"candidatesTokenCount\": - 7,\n \"totalTokenCount\": 294,\n \"promptTokensDetails\": [\n {\n - \ \"modality\": \"TEXT\",\n \"tokenCount\": 287\n }\n ],\n - \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n - \ \"responseId\": \"QJByaamVIP_g_uMPt6mI0Qg\"\n}\n" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Content-Type: - - application/json; charset=UTF-8 - Date: - - Thu, 22 Jan 2026 21:01:52 GMT - Server: - - scaffolding on HTTPServer2 - Server-Timing: - - gfet4t7; dur=534 - Transfer-Encoding: - - chunked - Vary: - - Origin - - X-Origin - - Referer - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - X-Frame-Options: - - X-FRAME-OPTIONS-XXX - X-XSS-Protection: - - '0' - status: - code: 200 - 
message: OK -- request: - body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15 - * 8\n\nThis is the expected criteria for your final answer: The result of the - calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}], - "role": "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": - "The result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. Deliver only the answer without meta-commentary."}], "role": - "user"}, {"parts": [{"text": ""}], "role": "model"}, {"parts": [{"text": "The - result of 15 * 8 is 120"}], "role": "user"}, {"parts": [{"text": "Analyze the - tool result. If requirements are met, provide the Final Answer. Otherwise, call - the next tool. 
Deliver only the answer without meta-commentary."}], "role": - "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant. - You are a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"}], "role": "user"}, "tools": [{"functionDeclarations": [{"description": - "Perform mathematical calculations. Use this for any math operations.", "name": - "calculator", "parameters": {"properties": {"expression": {"description": "Mathematical - expression to evaluate", "title": "Expression", "type": "STRING"}}, "required": - ["expression"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": - ["\nObservation:"]}}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - '*/*' - accept-encoding: - - ACCEPT-ENCODING-XXX - connection: - - keep-alive - content-length: - - '2467' - content-type: - - application/json - host: - - generativelanguage.googleapis.com - x-goog-api-client: - - google-genai-sdk/1.49.0 gl-python/3.13.3 - x-goog-api-key: - - X-GOOG-API-KEY-XXX - method: POST - uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-exp:generateContent - response: - body: - string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": - [\n {\n \"text\": \"120\\n\"\n }\n ],\n - \ \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n - \ \"avgLogprobs\": -0.0097615998238325119\n }\n ],\n \"usageMetadata\": - {\n \"promptTokenCount\": 333,\n \"candidatesTokenCount\": 4,\n \"totalTokenCount\": - 337,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n - \ \"tokenCount\": 333\n }\n ],\n \"candidatesTokensDetails\": - [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 4\n }\n - \ ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n \"responseId\": - \"QZByaZHABO-i_uMP58aYqAk\"\n}\n" - headers: - Alt-Svc: - - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 - Content-Type: - - application/json; charset=UTF-8 - Date: - - Thu, 22 Jan 2026 21:01:53 GMT - Server: - - scaffolding on 
HTTPServer2 - Server-Timing: - - gfet4t7; dur=412 + - gfet4t7; dur=421 Transfer-Encoding: - chunked Vary: diff --git a/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_agent_kickoff.yaml b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_agent_kickoff.yaml new file mode 100644 index 000000000..ae21dfce5 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_agent_kickoff.yaml @@ -0,0 +1,188 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour + personal goal is: Use both tools exactly as instructed"}], "role": "user"}, + "tools": [{"functionDeclarations": [{"description": "Local search tool #1 for + concurrency testing.", "name": "parallel_local_search_one", "parameters_json_schema": + {"properties": {"query": {"description": "Search query", "title": "Query", "type": + "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}, {"description": "Local search tool #2 for concurrency testing.", "name": + "parallel_local_search_two", "parameters_json_schema": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}, {"description": + "Local search tool #3 for concurrency testing.", "name": "parallel_local_search_three", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '1783' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"args\": {\n \"query\": \"latest OpenAI model + release notes\"\n }\n },\n \"thoughtSignature\": + 
\"CrICAb4+9vtrrkiSatPyOs7fssb9akcgCIiQdJKp/k+hcEZVNFvU/H0e4FFmLIhTCPRyHxmU+AQPtBZ5vg6y9ZCcv11RdcWgYW8rPQzCnC+YTUxPAfDzaObky1QsL5pl9+yglQqVoVM31ZcnoiH02z85pwAv6TSJxdJZEekW6XwcIrCoHNCgY3ghHFEd3y3wLJ5JWL7wmiRNTC9TCT8aJHXKFohYrb+4JMULCx8BqKVxOucZPiDHA8GsoqSlzkYEe2xCh9oSdaZpCFrxhZ9bwoVDbVmPrjaq2hj5BoJ5hNxscHJ/E0EOl4ogeKZW+hIVfdzpjAFZW9Oejkb9G4ZSLbxXsoO7x8bi4LHFRABniGrWvNuOOH0Udh4t57oXHXZO4u5NNTood/GkJGcP+aHqUAH1fwqL\"\n + \ },\n {\n \"functionCall\": {\n \"name\": + \"parallel_local_search_two\",\n \"args\": {\n \"query\": + \"latest Anthropic model release notes\"\n }\n }\n + \ },\n {\n \"functionCall\": {\n \"name\": + \"parallel_local_search_three\",\n \"args\": {\n \"query\": + \"latest Gemini model release notes\"\n }\n }\n }\n + \ ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated + function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 291,\n \"candidatesTokenCount\": 70,\n \"totalTokenCount\": 428,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 291\n + \ }\n ],\n \"thoughtsTokenCount\": 67\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"alKWacytCLi5jMcPhISaoAI\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 18 Feb 2026 23:59:39 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=999 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: This is a tool-calling + compliance test. 
In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}], "role": "user"}, {"parts": [{"functionCall": {"args": + {"query": "latest OpenAI model release notes"}, "name": "parallel_local_search_one"}, + "thoughtSignature": "CrICAb4-9vtrrkiSatPyOs7fssb9akcgCIiQdJKp_k-hcEZVNFvU_H0e4FFmLIhTCPRyHxmU-AQPtBZ5vg6y9ZCcv11RdcWgYW8rPQzCnC-YTUxPAfDzaObky1QsL5pl9-yglQqVoVM31ZcnoiH02z85pwAv6TSJxdJZEekW6XwcIrCoHNCgY3ghHFEd3y3wLJ5JWL7wmiRNTC9TCT8aJHXKFohYrb-4JMULCx8BqKVxOucZPiDHA8GsoqSlzkYEe2xCh9oSdaZpCFrxhZ9bwoVDbVmPrjaq2hj5BoJ5hNxscHJ_E0EOl4ogeKZW-hIVfdzpjAFZW9Oejkb9G4ZSLbxXsoO7x8bi4LHFRABniGrWvNuOOH0Udh4t57oXHXZO4u5NNTood_GkJGcP-aHqUAH1fwqL"}, + {"functionCall": {"args": {"query": "latest Anthropic model release notes"}, + "name": "parallel_local_search_two"}}, {"functionCall": {"args": {"query": "latest + Gemini model release notes"}, "name": "parallel_local_search_three"}}], "role": + "model"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_one", + "response": {"result": "[one] latest OpenAI model release notes"}}}], "role": + "user"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_two", + "response": {"result": "[two] latest Anthropic model release notes"}}}], "role": + "user"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_three", + "response": {"result": "[three] latest Gemini model release notes"}}}], "role": + "user"}], "systemInstruction": {"parts": [{"text": "You are Parallel Tool Agent. 
+ You follow tool instructions precisely.\nYour personal goal is: Use both tools + exactly as instructed"}], "role": "user"}, "tools": [{"functionDeclarations": + [{"description": "Local search tool #1 for concurrency testing.", "name": "parallel_local_search_one", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}, {"description": "Local search tool #2 for concurrency + testing.", "name": "parallel_local_search_two", "parameters_json_schema": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, {"description": + "Local search tool #3 for concurrency testing.", "name": "parallel_local_search_three", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '3071' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"text\": \"Here is a summary of the latest model + release notes: I have retrieved information regarding the latest OpenAI model + release notes, the latest Anthropic model release notes, and the latest Gemini + model release notes. 
The specific details of these release notes are available + through the respective tool outputs.\",\n \"thoughtSignature\": + \"CsoBAb4+9vtPvWFM08lR1S4QrLN+Z1+Zpf04Y/bC8tjOpnxz3EEvHyRNEwkslUX5pftBi8J78Xk4/FUER0xjJZc8clUObTvayxLNup4h1JwJ5ZdatulInNGTEieFnF4w8KjSFB/vqNCZvXWZbiLkpzqAnsoAIf0x4VmMN11V0Ozo+3f2QftD+iBrfu3g21UI5tbG0Z+0QHxjRVKXrQOp7dmoZPzaxI0zalfDEI+A2jGpVl/VvauVNv0jQn0yItcA5tkVeWLq6717CjNoig==\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 435,\n \"candidatesTokenCount\": 54,\n \"totalTokenCount\": 524,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 435\n + \ }\n ],\n \"thoughtsTokenCount\": 35\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"bFKWaZOZCqCvjMcPvvGNgAc\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 18 Feb 2026 23:59:41 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=967 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_crew.yaml b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_crew.yaml new file mode 100644 index 000000000..fc4e42135 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestGeminiNativeToolCalling.test_gemini_parallel_native_tool_calling_test_crew.yaml @@ -0,0 +1,192 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: This is a tool-calling + compliance test. 
In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}], "role": "user"}], "systemInstruction": + {"parts": [{"text": "You are Parallel Tool Agent. You follow tool instructions + precisely.\nYour personal goal is: Use both tools exactly as instructed"}], + "role": "user"}, "tools": [{"functionDeclarations": [{"description": "Local + search tool #1 for concurrency testing.", "name": "parallel_local_search_one", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}, {"description": "Local search tool #2 for concurrency + testing.", "name": "parallel_local_search_two", "parameters_json_schema": {"properties": + {"query": {"description": "Search query", "title": "Query", "type": "string"}}, + "required": ["query"], "type": "object", "additionalProperties": false}}, {"description": + "Local search tool #3 for concurrency testing.", "name": "parallel_local_search_three", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + 
accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '1964' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"args\": {\n \"query\": \"latest OpenAI model + release notes\"\n }\n },\n \"thoughtSignature\": + \"CuMEAb4+9vu1V1iOC9o/a8+jQqow8F4RTrjlnjnDCwsisMHLLJ+Wj3pZxbFDeIjCJe9pa6+14InyYHh/ezgHrv+xPGIJtX9pJQatDCBAfCmcZ3fDipVIMAHLcl0Q660EVuZ+vRgvNhPSau+uSN9u303wJsaKvdzOQnfww2LfLtJMNtOhSHfkfhfw2bkBOtMa5/FuLqKSr6m94dSdE7HShR6+jLMLbiSXkBLWsRp0jGl85Wvd0hoA7dUyq+uIuyOBr5Myo9uMrLbxfnrRRbPMorOpYTCmHK0HE8mEBRjzh1hNwcBcfRL0VcgA2UnBIurStIeVbq51BJQ1UOq6r1wVi50Wdh1GjIQ/iN9C15T1Ql3adjom5QbmY+XY08RJOiNyVplh1YQ0qlWCVHEpueEfdzcIB+BUauVrLNqBcBr5g6ekO5QZCAdt7PLerQU8jhKjDQy367jCKQyaHir0GmAISS8RlZ8tkLKNZlZhd11D76ui6X8ep9yznViBbqH0AS1R2hMm+ielMVFjhidglTMjqB0X+yk1K2eZXkc+R/xsXRPlnlZWRygnV+IbU8RAnZWtneM464Wccmc1scfF45GKiji5bLYO7Zx+ZF8mSLcQaC8M3z121D6VbFonhaIdkJ3Wb7nI2vEyxFjdinVk3/P0zL8nu3nHeqQviTrQIoHMsZk0yPyqu9NWxg3wGJL5pbcaQh87ROQuTsInkuzzEr0QMzjw9W5iquhMh4/Wy/OKXAgf3maQB9Jb4HoHZlc0io+KYqewFSVx2BvqXbqJbIrTkTo6XRTbK7dkwlCbMmE1wKIwjrrzZQI=\"\n + \ },\n {\n \"functionCall\": {\n \"name\": + \"parallel_local_search_two\",\n \"args\": {\n \"query\": + \"latest Anthropic model release notes\"\n }\n }\n + \ },\n {\n \"functionCall\": {\n \"name\": + \"parallel_local_search_three\",\n \"args\": {\n \"query\": + \"latest Gemini model release notes\"\n }\n }\n }\n + \ ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated + function call(s).\"\n }\n ],\n \"usageMetadata\": {\n 
\"promptTokenCount\": + 327,\n \"candidatesTokenCount\": 70,\n \"totalTokenCount\": 536,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 327\n + \ }\n ],\n \"thoughtsTokenCount\": 139\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"ZVKWabziF7bcjMcP3r2SuAg\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 18 Feb 2026 23:59:34 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=1262 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}], "role": "user"}, {"parts": [{"functionCall": + {"args": {"query": "latest OpenAI model release notes"}, "name": "parallel_local_search_one"}}, + {"functionCall": {"args": {"query": "latest Anthropic model release notes"}, + "name": "parallel_local_search_two"}}, {"functionCall": {"args": {"query": "latest + Gemini model release notes"}, "name": "parallel_local_search_three"}}], "role": + "model"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_one", + "response": {"result": "[one] latest OpenAI model release notes"}}}], "role": + "user"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_two", + "response": {"result": "[two] latest Anthropic model release notes"}}}], "role": + "user"}, {"parts": [{"functionResponse": {"name": "parallel_local_search_three", + "response": {"result": "[three] latest Gemini model release notes"}}}], "role": + "user"}, {"parts": [{"text": "Analyze the tool result. If requirements are met, + provide the Final Answer. Otherwise, call the next tool. Deliver only the answer + without meta-commentary."}], "role": "user"}], "systemInstruction": {"parts": + [{"text": "You are Parallel Tool Agent. 
You follow tool instructions precisely.\nYour + personal goal is: Use both tools exactly as instructed"}], "role": "user"}, + "tools": [{"functionDeclarations": [{"description": "Local search tool #1 for + concurrency testing.", "name": "parallel_local_search_one", "parameters_json_schema": + {"properties": {"query": {"description": "Search query", "title": "Query", "type": + "string"}}, "required": ["query"], "type": "object", "additionalProperties": + false}}, {"description": "Local search tool #2 for concurrency testing.", "name": + "parallel_local_search_two", "parameters_json_schema": {"properties": {"query": + {"description": "Search query", "title": "Query", "type": "string"}}, "required": + ["query"], "type": "object", "additionalProperties": false}}, {"description": + "Local search tool #3 for concurrency testing.", "name": "parallel_local_search_three", + "parameters_json_schema": {"properties": {"query": {"description": "Search query", + "title": "Query", "type": "string"}}, "required": ["query"], "type": "object", + "additionalProperties": false}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '3014' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.3 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"text\": \"The search results indicate the latest + model release notes for OpenAI, Anthropic, and Gemini are: [one] latest OpenAI + model release notes[two] latest Anthropic model release notes[three] latest + Gemini model release notes.\",\n \"thoughtSignature\": 
\"CsUPAb4+9vs4hkuatQAakl1FSHx5DIde9nHYobJdlWs2HEzES9gHn7uwjMIlFPTzJUbnZqxpAK93hqsCofdfGANr8dwK+/IbZAiMSikpAq2ZjEbWADjfalU3ke4LcQMh6TEYFVGz1QCinjne3jZx5jOVaL8YdAtjOYnBZWA6KqdvfKjD7+Ct/BLoEqvu4LW6kxhXQgcV+D3M1QxGlr1dxpajj4wyYFI9LXchE2vCdAMPYTkPQ4WPbS3xjz0jJb6qFAwwg+BY5kGemkWWVHsvq28t09pd7FEH0bod5cEpR65qEefpJfhHsXYqmOwHDkfNePYnYC+5qmn7kvkN+fhF41SoMRZahMZGDjIo+q6vvru3eXKmZiuLsrh8AqQIks/4S3sSuxt16ogYKE+LlFxml2ygXFPww59nRAtc+xK6VW8jB2vyv9Eo5cpnG9ZBv1dOznJnmj4AWA1ddMlp+yq8AdaboTSo5dysYMwFcSXS3kuU+xi92dC+7GqZZbDr5frvnc+MnSuzYwHhNjSQqvTo5DKGit53zDwlFJT74kLBXk36BOFQp4xlfs+BpKkw11bow6qQoTvC68D023ZHami+McO1WYBDoO5CrDoosU8fAYljqaGArBoMlssF4O7VKHEaEbEZnYCr0Wxo6XP/mtPIpHQE4OyCz/GAJSJtQv1hO7DNCMzpSpkLyuemB1SOZGl3mlLQhosh3TAGP0xgqmHpKccdCSWoXGWjO48VluFuV9E1FwW1Xi++XhMRcUaljJXPZaNVjGcAG1uAxeVkUMsY8tBvQ0vaumUK2jkzbyQTWeStEWwl1yKmklI8JDXske/k6tYJOyF+8t0mF7oCEqNHSNicj7TomihpPlVjNl1Mm4l5fvwlKtAPJwiKrchCunlZB3uGN1AR0h0Hvznffutc/lV/FWFbNgFAaNJZKRs40vMk1xmRZyH2rs+Ob2fZriQ3BSwzzNeiwDLXxm0m/ytOai+K9ObFuC/IEh5fJfvQbNeo3TmiCAMCZPNXMDtlOyLqQzzKwmMFH4c53Ol+kkTiuAKECNQR1dOCufAL0U5lzEUFRxFvOq67lp6xqG8m+WzCIkbnF8QyJHfujtXVMJACaevUkM7+kAVyTwETEKQsanp0tBwzV42ieChp/h7pivcC++cFXdSG5dvR94BgkHmtpC9+jfNH32RREPLuyWfU5aBXiOkxjRs9fDexAFjrkGjM18I+jqHZNeuUR20BKe2jFsU8xJS3Fa4eXabm/YPL1t8R5jr572Ch/r4bspFp8MQ5RcFo8Nn/HiBmW8uZ2BcLEY1RPWUBvxVhfvh/hNxaRKu21x8vGz72RoiNuOjNbeADYAaBJqBGLp0MALxZ/rnXPzDLQUt6Mv07fWHAZr5p3r/skleot25lr2Tcl4qJCPM4/cfs6U0x4CY26ktBiCs4bWKqSEV1Q05nf5kpxVOIRSTgxqFOj/rWIAF3uw7mvsuRKd3YXILV5OrvEoETdQvf7BdYPbQbIQYDf7DBKhf51O8RKQgcfl6mVQswamdJ+PyqLbozTkFCjXMKI0PwJdy8tfKfCeeEe0TbOXSfeTczKQkL8WyWkBg4tS81JnWAVzfVlNjbvo/fk+wv7FyfJJS1HJGlxZ0kUlWi1369rSlldYPoSqopuekOxtYnpYpz92y/jVLNQXE1IVLqWYh9o3gTwjeyaHG7fCaWF2QRGrCUvejT8eJjevhj/sgadjPVcEP5o7Zcw5yTBCgc0+FX1j5KpCmfZ/dVvT4iIX8bOkhxjHQ8ifOx39BMM4EObgCA+g+BFN+Ra7kOf4hJ6tPNhqvJa4E4fyISlVrRiBqSt59ZkuLyWuY9SYy0nvbklP30WDUHSAvcuEwVMSuT524afHISfO/+tSgE7JAKzEPSOoVO3Z5NS9kcAqHuBSe/LL4XJbCKF9Oggm9/gwdAulnBANd4ydQ/raTPE/QUu/CGqqGhBd+wo8x0Jg/BMZWkwhz0fEzsh+OjnrEkHv4QIq
Z9v/j1Rv9uc+cDeK7eGi62okGLrPFX2pNQtsZRdUM9aBSlTBUVSdCDpkvieENzLnR257EDZy1EV2HxGRfOFZVVdaW1n8XvL73pcFoQ5XABpfYuigOS8i4S8g43Qfe77GosnuXR5rcJCrL03q3hptb97K5ysKFLgumsaaWo92MBhZYKvQ6SwStgyWRlb22uQGQJYsS8OTD/uVNiQzFjOMsR/l71c9RI1Eb7SQJT6WWvL1YhA7sQw/lQf8soLKfWshoky6mMrGopjRak8xHpJe5VWbqK8PK6iXDd403JrHICyh4M3FpEja3eX2V3SN6U+EgIWKIE8lE/iQZakhLtG2KL7nNQy/cksxzIh5ElQCe5NkrQZO0fai6ek8qwbmz07RVg2FknD7F2hvmxZBqoJSXhsFVn/9+fnkcsZekEtUevFmlQQNspPc63XgO0XmpTye9uM/BbTEsNEWeHSFZTEQLLx1l+pgwsYO3NlNSIUN24/GIR7JrZFG4fAoljkDKjhrYQzr1Fiy3t5G+CmadZ0TcjRQQdDw36ETlf7cizcrQc4FNtnx5rNWEaf54vUvlsd2DD19UIkzP9omITsiuNPPcUNq0A6v1TkgnSNYfhb26nxJIg34r8MmCAhWzB2eCy54gvOHDGLFAwfFZrQdvl\"\n + \ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\": + \"STOP\",\n \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 504,\n \"candidatesTokenCount\": 45,\n \"totalTokenCount\": 973,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 504\n + \ }\n ],\n \"thoughtsTokenCount\": 424\n },\n \"modelVersion\": + \"gemini-2.5-flash\",\n \"responseId\": \"Z1KWaYbTKZvnjMcP7piEoAg\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 18 Feb 2026 23:59:37 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=2283 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling.yaml b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling.yaml index 2a984da27..28376cb8d 100644 --- a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling.yaml +++ 
b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling.yaml @@ -5,9 +5,9 @@ interactions: calculations"},{"role":"user","content":"\nCurrent Task: Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"calculator","description":"Perform - mathematical calculations. Use this for any math operations.","parameters":{"properties":{"expression":{"description":"Mathematical - expression to evaluate","title":"Expression","type":"string"}},"required":["expression"],"type":"object"}}}]}' + not a summary."}],"model":"gpt-5-nano","tool_choice":"auto","tools":[{"type":"function","function":{"name":"calculator","description":"Perform + mathematical calculations. Use this for any math operations.","strict":true,"parameters":{"properties":{"expression":{"description":"Mathematical + expression to evaluate","title":"Expression","type":"string"}},"required":["expression"],"type":"object","additionalProperties":false}}}]}' headers: User-Agent: - X-USER-AGENT-XXX @@ -20,7 +20,7 @@ interactions: connection: - keep-alive content-length: - - '829' + - '813' content-type: - application/json host: @@ -47,140 +47,17 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-D0vm7joOuDBPcMpfmOnftOoTCPtc8\",\n \"object\": - \"chat.completion\",\n \"created\": 1769114459,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n - \ \"id\": \"call_G73UZDvL4wC9EEdvm1UcRIRM\",\n \"type\": - \"function\",\n \"function\": {\n \"name\": \"calculator\",\n - \ \"arguments\": 
\"{\\\"expression\\\":\\\"15 * 8\\\"}\"\n }\n - \ }\n ],\n \"refusal\": null,\n \"annotations\": - []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 137,\n \"completion_tokens\": - 17,\n \"total_tokens\": 154,\n \"prompt_tokens_details\": {\n \"cached_tokens\": - 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": - {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": - 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Thu, 22 Jan 2026 20:40:59 GMT - Server: - - cloudflare - Set-Cookie: - - SET-COOKIE-XXX - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '761' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '1080' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"You are Math Assistant. 
You are - a helpful math assistant.\nYour personal goal is: Help users with mathematical - calculations"},{"role":"user","content":"\nCurrent Task: Calculate what is 15 - * 8\n\nThis is the expected criteria for your final answer: The result of the - calculation\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_G73UZDvL4wC9EEdvm1UcRIRM","type":"function","function":{"name":"calculator","arguments":"{\"expression\":\"15 - * 8\"}"}}]},{"role":"tool","tool_call_id":"call_G73UZDvL4wC9EEdvm1UcRIRM","content":"The - result of 15 * 8 is 120"},{"role":"user","content":"Analyze the tool result. - If requirements are met, provide the Final Answer. Otherwise, call the next - tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"calculator","description":"Perform - mathematical calculations. 
Use this for any math operations.","parameters":{"properties":{"expression":{"description":"Mathematical - expression to evaluate","title":"Expression","type":"string"}},"required":["expression"],"type":"object"}}}]}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1299' - content-type: - - application/json - cookie: - - COOKIE-XXX - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-D0vm8mUnzLxu9pf1rc7MODkrMsCmf\",\n \"object\": - \"chat.completion\",\n \"created\": 1769114460,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + string: "{\n \"id\": \"chatcmpl-DAlG9W2mJYuOgpf3FwCRgbqaiHWf3\",\n \"object\": + \"chat.completion\",\n \"created\": 1771457317,\n \"model\": \"gpt-5-nano-2025-08-07\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"120\",\n \"refusal\": null,\n - \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 207,\n \"completion_tokens\": - 2,\n \"total_tokens\": 209,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + \ \"annotations\": []\n },\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 208,\n \"completion_tokens\": + 138,\n \"total_tokens\": 346,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": - {\n 
\"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + {\n \"reasoning_tokens\": 128,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n" + \"default\",\n \"system_fingerprint\": null\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -189,7 +66,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 22 Jan 2026 20:41:00 GMT + - Wed, 18 Feb 2026 23:28:39 GMT Server: - cloudflare Strict-Transport-Security: @@ -207,13 +84,13 @@ interactions: openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '262' + - '1869' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '496' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: diff --git a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_agent_kickoff.yaml b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_agent_kickoff.yaml new file mode 100644 index 000000000..75e134753 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_agent_kickoff.yaml @@ -0,0 +1,265 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. 
In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. After the tool results return, provide a one + paragraph summary."}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1733' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + 
x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DAldZHfQGVcV3FNwAJAtNooU3PAU7\",\n \"object\": + \"chat.completion\",\n \"created\": 1771458769,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_kz1qLLRsugXwWiQMeH9oFAep\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest OpenAI model release + notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_yNouGq1Kv6P5W9fhTng6acZi\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_two\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Anthropic model + release notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_O7MqnuniDmyT6a0BS31GTunB\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_three\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Gemini model release + notes\\\"}\"\n }\n }\n ],\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 259,\n \"completion_tokens\": 78,\n \"total_tokens\": 337,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_414ba99a04\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:52:50 GMT + Server: + - 
cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '1418' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_kz1qLLRsugXwWiQMeH9oFAep","type":"function","function":{"name":"parallel_local_search_one","arguments":"{\"query\": + \"latest OpenAI model release notes\"}"}},{"id":"call_yNouGq1Kv6P5W9fhTng6acZi","type":"function","function":{"name":"parallel_local_search_two","arguments":"{\"query\": + \"latest Anthropic model release notes\"}"}},{"id":"call_O7MqnuniDmyT6a0BS31GTunB","type":"function","function":{"name":"parallel_local_search_three","arguments":"{\"query\": + \"latest Gemini model release notes\"}"}}]},{"role":"tool","tool_call_id":"call_kz1qLLRsugXwWiQMeH9oFAep","name":"parallel_local_search_one","content":"[one] + latest OpenAI model release notes"},{"role":"tool","tool_call_id":"call_yNouGq1Kv6P5W9fhTng6acZi","name":"parallel_local_search_two","content":"[two] + latest Anthropic model release notes"},{"role":"tool","tool_call_id":"call_O7MqnuniDmyT6a0BS31GTunB","name":"parallel_local_search_three","content":"[three] + latest Gemini model release notes"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + 
query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2756' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DAldbawkFNpOeXbaJTkTlsSi7OiII\",\n \"object\": + \"chat.completion\",\n \"created\": 1771458771,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The latest release notes for OpenAI, + Anthropic, and Gemini models highlight significant updates and improvements + in each respective technology. OpenAI's notes detail new features and optimizations + that enhance user interaction and performance. Anthropic's release emphasizes + their focus on safety and alignment in AI development, showcasing advancements + in responsible AI practices. 
Gemini's notes underline their innovative approaches + and cutting-edge functionalities designed to push the boundaries of current + AI capabilities.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 377,\n \"completion_tokens\": + 85,\n \"total_tokens\": 462,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_414ba99a04\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:52:53 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '1755' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_crew.yaml 
b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_crew.yaml new file mode 100644 index 000000000..fa063b1ae --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_test_crew.yaml @@ -0,0 +1,265 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1929' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + 
body: + string: "{\n \"id\": \"chatcmpl-DAlddfEozIpgleBufPaffZMQWK0Hj\",\n \"object\": + \"chat.completion\",\n \"created\": 1771458773,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_Putc2jV5GhiIZMwx8mDcI61Q\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest OpenAI model release + notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_iyjwcvkL3PdoOddxsqkHCT9T\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_two\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Anthropic model + release notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_G728RseEU7SbGk5YTiyyp9IH\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_three\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Gemini model release + notes\\\"}\"\n }\n }\n ],\n \"refusal\": + null,\n \"annotations\": []\n },\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\": + 1497,\n \"total_tokens\": 1875,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 1408,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:53:08 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - 
OPENAI-ORG-XXX + openai-processing-ms: + - '14853' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_Putc2jV5GhiIZMwx8mDcI61Q","type":"function","function":{"name":"parallel_local_search_one","arguments":"{\"query\": + \"latest OpenAI model release notes\"}"}},{"id":"call_iyjwcvkL3PdoOddxsqkHCT9T","type":"function","function":{"name":"parallel_local_search_two","arguments":"{\"query\": + \"latest Anthropic model release notes\"}"}},{"id":"call_G728RseEU7SbGk5YTiyyp9IH","type":"function","function":{"name":"parallel_local_search_three","arguments":"{\"query\": + \"latest Gemini model release notes\"}"}}]},{"role":"tool","tool_call_id":"call_Putc2jV5GhiIZMwx8mDcI61Q","name":"parallel_local_search_one","content":"[one] + latest OpenAI model release notes"},{"role":"tool","tool_call_id":"call_iyjwcvkL3PdoOddxsqkHCT9T","name":"parallel_local_search_two","content":"[two] + latest Anthropic model release notes"},{"role":"tool","tool_call_id":"call_G728RseEU7SbGk5YTiyyp9IH","name":"parallel_local_search_three","content":"[three] + latest Gemini model release notes"},{"role":"user","content":"Analyze the tool + result. If requirements are met, provide the Final Answer. Otherwise, call the + next tool. 
Deliver only the answer without meta-commentary."}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '3136' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DAldt2BXNqiYYLPgInjHCpYKfk2VK\",\n \"object\": + \"chat.completion\",\n \"created\": 1771458789,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + 
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The results show the latest model release + notes for OpenAI, Anthropic, and Gemini.\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 537,\n \"completion_tokens\": + 2011,\n \"total_tokens\": 2548,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 1984,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 18 Feb 2026 23:53:25 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '15368' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff.yaml 
b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff.yaml new file mode 100644 index 000000000..47dc51636 --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff.yaml @@ -0,0 +1,264 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary."}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1748' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DB244zBgA66fzl8TNcIPRWoE4lDIQ\",\n \"object\": + \"chat.completion\",\n \"created\": 1771521916,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + \ 
\"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_D2ojRWqkng6krQ51vWQEU8wR\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest OpenAI model release + notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_v1tpTKw1sYcI75SWG1LCkAC3\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_two\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Anthropic model + release notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_RrbyZClymnngoNLhlkQLLpwM\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_three\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Gemini model release + notes\\\"}\"\n }\n }\n ],\n \"refusal\": + null,\n \"annotations\": []\n },\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 343,\n \"completion_tokens\": + 855,\n \"total_tokens\": 1198,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 768,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 17:25:23 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '6669' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - 
v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_D2ojRWqkng6krQ51vWQEU8wR","type":"function","function":{"name":"parallel_local_search_one","arguments":"{\"query\": + \"latest OpenAI model release notes\"}"}},{"id":"call_v1tpTKw1sYcI75SWG1LCkAC3","type":"function","function":{"name":"parallel_local_search_two","arguments":"{\"query\": + \"latest Anthropic model release notes\"}"}},{"id":"call_RrbyZClymnngoNLhlkQLLpwM","type":"function","function":{"name":"parallel_local_search_three","arguments":"{\"query\": + \"latest Gemini model release notes\"}"}}]},{"role":"tool","tool_call_id":"call_D2ojRWqkng6krQ51vWQEU8wR","name":"parallel_local_search_one","content":"[one] + latest OpenAI model release notes"},{"role":"tool","tool_call_id":"call_v1tpTKw1sYcI75SWG1LCkAC3","name":"parallel_local_search_two","content":"[two] + latest Anthropic model release notes"},{"role":"tool","tool_call_id":"call_RrbyZClymnngoNLhlkQLLpwM","name":"parallel_local_search_three","content":"[three] + latest Gemini model release notes"}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency 
testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2771' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DB24DjyYsIHiQJ7hHXob8tQFfeXBs\",\n \"object\": + \"chat.completion\",\n \"created\": 1771521925,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The three latest release-note references + retrieved encompass OpenAI, Anthropic, and Gemini, indicating that all three + major model families are actively updating their offerings. These notes typically + cover improvements to capabilities, safety measures, performance enhancements, + and any new APIs or features, suggesting a trend of ongoing refinement across + providers. 
If you\u2019d like, I can pull the full release notes or extract + and compare the key changes across the three sources.\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 467,\n \"completion_tokens\": + 1437,\n \"total_tokens\": 1904,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 1344,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 17:25:35 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '10369' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_crew.yaml 
b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_crew.yaml new file mode 100644 index 000000000..9ce9bf06f --- /dev/null +++ b/lib/crewai/tests/cassettes/agents/TestOpenAINativeToolCalling.test_openai_parallel_native_tool_calling_tool_hook_parity_crew.yaml @@ -0,0 +1,339 @@ +interactions: +- request: + body: '{"trace_id": "e456cc10-ce7b-4e68-a2cc-ddb806a2e7b9", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.9.3", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2026-02-19T17:24:41.723158+00:00"}, + "ephemeral_trace_id": "e456cc10-ce7b-4e68-a2cc-ddb806a2e7b9"}' + headers: + Accept: + - '*/*' + Connection: + - keep-alive + Content-Length: + - '488' + Content-Type: + - application/json + User-Agent: + - X-USER-AGENT-XXX + X-Crewai-Organization-Id: + - 3433f0ee-8a94-4aa4-822b-2ac71aa38b18 + X-Crewai-Version: + - 1.9.3 + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + method: POST + uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches + response: + body: + string: '{"id":"a78f2aca-0525-47c7-8f37-b3fca0ad6672","ephemeral_trace_id":"e456cc10-ce7b-4e68-a2cc-ddb806a2e7b9","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.9.3","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.9.3","privacy_level":"standard"},"created_at":"2026-02-19T17:24:41.989Z","updated_at":"2026-02-19T17:24:41.989Z","access_code":"TRACE-bd80d6be74","user_identifier":null}' + headers: + Connection: + - keep-alive + Content-Length: + - '515' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 
19 Feb 2026 17:24:41 GMT + cache-control: + - no-store + content-security-policy: + - CSP-FILTERED + etag: + - ETAG-XXX + expires: + - '0' + permissions-policy: + - PERMISSIONS-POLICY-XXX + pragma: + - no-cache + referrer-policy: + - REFERRER-POLICY-XXX + strict-transport-security: + - STS-XXX + vary: + - Accept + x-content-type-options: + - X-CONTENT-TYPE-XXX + x-frame-options: + - X-FRAME-OPTIONS-XXX + x-permitted-cross-domain-policies: + - X-PERMITTED-XXX + x-request-id: + - X-REQUEST-ID-XXX + x-runtime: + - X-RUNTIME-XXX + x-xss-protection: + - X-XSS-PROTECTION-XXX + status: + code: 201 + message: Created +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1929' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + 
body: + string: "{\n \"id\": \"chatcmpl-DB23W8RBF6zlxweiHYGb6maVfyctt\",\n \"object\": + \"chat.completion\",\n \"created\": 1771521882,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_sge1FXUkpmPEDe8nTOgn0tQG\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"parallel_local_search_one\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest OpenAI model release + notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_z5jRPH4DQ7Wp3HdDUlZe8gGh\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_two\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Anthropic model + release notes\\\"}\"\n }\n },\n {\n \"id\": + \"call_DNlgqnadODDsyQkSuLcXZCX2\",\n \"type\": \"function\",\n + \ \"function\": {\n \"name\": \"parallel_local_search_three\",\n + \ \"arguments\": \"{\\\"query\\\": \\\"latest Gemini model release + notes\\\"}\"\n }\n }\n ],\n \"refusal\": + null,\n \"annotations\": []\n },\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\": + 2456,\n \"total_tokens\": 2834,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 2368,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 17:25:02 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - 
OPENAI-ORG-XXX + openai-processing-ms: + - '19582' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Parallel Tool Agent. You + follow tool instructions precisely.\nYour personal goal is: Use both tools exactly + as instructed"},{"role":"user","content":"\nCurrent Task: This is a tool-calling + compliance test. In your next assistant turn, emit exactly 3 tool calls in the + same response (parallel tool calls), in this order: 1) parallel_local_search_one(query=''latest + OpenAI model release notes''), 2) parallel_local_search_two(query=''latest Anthropic + model release notes''), 3) parallel_local_search_three(query=''latest Gemini + model release notes''). Do not call any other tools and do not answer before + those 3 tool calls are emitted. 
After the tool results return, provide a one + paragraph summary.\n\nThis is the expected criteria for your final answer: A + one sentence summary of both tool outputs\nyou MUST return the actual complete + content as the final answer, not a summary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_sge1FXUkpmPEDe8nTOgn0tQG","type":"function","function":{"name":"parallel_local_search_one","arguments":"{\"query\": + \"latest OpenAI model release notes\"}"}},{"id":"call_z5jRPH4DQ7Wp3HdDUlZe8gGh","type":"function","function":{"name":"parallel_local_search_two","arguments":"{\"query\": + \"latest Anthropic model release notes\"}"}},{"id":"call_DNlgqnadODDsyQkSuLcXZCX2","type":"function","function":{"name":"parallel_local_search_three","arguments":"{\"query\": + \"latest Gemini model release notes\"}"}}]},{"role":"tool","tool_call_id":"call_sge1FXUkpmPEDe8nTOgn0tQG","name":"parallel_local_search_one","content":"[one] + latest OpenAI model release notes"},{"role":"tool","tool_call_id":"call_z5jRPH4DQ7Wp3HdDUlZe8gGh","name":"parallel_local_search_two","content":"[two] + latest Anthropic model release notes"},{"role":"tool","tool_call_id":"call_DNlgqnadODDsyQkSuLcXZCX2","name":"parallel_local_search_three","content":"[three] + latest Gemini model release notes"},{"role":"user","content":"Analyze the tool + result. If requirements are met, provide the Final Answer. Otherwise, call the + next tool. 
Deliver only the answer without meta-commentary."}],"model":"gpt-5-nano","temperature":1,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"parallel_local_search_one","description":"Local + search tool #1 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_two","description":"Local + search tool #2 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"parallel_local_search_three","description":"Local + search tool #3 for concurrency testing.","strict":true,"parameters":{"properties":{"query":{"description":"Search + query","title":"Query","type":"string"}},"required":["query"],"type":"object","additionalProperties":false}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '3136' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DB23sY0Ahpd1yAgLZ882KkA50Zljx\",\n \"object\": + \"chat.completion\",\n \"created\": 1771521904,\n \"model\": \"gpt-5-nano-2025-08-07\",\n + 
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Results returned three items: the latest + OpenAI model release notes, the latest Anthropic model release notes, and + the latest Gemini model release notes.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": + {\n \"prompt_tokens\": 537,\n \"completion_tokens\": 1383,\n \"total_tokens\": + 1920,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": + 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": + 1344,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n + \ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": null\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Thu, 19 Feb 2026 17:25:16 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '12339' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/llms/google/test_gemini_crew_structured_output_with_tools.yaml 
b/lib/crewai/tests/cassettes/llms/google/test_gemini_crew_structured_output_with_tools.yaml new file mode 100644 index 000000000..78f9bbe4e --- /dev/null +++ b/lib/crewai/tests/cassettes/llms/google/test_gemini_crew_structured_output_with_tools.yaml @@ -0,0 +1,197 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate 15 + 27 using + your add_numbers tool. Report the result.\n\nThis is the expected criteria for + your final answer: A structured calculation result\nyou MUST return the actual + complete content as the final answer, not a summary.\nFormat your final answer + according to the following OpenAPI schema: {\n \"properties\": {\n \"operation\": + {\n \"description\": \"The mathematical operation performed\",\n \"title\": + \"Operation\",\n \"type\": \"string\"\n },\n \"result\": {\n \"description\": + \"The result of the calculation\",\n \"title\": \"Result\",\n \"type\": + \"integer\"\n },\n \"explanation\": {\n \"description\": \"Brief + explanation of the calculation\",\n \"title\": \"Explanation\",\n \"type\": + \"string\"\n }\n },\n \"required\": [\n \"operation\",\n \"result\",\n \"explanation\"\n ],\n \"title\": + \"CalculationResult\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python."}], "role": "user"}], "systemInstruction": {"parts": [{"text": + "You are Calculator. 
You are a calculator assistant that uses tools to compute + results.\nYour personal goal is: Perform calculations using available tools"}], + "role": "user"}, "tools": [{"functionDeclarations": [{"description": "Add two + numbers together and return the sum.", "name": "add_numbers", "parameters_json_schema": + {"properties": {"a": {"title": "A", "type": "integer"}, "b": {"title": "B", + "type": "integer"}}, "required": ["a", "b"], "type": "object", "additionalProperties": + false}}, {"description": "Use this tool to provide your final structured response. + Call this tool when you have gathered all necessary information and are ready + to provide the final answer in the required format.", "name": "structured_output", + "parameters_json_schema": {"properties": {"operation": {"description": "The + mathematical operation performed", "title": "Operation", "type": "string"}, + "result": {"description": "The result of the calculation", "title": "Result", + "type": "integer"}, "explanation": {"description": "Brief explanation of the + calculation", "title": "Explanation", "type": "string"}}, "required": ["operation", + "result", "explanation"], "title": "CalculationResult", "type": "object", "additionalProperties": + false, "propertyOrdering": ["operation", "result", "explanation"]}}]}], "generationConfig": + {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '2763' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.12 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"add_numbers\",\n + \ \"args\": 
{\n \"a\": 15,\n \"b\": + 27\n }\n }\n }\n ],\n \"role\": + \"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\": + 4.3579145442760951e-06\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": + 377,\n \"candidatesTokenCount\": 7,\n \"totalTokenCount\": 384,\n \"promptTokensDetails\": + [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 377\n + \ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\": + \"TEXT\",\n \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\": + \"gemini-2.0-flash-001\",\n \"responseId\": \"vVefaYDSOouXjMcPicLCsQY\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 25 Feb 2026 20:12:46 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=718 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +- request: + body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate 15 + 27 using + your add_numbers tool. 
Report the result.\n\nThis is the expected criteria for + your final answer: A structured calculation result\nyou MUST return the actual + complete content as the final answer, not a summary.\nFormat your final answer + according to the following OpenAPI schema: {\n \"properties\": {\n \"operation\": + {\n \"description\": \"The mathematical operation performed\",\n \"title\": + \"Operation\",\n \"type\": \"string\"\n },\n \"result\": {\n \"description\": + \"The result of the calculation\",\n \"title\": \"Result\",\n \"type\": + \"integer\"\n },\n \"explanation\": {\n \"description\": \"Brief + explanation of the calculation\",\n \"title\": \"Explanation\",\n \"type\": + \"string\"\n }\n },\n \"required\": [\n \"operation\",\n \"result\",\n \"explanation\"\n ],\n \"title\": + \"CalculationResult\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python."}], "role": "user"}, {"parts": [{"functionCall": {"args": {"a": + 15, "b": 27}, "name": "add_numbers"}}], "role": "model"}, {"parts": [{"functionResponse": + {"name": "add_numbers", "response": {"result": 42}}}], "role": "user"}, {"parts": + [{"text": "Analyze the tool result. If requirements are met, provide the Final + Answer. Otherwise, call the next tool. Deliver only the answer without meta-commentary."}], + "role": "user"}], "systemInstruction": {"parts": [{"text": "You are Calculator. 
+ You are a calculator assistant that uses tools to compute results.\nYour personal + goal is: Perform calculations using available tools"}], "role": "user"}, "tools": + [{"functionDeclarations": [{"description": "Add two numbers together and return + the sum.", "name": "add_numbers", "parameters_json_schema": {"properties": {"a": + {"title": "A", "type": "integer"}, "b": {"title": "B", "type": "integer"}}, + "required": ["a", "b"], "type": "object", "additionalProperties": false}}, {"description": + "Use this tool to provide your final structured response. Call this tool when + you have gathered all necessary information and are ready to provide the final + answer in the required format.", "name": "structured_output", "parameters_json_schema": + {"properties": {"operation": {"description": "The mathematical operation performed", + "title": "Operation", "type": "string"}, "result": {"description": "The result + of the calculation", "title": "Result", "type": "integer"}, "explanation": {"description": + "Brief explanation of the calculation", "title": "Explanation", "type": "string"}}, + "required": ["operation", "result", "explanation"], "title": "CalculationResult", + "type": "object", "additionalProperties": false, "propertyOrdering": ["operation", + "result", "explanation"]}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - '*/*' + accept-encoding: + - ACCEPT-ENCODING-XXX + connection: + - keep-alive + content-length: + - '3166' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + x-goog-api-client: + - google-genai-sdk/1.49.0 gl-python/3.13.12 + x-goog-api-key: + - X-GOOG-API-KEY-XXX + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent + response: + body: + string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\": + [\n {\n \"functionCall\": {\n \"name\": \"structured_output\",\n + \ 
\"args\": {\n \"result\": 42,\n \"explanation\": + \"15 + 27 = 42\",\n \"operation\": \"addition\"\n }\n + \ }\n }\n ],\n \"role\": \"model\"\n },\n + \ \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.07498827245500353\n + \ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 421,\n \"candidatesTokenCount\": + 18,\n \"totalTokenCount\": 439,\n \"promptTokensDetails\": [\n {\n + \ \"modality\": \"TEXT\",\n \"tokenCount\": 421\n }\n ],\n + \ \"candidatesTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n + \ \"tokenCount\": 18\n }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-001\",\n + \ \"responseId\": \"vlefac7bJb6TjMcPzYWh0Ag\"\n}\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Type: + - application/json; charset=UTF-8 + Date: + - Wed, 25 Feb 2026 20:12:47 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=774 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + X-Frame-Options: + - X-FRAME-OPTIONS-XXX + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/test_crew_testing_function.yaml b/lib/crewai/tests/cassettes/test_crew_testing_function.yaml index 5aff851cb..74ff9f0b9 100644 --- a/lib/crewai/tests/cassettes/test_crew_testing_function.yaml +++ b/lib/crewai/tests/cassettes/test_crew_testing_function.yaml @@ -1,735 +1,768 @@ interactions: - request: - body: '{"trace_id": "b2bfe230-4539-4522-a372-ab58b85f4ce1", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name": null, "crewai_version": "1.0.0", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-21T14:21:07.172904+00:00"}, "ephemeral_trace_id": "b2bfe230-4539-4522-a372-ab58b85f4ce1"}' + body: 
'{"messages":[{"role":"system","content":"You are Researcher. You''re an + expert researcher, specialized in technology, software engineering, AI and startups. + You work as a freelancer and is now working on doing research and analysis for + a new customer.\nYour personal goal is: Make the best research and analysis + on content about AI and AI agents"},{"role":"user","content":"\nCurrent Task: + Come up with a list of 5 interesting ideas to explore for an article, then write + one amazing paragraph highlight for each idea that showcases how good an article + about this topic could be. Return the list of ideas with their paragraph and + your notes.\n\nThis is the expected criteria for your final answer: 5 bullet + points with a paragraph for each idea.\nyou MUST return the actual complete + content as the final answer, not a summary.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}' headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, zstd - Connection: - - keep-alive - Content-Length: - - '488' - Content-Type: - - application/json User-Agent: - - CrewAI-CLI/1.0.0 - X-Crewai-Version: - - 1.0.0 - method: POST - uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches - response: - body: - string: '{"id":"9da97d5c-cb85-4950-8c08-3a33f4e87265","ephemeral_trace_id":"b2bfe230-4539-4522-a372-ab58b85f4ce1","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.0.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.0.0","privacy_level":"standard"},"created_at":"2025-10-21T14:21:09.013Z","updated_at":"2025-10-21T14:21:09.013Z","access_code":"TRACE-4feb6c2ae8","user_identifier":null}' - headers: - Connection: - - keep-alive - Content-Length: - - '515' - Content-Type: - - application/json; charset=utf-8 - Date: - - Tue, 21 Oct 2025 14:21:09 GMT - cache-control: - - no-store - content-security-policy: - - 
'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com 
- https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' - etag: - - W/"df138e6daf98e0258258ca1415a9037a" - expires: - - '0' - permissions-policy: - - camera=(), microphone=(self), geolocation=() - pragma: - - no-cache - referrer-policy: - - strict-origin-when-cross-origin - strict-transport-security: - - max-age=63072000; includeSubDomains - vary: - - Accept - x-content-type-options: - - nosniff - x-frame-options: - - SAMEORIGIN - x-permitted-cross-domain-policies: - - none - x-request-id: - - fc4913e1-332f-4747-a028-d42998def4f6 - x-runtime: - - '0.557821' - x-xss-protection: - - 1; mode=block - status: - code: 201 - message: Created -- request: - body: '{"messages": [{"role": "system", "content": "You are Researcher. You''re an expert researcher, specialized in technology, software engineering, AI and startups. 
You work as a freelancer and is now working on doing research and analysis for a new customer.\nYour personal goal is: Make the best research and analysis on content about AI and AI agents\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.\n\nThis is the expected criteria for your - final answer: 5 bullet points with a paragraph for each idea.\nyou MUST return the actual complete content as the final answer, not a summary.\n\nYou MUST follow these instructions: \n - Incorporate specific examples and case studies in initial outputs for clearer illustration of concepts.\n - Engage more with current events or trends to enhance relevance, especially in fields like remote work and decision-making.\n - Invite perspectives from experts and stakeholders to add depth to discussions on ethical implications and collaboration in creativity.\n - Use more precise language when discussing topics, ensuring clarity and accessibility for readers.\n - Encourage exploration of user experiences and testimonials to provide more relatable content, especially in education and mental health contexts.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stream": false}' - headers: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1979' + - '886' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CT7WWADiu3QnBcF0XrM0OdYDRxh6o\",\n \"object\": \"chat.completion\",\n \"created\": 1761056468,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer. \\nFinal Answer:\\n\\n- **The Rise of AI Agents in Remote Work** \\nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. 
This exploration would not only highlight the practical applications\ - \ of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\\n\\n- **AI as a Creative Collaborator** \\nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI's DALL-E or Adobe's Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role\ - \ of AI within it.\\n\\n- **Personalized Learning through AI** \\nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. 
By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\\n\\n- **The Ethical Implications of AI in Decision Making** \\nAs organizations increasingly rely on AI for decision-making—be it through predictive analytics in finance or recruiting algorithms in HR—the ethical\ - \ implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon's recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \\n\\n- **AI in Mental Health: A New Frontier** \\nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations\ - \ of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\\n\\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences—making them both informative and meaningful.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 354,\n \"completion_tokens\": 744,\n \"total_tokens\": 1098,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\ - : {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDFzCiMzYEJMnv9oV3KbMUwH6TGRO\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052086,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"- **The Rise of Autonomous AI Agents: + Redefining Productivity and Creativity** \\n This article would dive into + how autonomous AI agents\u2014intelligent software systems capable of independently + performing complex tasks\u2014are transforming industries by augmenting human + productivity and creativity. It would explore real-world use cases from automated + content generation and customer support bots to AI-driven design and research + assistants, illustrating how these agents reduce repetitive workload and open + new avenues for innovation. 
The article could also analyze challenges such + as ethical considerations, decision-making transparency, and integration with + existing workflows, offering readers a comprehensive view of how autonomous + AI agents are reshaping the future of work.\\n\\n- **Bridging Human-AI Collaboration: + Designing AI Agents for Intuitive Interaction** \\n This piece would investigate + the critical design principles behind successful human-AI collaboration, focusing + on building AI agents that communicate and interact naturally with users. + From natural language processing nuances to adaptive learning from user behavior, + the article would examine how these technological advancements create seamless + partnerships between humans and machines. Highlighting case studies in healthcare, + finance, and creative industries, it would demonstrate the importance of trust, + interpretability, and empathy in AI agent interfaces, emphasizing how better-designed + interactions can dramatically improve adoption and effectiveness.\\n\\n- **The + Ethical Frontier: Navigating Bias and Accountability in AI Agents** \\n Exploring + the ethical implications of deploying AI agents at scale, this article would + address pressing issues like algorithmic bias, privacy concerns, and accountability + in autonomous decision-making. It would analyze how biases embedded in training + data can propagate through AI agents, impacting critical outcomes in hiring, + lending, and law enforcement. 
The article would also discuss emerging regulatory + frameworks, best practices for auditing AI agents, and the role of interdisciplinary + ethics teams in ensuring these technologies are fair, transparent, and responsible, + helping readers grasp the societal responsibilities accompanying AI advancement.\\n\\n- + **AI Agents in Startups: Driving Innovation and Competitive Advantage** \\n + \ Focused on the startup ecosystem, this article would explore how emerging + companies leverage AI agents to disrupt markets and scale rapidly with limited + resources. It would profile startups using AI agents for customer acquisition, + personalized marketing, operational automation, and product development, illustrating + how these tools enable lean teams to achieve much more. The narrative would + consider investment trends, challenges faced by startups incorporating AI + agents, and strategies for balancing innovation with reliability, providing + entrepreneurs and investors with valuable insights into harnessing AI agents + for meaningful growth.\\n\\n- **From Data to Decision: How AI Agents Transform + Business Intelligence** \\n This article would delve into the role of AI + agents as intelligent intermediaries in business intelligence (BI) systems, + automating data analysis and delivering actionable insights in real-time. + It would explain how AI agents can parse vast datasets, identify trends, generate + forecasts, and even suggest strategic decisions without constant human oversight. 
+ Highlighting innovations like conversational BI interfaces and predictive + analytics agents, the article would underscore how businesses of all sizes + can democratize data-driven decision-making, driving agility and competitive + advantage in increasingly complex markets.\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 164,\n \"completion_tokens\": + 597,\n \"total_tokens\": 761,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_417e90869b\"\n}\n" headers: CF-RAY: - - 9921664f4da41b58-EWR + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 21 Oct 2025 14:21:25 GMT + - Wed, 25 Feb 2026 20:41:39 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=p.q22A4.LnQmooo01V_89ZAEGXj_S4fJNkPlbLadtaE-1761056485-1.0.1.1-txy4D4FrtqHpILOE_iiFcBXCTM8d2UsSGzKJeB0qgd3TosZJx3.EmL1CgIJqbJS31Qd5mnCHOqUjx6UFOgOxfBO1NpIe4inEmYUS9xJf33M; path=/; expires=Tue, 21-Oct-25 14:51:25 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=Vix88TUp4dnmVridKpA6LWYGOsSdcnEg942n1s6NoNg-1761056485340-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '16416' + - '13437' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - 
'2020-10-01' - x-envoy-upstream-service-time: - - '16584' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '10000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '9999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199538' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 8.64s + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 138ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_711a382be0544fa291fe95d0f1e51072 + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator. Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed\nYour personal goal is: Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating - on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting - but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, - and the definition of creativity itself. 
This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations - increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. 
An inspiring article could chronicle the journeys of users who have benefited - from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.\n\nThis is the expected criteria for your final answer: Evaluation Score from 1 to 10 based on the performance of the agents on the tasks\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains - only the content in the following format: {\n \"quality\": float\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stream": false}' + body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Task Execution + Evaluator. 
Evaluator agent for crew evaluation with precise capabilities to + evaluate the performance of the agents in the crew based on the tasks they have + performed\\nYour personal goal is: Your goal is to evaluate the performance + of the agents in the crew based on the tasks they have performed using score + from 1 to 10 evaluating on completion, quality, and overall performance.\"},{\"role\":\"user\",\"content\":\"\\nCurrent + Task: Based on the task description and the expected output, compare and evaluate + the performance of the agents in the crew based on the Task Output they have + performed using score from 1 to 10 evaluating on completion, quality, and overall + performance.task_description: Come up with a list of 5 interesting ideas to + explore for an article, then write one amazing paragraph highlight for each + idea that showcases how good an article about this topic could be. Return the + list of ideas with their paragraph and your notes. task_expected_output: 5 bullet + points with a paragraph for each idea. agent: Researcher agent_goal: Make the + best research and analysis on content about AI and AI agents Task Output: - + **The Rise of Autonomous AI Agents: Redefining Productivity and Creativity** + \ \\n This article would dive into how autonomous AI agents\u2014intelligent + software systems capable of independently performing complex tasks\u2014are + transforming industries by augmenting human productivity and creativity. It + would explore real-world use cases from automated content generation and customer + support bots to AI-driven design and research assistants, illustrating how these + agents reduce repetitive workload and open new avenues for innovation. 
The article + could also analyze challenges such as ethical considerations, decision-making + transparency, and integration with existing workflows, offering readers a comprehensive + view of how autonomous AI agents are reshaping the future of work.\\n\\n- **Bridging + Human-AI Collaboration: Designing AI Agents for Intuitive Interaction** \\n + \ This piece would investigate the critical design principles behind successful + human-AI collaboration, focusing on building AI agents that communicate and + interact naturally with users. From natural language processing nuances to adaptive + learning from user behavior, the article would examine how these technological + advancements create seamless partnerships between humans and machines. Highlighting + case studies in healthcare, finance, and creative industries, it would demonstrate + the importance of trust, interpretability, and empathy in AI agent interfaces, + emphasizing how better-designed interactions can dramatically improve adoption + and effectiveness.\\n\\n- **The Ethical Frontier: Navigating Bias and Accountability + in AI Agents** \\n Exploring the ethical implications of deploying AI agents + at scale, this article would address pressing issues like algorithmic bias, + privacy concerns, and accountability in autonomous decision-making. It would + analyze how biases embedded in training data can propagate through AI agents, + impacting critical outcomes in hiring, lending, and law enforcement. 
The article + would also discuss emerging regulatory frameworks, best practices for auditing + AI agents, and the role of interdisciplinary ethics teams in ensuring these + technologies are fair, transparent, and responsible, helping readers grasp the + societal responsibilities accompanying AI advancement.\\n\\n- **AI Agents in + Startups: Driving Innovation and Competitive Advantage** \\n Focused on the + startup ecosystem, this article would explore how emerging companies leverage + AI agents to disrupt markets and scale rapidly with limited resources. It would + profile startups using AI agents for customer acquisition, personalized marketing, + operational automation, and product development, illustrating how these tools + enable lean teams to achieve much more. The narrative would consider investment + trends, challenges faced by startups incorporating AI agents, and strategies + for balancing innovation with reliability, providing entrepreneurs and investors + with valuable insights into harnessing AI agents for meaningful growth.\\n\\n- + **From Data to Decision: How AI Agents Transform Business Intelligence** \\n + \ This article would delve into the role of AI agents as intelligent intermediaries + in business intelligence (BI) systems, automating data analysis and delivering + actionable insights in real-time. It would explain how AI agents can parse vast + datasets, identify trends, generate forecasts, and even suggest strategic decisions + without constant human oversight. 
Highlighting innovations like conversational + BI interfaces and predictive analytics agents, the article would underscore + how businesses of all sizes can democratize data-driven decision-making, driving + agility and competitive advantage in increasingly complex markets.\\n\\nThis + is the expected criteria for your final answer: Evaluation Score from 1 to 10 + based on the performance of the agents on the tasks\\nyou MUST return the actual + complete content as the final answer, not a summary.\\nFormat your final answer + according to the following OpenAPI schema: {\\n \\\"properties\\\": {\\n \\\"quality\\\": + {\\n \\\"description\\\": \\\"A score from 1 to 10 evaluating on completion, + quality, and overall performance from the task_description and task_expected_output + to the actual Task Output.\\\",\\n \\\"title\\\": \\\"Quality\\\",\\n \\\"type\\\": + \\\"number\\\"\\n }\\n },\\n \\\"required\\\": [\\n \\\"quality\\\"\\n + \ ],\\n \\\"title\\\": \\\"TaskEvaluationPydanticOutput\\\",\\n \\\"type\\\": + \\\"object\\\",\\n \\\"additionalProperties\\\": false\\n}\\n\\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\\n\\nDo + not include the OpenAPI schema in the final output. 
Ensure the final output + does not include any code block markers like ```json or ```python.\\n\\nProvide + your complete response:\"}],\"model\":\"gpt-4o-mini\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"quality\":{\"description\":\"A + score from 1 to 10 evaluating on completion, quality, and overall performance + from the task_description and task_expected_output to the actual Task Output.\",\"title\":\"Quality\",\"type\":\"number\"}},\"required\":[\"quality\"],\"title\":\"TaskEvaluationPydanticOutput\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"TaskEvaluationPydanticOutput\",\"strict\":true}},\"stream\":false}" headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '6338' + - '6502' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CT7WnkkrcKAvciaNC1JI6BT3L6Xm5\",\n \"object\": \"chat.completion\",\n \"created\": 1761056485,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: {\\n \\\"quality\\\": 9.5\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": 
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1134,\n \"completion_tokens\": 22,\n \"total_tokens\": 1156,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDFzQTBe214rOuf82URXmgkuNj5u4\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052100,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"quality\\\":9}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 1134,\n \"completion_tokens\": 5,\n \"total_tokens\": 1139,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_bd4be55b21\"\n}\n" headers: CF-RAY: - - 992166b9cb8e42c0-EWR + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 21 Oct 2025 14:21:26 GMT + - Wed, 25 Feb 2026 20:41:40 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=ujzGLIjq87ZWlYFBH3FycG8cIYtaTjon9XdbV63J84s-1761056486-1.0.1.1-V9wZVN8TxLIQ..Cd6VD53rSKVM8GssieHpzu53MMLsuoM7jVI8nAKNTbZeCqJxyHPutyhj_BwPvR56_gb0Nx90S6pVs3gQC2vj8VmCPbh1Y; path=/; expires=Tue, 21-Oct-25 14:51:26 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=GIa.4ZxD52A2dXSZxoW1Mckm_eGjntP2i_mB4sczwEI-1761056486732-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - 
max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '787' + - '241' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '955' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '10000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '9999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '198454' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 8.64s + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 463ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_1911f5f9d34e499ca97c99ee3f40d04b + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"events": [{"event_id": "660f299b-0769-482e-b169-fcd0cb2ce48b", "timestamp": "2025-10-21T14:21:07.171393+00:00", "type": "crew_kickoff_started", "event_data": {"timestamp": "2025-10-21T14:21:07.171393+00:00", "type": "crew_kickoff_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": "crew", "crew": null, "inputs": {"topic": "AI"}}}, {"event_id": "9c615452-4b34-4f35-8768-a15b69e01205", "timestamp": "2025-10-21T14:21:07.173802+00:00", "type": "task_started", "event_data": {"task_description": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes.", "expected_output": "5 bullet points with a paragraph for each idea.", "task_name": "Come up with a list of 5 interesting - ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", "context": "", "agent_role": "Researcher", "task_id": "b6c0fa7b-c537-48d9-9456-914fd6dbc421"}}, {"event_id": "fec0b51d-50c2-4e7a-b73e-016d0fb260fc", "timestamp": "2025-10-21T14:21:07.174578+00:00", "type": "agent_execution_started", "event_data": {"agent_role": "Researcher", "agent_goal": "Make the best research and analysis on content about AI and AI agents", "agent_backstory": "You''re an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer."}}, {"event_id": "ac858ea6-3ba3-46cb-93e8-3244029faac4", "timestamp": "2025-10-21T14:21:07.174743+00:00", "type": "llm_call_started", "event_data": {"timestamp": "2025-10-21T14:21:07.174743+00:00", "type": - "llm_call_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "b6c0fa7b-c537-48d9-9456-914fd6dbc421", "task_name": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", "agent_id": "d2d12d9f-88f6-4955-bf69-f5f9868961c3", "agent_role": "Researcher", "from_task": null, "from_agent": null, "model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are Researcher. You''re an expert researcher, specialized in technology, software engineering, AI and startups. 
You work as a freelancer and is now working on doing research and analysis for a new customer.\nYour personal goal is: Make the best research and analysis on content about AI and AI agents\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: - I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.\n\nThis is the expected criteria for your final answer: 5 bullet points with a paragraph for each idea.\nyou MUST return the actual complete content as the final answer, not a summary.\n\nYou MUST follow these instructions: \n - Incorporate specific examples and case studies in initial outputs for clearer illustration of concepts.\n - Engage more with current events or trends to enhance relevance, especially in fields like remote work and decision-making.\n - Invite perspectives from experts and - stakeholders to add depth to discussions on ethical implications and collaboration in creativity.\n - Use more precise language when discussing topics, ensuring clarity and accessibility for readers.\n - Encourage exploration of user experiences and testimonials to provide more relatable content, especially in education and mental health contexts.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks": [""], "available_functions": null}}, {"event_id": "0ff635a9-0d57-4d4d-991a-71e2fc6e6b86", "timestamp": "2025-10-21T14:21:25.185353+00:00", "type": "llm_call_completed", "event_data": {"timestamp": "2025-10-21T14:21:25.185353+00:00", "type": "llm_call_completed", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "b6c0fa7b-c537-48d9-9456-914fd6dbc421", "task_name": - "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", "agent_id": "d2d12d9f-88f6-4955-bf69-f5f9868961c3", "agent_role": "Researcher", "from_task": null, "from_agent": null, "messages": [{"role": "system", "content": "You are Researcher. You''re an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.\nYour personal goal is: Make the best research and analysis on content about AI and AI agents\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, - my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes.\n\nThis is the expected criteria for your final answer: 5 bullet points with a paragraph for each idea.\nyou MUST return the actual complete content as the final answer, not a summary.\n\nYou MUST follow these instructions: \n - Incorporate specific examples and case studies in initial outputs for clearer illustration of concepts.\n - Engage more with current events or trends to enhance relevance, especially in fields like remote work and decision-making.\n - Invite perspectives from experts and stakeholders to add depth to discussions on ethical implications and collaboration in creativity.\n - Use more precise language when discussing topics, ensuring clarity and accessibility - for readers.\n - Encourage exploration of user experiences and testimonials to provide more relatable content, especially in education and mental health contexts.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "response": "I now can give a great answer. \nFinal Answer:\n\n- **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. 
This exploration would not only highlight the practical - applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the - role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. 
By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical - implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI - in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "call_type": "", "model": "gpt-4o-mini"}}, {"event_id": "4b0cdd26-0257-4e1e-95b5-f174793088f4", "timestamp": "2025-10-21T14:21:25.185503+00:00", "type": "agent_execution_completed", "event_data": {"agent_role": "Researcher", "agent_goal": "Make the best research and analysis on content about AI and AI agents", "agent_backstory": "You''re an expert - researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer."}}, {"event_id": "cf861e0b-7e38-4162-8c7f-64135c9b59f4", "timestamp": "2025-10-21T14:21:25.186875+00:00", "type": "task_started", "event_data": {"task_description": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content - about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. 
By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative - process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing - this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. 
By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could - explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground - for exploration, allowing for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "expected_output": "Evaluation Score from 1 to 10 based on the performance of the agents on the tasks", "task_name": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: - Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity - are being challenged as AI becomes a key player in the creative process. 
An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons - based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. 
By interviewing - ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology - for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "context": null, "agent_role": "Task Execution Evaluator", "task_id": "73f03e42-b56a-4e3b-8015-2377ba04393b"}}, {"event_id": "2f874052-6753-4641-97a8-ae358ff85ac8", "timestamp": "2025-10-21T14:21:25.187449+00:00", "type": "agent_execution_started", "event_data": {"agent_role": "Task Execution Evaluator", "agent_goal": "Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.", "agent_backstory": "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in - the crew based on the tasks they have performed"}}, {"event_id": "b60f05bd-529b-4041-bb8b-fd6a84e9c163", "timestamp": "2025-10-21T14:21:25.187556+00:00", "type": "llm_call_started", "event_data": {"timestamp": "2025-10-21T14:21:25.187556+00:00", "type": "llm_call_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "73f03e42-b56a-4e3b-8015-2377ba04393b", "task_name": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each - idea. 
agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries - of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. 
For instance, platforms like DreamBox and Knewton use AI algorithms - to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. - By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. 
Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying - on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "agent_id": "ddff2e62-4b2d-474d-a519-2acef5744e2b", "agent_role": "Task Execution Evaluator", "from_task": null, "from_agent": null, "model": "gpt-4o-mini", "messages": [{"role": "system", "content": "You are Task Execution Evaluator. Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed\nYour personal goal is: Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.\nTo give my best complete - final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could 
be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI - and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. - An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. 
This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this - topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore - how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. 
An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, - allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.\n\nThis is the expected criteria for your final answer: Evaluation Score from 1 to 10 based on the performance of the agents on the tasks\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"quality\": float\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks": [""], "available_functions": null}}, {"event_id": "1d66949b-ab73-4070-a772-77fbdcda1823", - "timestamp": "2025-10-21T14:21:26.580077+00:00", "type": "llm_call_completed", "event_data": {"timestamp": "2025-10-21T14:21:26.580077+00:00", "type": "llm_call_completed", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "73f03e42-b56a-4e3b-8015-2377ba04393b", "task_name": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI - agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. 
This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example - can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could - synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. 
By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies - are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing - for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "agent_id": "ddff2e62-4b2d-474d-a519-2acef5744e2b", "agent_role": "Task Execution Evaluator", "from_task": null, "from_agent": null, "messages": [{"role": "system", "content": "You are Task Execution Evaluator. Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed\nYour personal goal is: Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your - final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. 
agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate - environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, - and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. 
For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories - and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an - equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. 
Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the - potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.\n\nThis is the expected criteria for your final answer: Evaluation Score from 1 to 10 based on the performance of the agents on the tasks\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"quality\": float\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "response": "I now can give a great answer \nFinal Answer: {\n \"quality\": 9.5\n}", "call_type": "", "model": "gpt-4o-mini"}}, {"event_id": "4bd6a925-d033-4703-85ad-762ace24440d", "timestamp": "2025-10-21T14:21:26.580236+00:00", "type": "agent_execution_completed", - "event_data": {"agent_role": "Task Execution Evaluator", "agent_goal": "Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.", "agent_backstory": "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"}}, {"event_id": "85a0dad3-6767-4613-8565-d4c1f23921b4", "timestamp": "2025-10-21T14:21:26.581604+00:00", "type": "task_completed", "event_data": {"task_description": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea - that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. 
An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke - critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through - AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. 
By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. - This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers - would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. 
The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "task_name": "Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article - about this topic could be. Return the list of ideas with their paragraph and your notes. task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the - evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. 
An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of - education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could - pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. 
By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced - understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "task_id": "73f03e42-b56a-4e3b-8015-2377ba04393b", "output_raw": "{\n \"quality\": 9.5\n}", "output_format": "OutputFormat.PYDANTIC", "agent_role": "Task Execution Evaluator"}}, {"event_id": "09e44c6e-89d4-48b4-a9d5-035bf00f288d", "timestamp": "2025-10-21T14:21:26.581917+00:00", "type": "task_completed", "event_data": {"task_description": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each - idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes.", "task_name": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", "task_id": "b6c0fa7b-c537-48d9-9456-914fd6dbc421", "output_raw": "- **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity - and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity - itself. 
This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be - it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. 
An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed - with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "output_format": "OutputFormat.RAW", "agent_role": "Researcher"}}, {"event_id": "b4b3f1c6-a87a-414b-961a-7637f7d32334", "timestamp": "2025-10-21T14:21:26.587592+00:00", "type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-10-21T14:21:26.587592+00:00", "type": "crew_kickoff_completed", - "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", "name": "Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes.", "expected_output": "5 bullet points with a paragraph for each idea.", "summary": "Come up with a list of 5 interesting ideas to...", "raw": "- **The Rise of AI Agents in Remote Work** \nAs remote work continues to crystallize in the corporate environment, AI agents - are transforming how teams collaborate and operate. An article could delve into case studies from companies like GitLab and Buffer that have successfully integrated AI tools to facilitate communication, project tracking, and workload distribution among distributed teams. By interviewing team leaders and employees, we can uncover how AI agents are not merely assisting but actively enhancing productivity and reducing the cognitive load on human workers. This exploration would not only highlight the practical applications of AI in remote settings but also provoke critical discussions about the evolving nature of work, employee satisfaction, and balance between AI support and human effort.\n\n- **AI as a Creative Collaborator** \nThe boundaries of human creativity are being challenged as AI becomes a key player in the creative process. An example can be drawn from collaborative tools like OpenAI''s DALL-E or Adobe''s Sensei, which are enabling artists, writers, and designers to push their - creative limits. The article could feature testimonials from creators who have embraced AI as a partner in innovation, sharing stories of how these collaborations have led to unexpected and inspiring outcomes. Additionally, engaging with experts in the field of AI ethics can deepen the discussion about the implications of AI in art, including authorship, originality, and the definition of creativity itself. 
This exploration could stimulate a broader dialogue on the future of the creative industry and the role of AI within it.\n\n- **Personalized Learning through AI** \nIn the realm of education, AI is reshaping how personalized learning experiences are crafted for students. For instance, platforms like DreamBox and Knewton use AI algorithms to tailor lessons based on individual learning styles and paces. An article addressing this topic could synthesize insights gathered from educators who have implemented AI in their classrooms, sharing success stories and challenges faced when adjusting - teaching methodologies. By highlighting real user experiences, the discussion could extend to the ethical implications of data privacy, algorithmic bias, and equity in education, thereby painting a comprehensive picture of how AI is both a tool of progress and a subject for scrutiny.\n\n- **The Ethical Implications of AI in Decision Making** \nAs organizations increasingly rely on AI for decision-making\u2014be it through predictive analytics in finance or recruiting algorithms in HR\u2014the ethical implications of these technologies come into sharp focus. This article could pull in case studies such as Amazon''s recruitment tool that inadvertently favored male candidates, thereby unveiling the hidden biases entrenched in AI systems. By interviewing ethicists, data scientists, and business leaders, the narrative could explore how companies are navigating these ethical waters, balancing efficiency and fairness, and ensuring that AI serves as an equitable decision-making assistant - rather than a perpetuator of injustice. \n\n- **AI in Mental Health: A New Frontier** \nThe integration of AI in mental health care presents a myriad of possibilities, from chatbots like Woebot providing cognitive behavioral therapy (CBT) to AI tools that analyze speech for emotional cues. 
An inspiring article could chronicle the journeys of users who have benefited from AI-driven mental health services, juxtaposed with expert opinions from psychologists discussing the potential and limitations of AI in this sensitive field. Through this exploration, readers would gain a nuanced understanding of how AI is shaping therapeutic practices, the importance of human empathy in mental health support, and the ethical concerns that arise from relying on technology for emotional well-being.\n\nNotes: Each idea provides a rich ground for exploration, allowing for real-world applications and ethical considerations regarding AI. The proposed articles have the potential to engage a wide readership - by blending current trends, expert insights, and relatable experiences\u2014making them both informative and meaningful.", "pydantic": null, "json_dict": null, "agent": "Researcher", "output_format": "raw"}, "total_tokens": 1098}}, {"event_id": "e150cbcc-59ca-49e2-afcb-36adb0cc2e22", "timestamp": "2025-10-21T14:21:26.587778+00:00", "type": "crew_kickoff_started", "event_data": {"timestamp": "2025-10-21T14:21:26.587778+00:00", "type": "crew_kickoff_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": "crew", "crew": null, "inputs": {"topic": "AI"}}}], "batch_metadata": {"events_count": 15, "batch_sequence": 1, "is_final_batch": false}}' + body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Researcher. You're + an expert researcher, specialized in technology, software engineering, AI and + startups. 
You work as a freelancer and is now working on doing research and + analysis for a new customer.\\nYour personal goal is: Make the best research + and analysis on content about AI and AI agents\"},{\"role\":\"user\",\"content\":\"\\nCurrent + Task: Come up with a list of 5 interesting ideas to explore for an article, + then write one amazing paragraph highlight for each idea that showcases how + good an article about this topic could be. Return the list of ideas with their + paragraph and your notes.\\n\\nThis is the expected criteria for your final + answer: 5 bullet points with a paragraph for each idea.\\nyou MUST return the + actual complete content as the final answer, not a summary.\\n\\nProvide your + complete response:\"},{\"role\":\"assistant\",\"content\":\"- **The Rise of + Autonomous AI Agents: Redefining Productivity and Creativity** \\n This article + would dive into how autonomous AI agents\u2014intelligent software systems capable + of independently performing complex tasks\u2014are transforming industries by + augmenting human productivity and creativity. It would explore real-world use + cases from automated content generation and customer support bots to AI-driven + design and research assistants, illustrating how these agents reduce repetitive + workload and open new avenues for innovation. The article could also analyze + challenges such as ethical considerations, decision-making transparency, and + integration with existing workflows, offering readers a comprehensive view of + how autonomous AI agents are reshaping the future of work.\\n\\n- **Bridging + Human-AI Collaboration: Designing AI Agents for Intuitive Interaction** \\n + \ This piece would investigate the critical design principles behind successful + human-AI collaboration, focusing on building AI agents that communicate and + interact naturally with users. 
From natural language processing nuances to adaptive + learning from user behavior, the article would examine how these technological + advancements create seamless partnerships between humans and machines. Highlighting + case studies in healthcare, finance, and creative industries, it would demonstrate + the importance of trust, interpretability, and empathy in AI agent interfaces, + emphasizing how better-designed interactions can dramatically improve adoption + and effectiveness.\\n\\n- **The Ethical Frontier: Navigating Bias and Accountability + in AI Agents** \\n Exploring the ethical implications of deploying AI agents + at scale, this article would address pressing issues like algorithmic bias, + privacy concerns, and accountability in autonomous decision-making. It would + analyze how biases embedded in training data can propagate through AI agents, + impacting critical outcomes in hiring, lending, and law enforcement. The article + would also discuss emerging regulatory frameworks, best practices for auditing + AI agents, and the role of interdisciplinary ethics teams in ensuring these + technologies are fair, transparent, and responsible, helping readers grasp the + societal responsibilities accompanying AI advancement.\\n\\n- **AI Agents in + Startups: Driving Innovation and Competitive Advantage** \\n Focused on the + startup ecosystem, this article would explore how emerging companies leverage + AI agents to disrupt markets and scale rapidly with limited resources. It would + profile startups using AI agents for customer acquisition, personalized marketing, + operational automation, and product development, illustrating how these tools + enable lean teams to achieve much more. 
The narrative would consider investment + trends, challenges faced by startups incorporating AI agents, and strategies + for balancing innovation with reliability, providing entrepreneurs and investors + with valuable insights into harnessing AI agents for meaningful growth.\\n\\n- + **From Data to Decision: How AI Agents Transform Business Intelligence** \\n + \ This article would delve into the role of AI agents as intelligent intermediaries + in business intelligence (BI) systems, automating data analysis and delivering + actionable insights in real-time. It would explain how AI agents can parse vast + datasets, identify trends, generate forecasts, and even suggest strategic decisions + without constant human oversight. Highlighting innovations like conversational + BI interfaces and predictive analytics agents, the article would underscore + how businesses of all sizes can democratize data-driven decision-making, driving + agility and competitive advantage in increasingly complex markets.\"},{\"role\":\"system\",\"content\":\"You + are Researcher. You're an expert researcher, specialized in technology, software + engineering, AI and startups. You work as a freelancer and is now working on + doing research and analysis for a new customer.\\nYour personal goal is: Make + the best research and analysis on content about AI and AI agents\"},{\"role\":\"user\",\"content\":\"\\nCurrent + Task: Come up with a list of 5 interesting ideas to explore for an article, + then write one amazing paragraph highlight for each idea that showcases how + good an article about this topic could be. 
Return the list of ideas with their + paragraph and your notes.\\n\\nThis is the expected criteria for your final + answer: 5 bullet points with a paragraph for each idea.\\nyou MUST return the + actual complete content as the final answer, not a summary.\\n\\nProvide your + complete response:\"}],\"model\":\"gpt-4.1-mini\"}" headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, zstd - Connection: - - keep-alive - Content-Length: - - '68760' - Content-Type: - - application/json User-Agent: - - CrewAI-CLI/1.0.0 - X-Crewai-Version: - - 1.0.0 - method: POST - uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/b2bfe230-4539-4522-a372-ab58b85f4ce1/events - response: - body: - string: '{"events_created":15,"ephemeral_trace_batch_id":"9da97d5c-cb85-4950-8c08-3a33f4e87265"}' - headers: - Connection: - - keep-alive - Content-Length: - - '87' - Content-Type: - - application/json; charset=utf-8 - Date: - - Tue, 21 Oct 2025 14:21:27 GMT - cache-control: - - no-store - content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net 
https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' - etag: - - W/"5a6f0dc6b91e1a49e11fb8d9c581237c" - expires: - - '0' - permissions-policy: - - camera=(), microphone=(self), geolocation=() - pragma: - - no-cache - referrer-policy: - - strict-origin-when-cross-origin - 
strict-transport-security: - - max-age=63072000; includeSubDomains - vary: - - Accept - x-content-type-options: - - nosniff - x-frame-options: - - SAMEORIGIN - x-permitted-cross-domain-policies: - - none - x-request-id: - - bb343d8d-c717-4aae-8514-10d43504f6c0 - x-runtime: - - '0.189296' - x-xss-protection: - - 1; mode=block - status: - code: 200 - message: OK -- request: - body: '{"status": "completed", "duration_ms": 20012, "final_event_count": 15}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, zstd - Connection: - - keep-alive - Content-Length: - - '70' - Content-Type: - - application/json - User-Agent: - - CrewAI-CLI/1.0.0 - X-Crewai-Version: - - 1.0.0 - method: PATCH - uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/b2bfe230-4539-4522-a372-ab58b85f4ce1/finalize - response: - body: - string: '{"id":"9da97d5c-cb85-4950-8c08-3a33f4e87265","ephemeral_trace_id":"b2bfe230-4539-4522-a372-ab58b85f4ce1","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":20012,"crewai_version":"1.0.0","total_events":15,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.0.0","crew_fingerprint":null},"created_at":"2025-10-21T14:21:09.013Z","updated_at":"2025-10-21T14:21:27.614Z","access_code":"TRACE-4feb6c2ae8","user_identifier":null}' - headers: - Connection: - - keep-alive - Content-Length: - - '519' - Content-Type: - - application/json; charset=utf-8 - Date: - - Tue, 21 Oct 2025 14:21:27 GMT - cache-control: - - no-store - content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client 
https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' 
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' - etag: - - W/"582d3918b2f5e9373582e29a4e483d7a" - expires: - - '0' - permissions-policy: - - camera=(), microphone=(self), geolocation=() - pragma: - - no-cache - referrer-policy: - - strict-origin-when-cross-origin - strict-transport-security: - - max-age=63072000; includeSubDomains - vary: - - Accept - x-content-type-options: - - nosniff - x-frame-options: - - SAMEORIGIN - x-permitted-cross-domain-policies: - - none - x-request-id: - - 251e0eeb-e0cb-4e5a-ae95-388291fab541 - x-runtime: - - '0.064801' - x-xss-protection: - - 1; mode=block - status: - code: 200 - message: OK -- request: - body: '{"messages": [{"role": "system", "content": "You are Researcher. You''re an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.\nYour personal goal is: Make the best research and analysis on content about AI and AI agents\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. 
Return the list of ideas with their paragraph and your notes.\n\nThis is the expected criteria for your - final answer: 5 bullet points with a paragraph for each idea.\nyou MUST return the actual complete content as the final answer, not a summary.\n\nYou MUST follow these instructions: \n - Incorporate specific examples and case studies in initial outputs for clearer illustration of concepts.\n - Engage more with current events or trends to enhance relevance, especially in fields like remote work and decision-making.\n - Invite perspectives from experts and stakeholders to add depth to discussions on ethical implications and collaboration in creativity.\n - Use more precise language when discussing topics, ensuring clarity and accessibility for readers.\n - Encourage exploration of user experiences and testimonials to provide more relatable content, especially in education and mental health contexts.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stream": false}' - headers: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1979' + - '5482' content-type: - application/json cookie: - - __cf_bm=p.q22A4.LnQmooo01V_89ZAEGXj_S4fJNkPlbLadtaE-1761056485-1.0.1.1-txy4D4FrtqHpILOE_iiFcBXCTM8d2UsSGzKJeB0qgd3TosZJx3.EmL1CgIJqbJS31Qd5mnCHOqUjx6UFOgOxfBO1NpIe4inEmYUS9xJf33M; _cfuvid=Vix88TUp4dnmVridKpA6LWYGOsSdcnEg942n1s6NoNg-1761056485340-0.0.1.1-604800000 + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX 
x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CT7WoLeE11YIkd2ITzyzeMs1ozCFm\",\n \"object\": \"chat.completion\",\n \"created\": 1761056486,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer:\\n\\n- **The Rise of AI Agents in Remote Work** \\nAs remote work continues to dominate the global workplace, the integration of AI agents is poised to transform how teams collaborate and engage. A recent case study from the distributed team at GitLab, which employs advanced AI tools to enhance communication and productivity, illustrates how these agents can streamline workflows and provide real-time assistance. By leveraging AI-driven insights, employees can optimize their schedules, track performance metrics, and even manage project deliverables more efficiently. An article exploring this phenomenon could delve into how AI agents are not just\ - \ supporting remote work but redefining it, highlighting testimonials from employees who have experienced increased focus and reduced burnout as a result.\\n\\n- **Ethical Implications of AI in Education** \\nThe increasing use of AI in educational settings raises critical ethical questions, especially concerning equity and accessibility. A notable example is Georgia Tech's AI teaching assistant, Jill Watson, which has provided support to students while sparking debates about the human touch in teaching. An article that examines the dual-edged sword of employing AI as a mentor could consider perspectives from educators, students, and policymakers. 
By inviting thoughts from experts in both AI ethics and education reform, the article could tackle the implications of data privacy, algorithmic bias, and the necessity of inclusivity in developing educational technologies.\\n\\n- **AI as Creative Collaborators** \\nAI's role as a creative collaborator is rapidly evolving, illustrated\ - \ by projects like OpenAI's DALL-E and GPT-3, which have successfully co-created art and literature with human artists. This dynamic presents a unique opportunity to explore the implications of creativity in the era of AI, especially how it influences workflows and ideation processes. An article could feature interviews with artists and designers who have utilized AI tools, discussing their experiences and the collaborative filters that guide creative decisions. Furthermore, examining how these interactions can democratize access to creative expression could spark deeper conversations about the future of art and originality in a technologically saturated landscape.\\n\\n- **AI in Mental Health Support** \\nAs mental health challenges increase globally, AI's potential to provide support through chatbots and virtual assistants offers a timely topic for exploration. Programs like Woebot employ AI to interact with users, delivering cognitive behavioral therapy techniques and mood tracking\ - \ options. This article could spotlight user testimonials highlighting the balance between technology and empathetic understanding. Insights from mental health professionals could provide critically engaging discussions on how AI tools can supplement traditional therapy while addressing concerns surrounding data security and the effectiveness of such treatments.\\n\\n- **AI's Influence on Decision-Making Processes** \\nIn the fast-paced business world, AI systems, such as IBM Watson, are redefining decision-making by providing data-driven insights that were previously unattainable. 
An article could investigate how organizations have adopted AI tools to enhance strategic choices and reduce risks. By incorporating expert opinions from business leaders who have successfully integrated AI into their processes, the article could reveal not only the measurable impacts of AI on their decision-making efficacy but also the human aspects of adaptability and trust in technology. Furthermore,\ - \ exploring the challenges faced during implementation could present a balanced view on the AI adoption journey, making it relatable for companies contemplating similar paths.\\n\\nNotes: In developing these ideas, I prioritized relevance to current events, case studies, and expert perspectives. Each idea aims to highlight the transformative impact of AI across diverse domains while addressing ethical concerns and user experiences to foster relatable content for a wide audience.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 354,\n \"completion_tokens\": 693,\n \"total_tokens\": 1047,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\"\ - : 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDFzR5eiizUskHHVDIqVHcFSGrZxy\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052101,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"- **The Evolution of AI Agents: From + Simple Automation to Context-Aware Decision Makers** \\n This article would + chart the remarkable progression of AI agents, tracing their journey from + rule-based automation systems to sophisticated, context-aware entities capable + of 
reasoning and adapting within complex environments. By exploring advances + in reinforcement learning, natural language understanding, and multi-agent + collaboration, readers would gain insight into how modern AI agents operate + with increasing autonomy. The piece could include cutting-edge research examples + and practical applications, illustrating how this evolution unlocks new possibilities + across industries such as healthcare, logistics, and customer service, truly + showcasing the expanding potential and impact of AI agents in everyday life.\\n\\n- + **Designing Trustworthy AI Agents: Balancing Transparency and Performance** + \ \\n Focusing on the critical issue of trust, this article would explore + the tension between creating AI agents that offer high performance and those + designed to be transparent and explainable to users. It would delve into techniques + like explainable AI (XAI), confidence scoring, and user-centric design principles + that foster trust and accountability. With a mix of theoretical insights and + real-world implementations, the article would highlight how companies tackle + challenges in deploying AI agents responsibly\u2014especially in sensitive + domains like finance, law enforcement, and healthcare\u2014demonstrating how + trustworthiness can become a competitive advantage in AI-driven services.\\n\\n- + **AI Agents as Personal Productivity Assistants: Beyond Scheduling and Reminders** + \ \\n This topic examines how AI agents are evolving from basic virtual assistants + to powerful personal productivity coaches that understand context, anticipate + needs, and proactively manage tasks. The article would investigate advances + in multi-modal understanding, emotional intelligence, and continuous learning + that enable AI agents to provide nuanced support in time management, email + triage, project coordination, and even creative brainstorming. 
Case studies + from popular platforms and startups would showcase how this new generation + of AI agents is revolutionizing daily workflows for professionals across sectors, + offering readers a forward-looking perspective on the future of personal digital + assistance.\\n\\n- **Collaborative AI Agents in Multi-Agent Systems: Driving + Complex Problem Solving** \\n This article would focus on the growing field + of multi-agent AI systems, where multiple AI agents communicate, negotiate, + and collaborate to solve problems that are too complex for a single agent. + It would highlight research advances in swarm intelligence, decentralized + decision-making, and cooperative game theory, and demonstrate practical applications + ranging from autonomous vehicle fleets to smart grid management and disaster + response coordination. By unpacking these complex interactions, the article + would engage readers with the fascinating dynamics of AI ecosystems and the + promise of collaborative agents to address society\u2019s grand challenges.\\n\\n- + **Startups Building Next-Gen AI Agents: Innovating at the Intersection of + AI and User Experience** \\n Highlighting startups at the forefront of AI + agent technology, this article would provide an in-depth look at how these + ventures blend cutting-edge artificial intelligence with seamless user experiences + to disrupt traditional markets. It would examine how startups harness advances + in natural language processing, reinforcement learning, and personalized modeling + to create AI agents that feel intuitive and human-like, powering applications + in healthcare, education, finance, and customer engagement. 
The article would + also discuss funding trends, go-to-market strategies, and technological challenges, + offering entrepreneurs, investors, and technologists valuable insights into + what it takes to succeed in the burgeoning AI agent landscape.\\n\\n**Notes:** + \ \\nThese ideas are crafted to cover a broad spectrum of AI agent-related + topics, combining technical depth with real-world relevance. Each paragraph + aims to showcase the potential richness, relevance, and appeal of a full article, + ensuring the content would engage a diverse readership, from AI researchers + and software engineers to startup founders and business leaders interested + in AI innovation.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 926,\n \"completion_tokens\": + 727,\n \"total_tokens\": 1653,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_417e90869b\"\n}\n" headers: CF-RAY: - - 992166c25cef1b58-EWR + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 21 Oct 2025 14:21:42 GMT + - Wed, 25 Feb 2026 20:41:50 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '15340' + - '9082' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - 
'15393' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '10000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '9999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199538' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 8.64s + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 138ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_305226ed596943c186794ba45f2eec83 + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"trace_id": "97d598c1-4fc1-472e-8528-c1e410bb260e", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.0.0", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-21T14:21:42.063022+00:00"}}' + body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Task Execution + Evaluator. 
Evaluator agent for crew evaluation with precise capabilities to + evaluate the performance of the agents in the crew based on the tasks they have + performed\\nYour personal goal is: Your goal is to evaluate the performance + of the agents in the crew based on the tasks they have performed using score + from 1 to 10 evaluating on completion, quality, and overall performance.\"},{\"role\":\"user\",\"content\":\"\\nCurrent + Task: Based on the task description and the expected output, compare and evaluate + the performance of the agents in the crew based on the Task Output they have + performed using score from 1 to 10 evaluating on completion, quality, and overall + performance.task_description: Come up with a list of 5 interesting ideas to + explore for an article, then write one amazing paragraph highlight for each + idea that showcases how good an article about this topic could be. Return the + list of ideas with their paragraph and your notes. task_expected_output: 5 bullet + points with a paragraph for each idea. agent: Researcher agent_goal: Make the + best research and analysis on content about AI and AI agents Task Output: - + **The Evolution of AI Agents: From Simple Automation to Context-Aware Decision + Makers** \\n This article would chart the remarkable progression of AI agents, + tracing their journey from rule-based automation systems to sophisticated, context-aware + entities capable of reasoning and adapting within complex environments. By exploring + advances in reinforcement learning, natural language understanding, and multi-agent + collaboration, readers would gain insight into how modern AI agents operate + with increasing autonomy. 
The piece could include cutting-edge research examples + and practical applications, illustrating how this evolution unlocks new possibilities + across industries such as healthcare, logistics, and customer service, truly + showcasing the expanding potential and impact of AI agents in everyday life.\\n\\n- + **Designing Trustworthy AI Agents: Balancing Transparency and Performance** + \ \\n Focusing on the critical issue of trust, this article would explore the + tension between creating AI agents that offer high performance and those designed + to be transparent and explainable to users. It would delve into techniques like + explainable AI (XAI), confidence scoring, and user-centric design principles + that foster trust and accountability. With a mix of theoretical insights and + real-world implementations, the article would highlight how companies tackle + challenges in deploying AI agents responsibly\u2014especially in sensitive domains + like finance, law enforcement, and healthcare\u2014demonstrating how trustworthiness + can become a competitive advantage in AI-driven services.\\n\\n- **AI Agents + as Personal Productivity Assistants: Beyond Scheduling and Reminders** \\n + \ This topic examines how AI agents are evolving from basic virtual assistants + to powerful personal productivity coaches that understand context, anticipate + needs, and proactively manage tasks. The article would investigate advances + in multi-modal understanding, emotional intelligence, and continuous learning + that enable AI agents to provide nuanced support in time management, email triage, + project coordination, and even creative brainstorming. 
Case studies from popular + platforms and startups would showcase how this new generation of AI agents is + revolutionizing daily workflows for professionals across sectors, offering readers + a forward-looking perspective on the future of personal digital assistance.\\n\\n- + **Collaborative AI Agents in Multi-Agent Systems: Driving Complex Problem Solving** + \ \\n This article would focus on the growing field of multi-agent AI systems, + where multiple AI agents communicate, negotiate, and collaborate to solve problems + that are too complex for a single agent. It would highlight research advances + in swarm intelligence, decentralized decision-making, and cooperative game theory, + and demonstrate practical applications ranging from autonomous vehicle fleets + to smart grid management and disaster response coordination. By unpacking these + complex interactions, the article would engage readers with the fascinating + dynamics of AI ecosystems and the promise of collaborative agents to address + society\u2019s grand challenges.\\n\\n- **Startups Building Next-Gen AI Agents: + Innovating at the Intersection of AI and User Experience** \\n Highlighting + startups at the forefront of AI agent technology, this article would provide + an in-depth look at how these ventures blend cutting-edge artificial intelligence + with seamless user experiences to disrupt traditional markets. It would examine + how startups harness advances in natural language processing, reinforcement + learning, and personalized modeling to create AI agents that feel intuitive + and human-like, powering applications in healthcare, education, finance, and + customer engagement. 
The article would also discuss funding trends, go-to-market + strategies, and technological challenges, offering entrepreneurs, investors, + and technologists valuable insights into what it takes to succeed in the burgeoning + AI agent landscape.\\n\\n**Notes:** \\nThese ideas are crafted to cover a broad + spectrum of AI agent-related topics, combining technical depth with real-world + relevance. Each paragraph aims to showcase the potential richness, relevance, + and appeal of a full article, ensuring the content would engage a diverse readership, + from AI researchers and software engineers to startup founders and business + leaders interested in AI innovation.\\n\\nThis is the expected criteria for + your final answer: Evaluation Score from 1 to 10 based on the performance of + the agents on the tasks\\nyou MUST return the actual complete content as the + final answer, not a summary.\\nFormat your final answer according to the following + OpenAPI schema: {\\n \\\"properties\\\": {\\n \\\"quality\\\": {\\n \\\"description\\\": + \\\"A score from 1 to 10 evaluating on completion, quality, and overall performance + from the task_description and task_expected_output to the actual Task Output.\\\",\\n + \ \\\"title\\\": \\\"Quality\\\",\\n \\\"type\\\": \\\"number\\\"\\n + \ }\\n },\\n \\\"required\\\": [\\n \\\"quality\\\"\\n ],\\n \\\"title\\\": + \\\"TaskEvaluationPydanticOutput\\\",\\n \\\"type\\\": \\\"object\\\",\\n \\\"additionalProperties\\\": + false\\n}\\n\\nIMPORTANT: Preserve the original content exactly as-is. Do NOT + rewrite, paraphrase, or modify the meaning of the content. Only structure it + to match the schema format.\\n\\nDo not include the OpenAPI schema in the final + output. 
Ensure the final output does not include any code block markers like + ```json or ```python.\\n\\nProvide your complete response:\"}],\"model\":\"gpt-4o-mini\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"quality\":{\"description\":\"A + score from 1 to 10 evaluating on completion, quality, and overall performance + from the task_description and task_expected_output to the actual Task Output.\",\"title\":\"Quality\",\"type\":\"number\"}},\"required\":[\"quality\"],\"title\":\"TaskEvaluationPydanticOutput\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"TaskEvaluationPydanticOutput\",\"strict\":true}},\"stream\":false}" headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate, zstd - Connection: - - keep-alive - Content-Length: - - '434' - Content-Type: - - application/json User-Agent: - - CrewAI-CLI/1.0.0 - X-Crewai-Version: - - 1.0.0 - method: POST - uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches - response: - body: - string: '{"error":"bad_credentials","message":"Bad credentials"}' - headers: - Connection: - - keep-alive - Content-Length: - - '55' - Content-Type: - - application/json; charset=utf-8 - Date: - - Tue, 21 Oct 2025 14:21:42 GMT - cache-control: - - no-store - content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com 
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net 
https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' - expires: - - '0' - permissions-policy: - - camera=(), microphone=(self), geolocation=() - pragma: - - no-cache - referrer-policy: - - strict-origin-when-cross-origin - strict-transport-security: - - max-age=63072000; includeSubDomains - vary: - - Accept - x-content-type-options: - - nosniff - x-frame-options: - - SAMEORIGIN - x-permitted-cross-domain-policies: - - none - x-request-id: - - af7d2664-96a5-47fa-aa2b-5fef9fb2889c - x-runtime: - - '0.324159' - x-xss-protection: - - 1; mode=block - status: - code: 401 - message: Unauthorized -- request: - body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator. Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed\nYour personal goal is: Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating - on completion, quality, and overall performance.task_description: Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes. 
task_expected_output: 5 bullet points with a paragraph for each idea. agent: Researcher agent_goal: Make the best research and analysis on content about AI and AI agents Task Output: - **The Rise of AI Agents in Remote Work** \nAs remote work continues to dominate the global workplace, the integration of AI agents is poised to transform how teams collaborate and engage. A recent case study from the distributed team at GitLab, which employs advanced AI tools to enhance communication and productivity, illustrates how these agents can streamline workflows and provide real-time assistance. By leveraging AI-driven insights, employees can optimize their schedules, track performance - metrics, and even manage project deliverables more efficiently. An article exploring this phenomenon could delve into how AI agents are not just supporting remote work but redefining it, highlighting testimonials from employees who have experienced increased focus and reduced burnout as a result.\n\n- **Ethical Implications of AI in Education** \nThe increasing use of AI in educational settings raises critical ethical questions, especially concerning equity and accessibility. A notable example is Georgia Tech''s AI teaching assistant, Jill Watson, which has provided support to students while sparking debates about the human touch in teaching. An article that examines the dual-edged sword of employing AI as a mentor could consider perspectives from educators, students, and policymakers. By inviting thoughts from experts in both AI ethics and education reform, the article could tackle the implications of data privacy, algorithmic bias, and the necessity of inclusivity in developing - educational technologies.\n\n- **AI as Creative Collaborators** \nAI''s role as a creative collaborator is rapidly evolving, illustrated by projects like OpenAI''s DALL-E and GPT-3, which have successfully co-created art and literature with human artists. 
This dynamic presents a unique opportunity to explore the implications of creativity in the era of AI, especially how it influences workflows and ideation processes. An article could feature interviews with artists and designers who have utilized AI tools, discussing their experiences and the collaborative filters that guide creative decisions. Furthermore, examining how these interactions can democratize access to creative expression could spark deeper conversations about the future of art and originality in a technologically saturated landscape.\n\n- **AI in Mental Health Support** \nAs mental health challenges increase globally, AI''s potential to provide support through chatbots and virtual assistants offers a timely topic for - exploration. Programs like Woebot employ AI to interact with users, delivering cognitive behavioral therapy techniques and mood tracking options. This article could spotlight user testimonials highlighting the balance between technology and empathetic understanding. Insights from mental health professionals could provide critically engaging discussions on how AI tools can supplement traditional therapy while addressing concerns surrounding data security and the effectiveness of such treatments.\n\n- **AI''s Influence on Decision-Making Processes** \nIn the fast-paced business world, AI systems, such as IBM Watson, are redefining decision-making by providing data-driven insights that were previously unattainable. An article could investigate how organizations have adopted AI tools to enhance strategic choices and reduce risks. By incorporating expert opinions from business leaders who have successfully integrated AI into their processes, the article could reveal not only the measurable - impacts of AI on their decision-making efficacy but also the human aspects of adaptability and trust in technology. 
Furthermore, exploring the challenges faced during implementation could present a balanced view on the AI adoption journey, making it relatable for companies contemplating similar paths.\n\nNotes: In developing these ideas, I prioritized relevance to current events, case studies, and expert perspectives. Each idea aims to highlight the transformative impact of AI across diverse domains while addressing ethical concerns and user experiences to foster relatable content for a wide audience.\n\nThis is the expected criteria for your final answer: Evaluation Score from 1 to 10 based on the performance of the agents on the tasks\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"quality\": float\n}\n\nEnsure the final output does not include any code block markers - like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stream": false}' - headers: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '6196' + - '7196' content-type: - application/json cookie: - - __cf_bm=ujzGLIjq87ZWlYFBH3FycG8cIYtaTjon9XdbV63J84s-1761056486-1.0.1.1-V9wZVN8TxLIQ..Cd6VD53rSKVM8GssieHpzu53MMLsuoM7jVI8nAKNTbZeCqJxyHPutyhj_BwPvR56_gb0Nx90S6pVs3gQC2vj8VmCPbh1Y; _cfuvid=GIa.4ZxD52A2dXSZxoW1Mckm_eGjntP2i_mB4sczwEI-1761056486732-0.0.1.1-604800000 + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 
x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CT7X4zWdSfYW2eTqKKF8ou174Mwp0\",\n \"object\": \"chat.completion\",\n \"created\": 1761056502,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: {\\n \\\"quality\\\": 9.0\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1084,\n \"completion_tokens\": 22,\n \"total_tokens\": 1106,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_560af6e559\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDFzaYq2i96GKjZisy507Xk2rVvjn\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052110,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"quality\\\":9}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 1264,\n \"completion_tokens\": 5,\n \"total_tokens\": 1269,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": 
\"fp_bd4be55b21\"\n}\n" headers: CF-RAY: - - 99216723793e6180-EWR + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 21 Oct 2025 14:21:43 GMT + - Wed, 25 Feb 2026 20:41:51 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '792' + - '391' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '942' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '10000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '9998' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '198488' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 9.341s + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 453ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_b521ced37dbf49beb8d19fcf36980e3c + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_custom_converter_cls.yaml b/lib/crewai/tests/cassettes/test_custom_converter_cls.yaml index 541d5e7b5..ddb9f56df 100644 --- a/lib/crewai/tests/cassettes/test_custom_converter_cls.yaml +++ b/lib/crewai/tests/cassettes/test_custom_converter_cls.yaml @@ -1,97 +1,120 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1394' + - '1421' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: 
- - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0OJQX3eMkY3pcrZz7iSh2HHTPF\",\n \"object\": \"chat.completion\",\n \"created\": 1762380656,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\\"score\\\":4}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 18,\n \"total_tokens\": 312,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDDzfvCsU0fZWdxFwjGh6dmaEheAW\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044427,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + 
- CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:56 GMT + - Wed, 25 Feb 2026 18:33:48 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:56 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '770' + - '552' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '796' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_guardrail_emits_events.yaml b/lib/crewai/tests/cassettes/test_guardrail_emits_events.yaml index 530c495a0..5f2e3d9de 100644 --- a/lib/crewai/tests/cassettes/test_guardrail_emits_events.yaml +++ b/lib/crewai/tests/cassettes/test_guardrail_emits_events.yaml @@ -1,684 +1,684 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Test Agent. 
Test Backstory\nYour personal goal is: Test Goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Gather information about available books on the First World War\n\nThis is the expected criteria for your final answer: A list of available books on the First World War\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour + personal goal is: Test Goal"},{"role":"user","content":"\nCurrent Task: Gather + information about available books on the First World War\n\nThis is the expected + criteria for your final answer: A list of available books on the First World + War\nyou MUST return the actual complete content as the final answer, not a + summary.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '866' + - '465' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 
3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYgCMDtJgyyp33WkN0RtsJ9aI3zOA\",\n \"object\": \"chat.completion\",\n \"created\": 1762381398,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: \\n\\nList of available books on the First World War:\\n\\n1. \\\"The Guns of August\\\" by Barbara W. Tuchman \\n - A detailed narrative of the first month of World War I, focusing on the political and military events that set the stage for the conflict.\\n\\n2. \\\"The First World War\\\" by John Keegan \\n - A comprehensive analysis of the causes, battles, and consequences of WWI by one of the foremost military historians.\\n\\n3. \\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer \\n - An accessible and detailed account covering the entire war, including social and political impacts.\\n\\n4. \\\ - \"The Sleepwalkers: How Europe Went to War in 1914\\\" by Christopher Clark \\n - Explores the complex political landscape and decisions that led to the outbreak of the war.\\n\\n5. \\\"The Pity of War: Explaining World War I\\\" by Niall Ferguson \\n - Offers a controversial interpretation of the war’s causes and outcomes.\\n\\n6. \\\"World War I: A Very Short Introduction\\\" by Michael Howard \\n - A brief but thorough overview of WWI, including its military and political aspects.\\n\\n7. \\\"Goodbye to All That\\\" by Robert Graves \\n - A personal memoir of a British officer’s experiences during the war, highlighting the human side.\\n\\n8. \\\"Storm of Steel\\\" by Ernst Jünger \\n - A German soldier’s firsthand account of combat on the Western Front.\\n\\n9. 
\\\"The War That Ended Peace: The Road to 1914\\\" by Margaret MacMillan \\n - Focuses on the lead-up to WWI and the political tensions that culminated in the conflict.\\n\\n10. \\\"The First World War:\ - \ An Illustrated History\\\" by Hew Strachan \\n - Combines detailed research with rich illustrations to provide a comprehensive history of the war.\\n\\nThese books provide a range of perspectives, from military history and political analysis to personal memoirs and social impact, offering comprehensive information about the First World War.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 170,\n \"completion_tokens\": 433,\n \"total_tokens\": 603,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGADaYtAf39e4orQQyF7DAeZbQLX\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052769,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Here is a list of available books on + the First World War:\\n\\n1. \\\"The Guns of August\\\" by Barbara W. Tuchman\\n2. + \\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer\\n3. + \\\"The First World War\\\" by John Keegan\\n4. \\\"The Sleepwalkers: How + Europe Went to War in 1914\\\" by Christopher Clark\\n5. \\\"To End All Wars: + A Story of Loyalty and Rebellion, 1914-1918\\\" by Adam Hochschild\\n6. \\\"The + Pity of War\\\" by Niall Ferguson\\n7. \\\"Catastrophe 1914: Europe Goes to + War\\\" by Max Hastings\\n8. \\\"World War I: The Definitive Visual History\\\" + by R.G. Grant\\n9. 
\\\"The War That Ended Peace: The Road to 1914\\\" by Margaret + MacMillan\\n10. \\\"Paris 1919: Six Months That Changed the World\\\" by Margaret + MacMillan\\n\\nThese books cover various aspects of the First World War, including + its causes, key battles, political consequences, and personal experiences.\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 84,\n \"completion_tokens\": 240,\n \"total_tokens\": 324,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:23:24 GMT + - Wed, 25 Feb 2026 20:52:52 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:53:24 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '5778' + - '3002' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '5803' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX 
x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199808' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 57ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": - [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. 
Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n List of available books on the First World War:\n\n1. \"The Guns of August\" by Barbara W. Tuchman \n - A detailed narrative of the first month of World War I, focusing on the political and - military events that set the stage for the conflict.\n\n2. \"The First World War\" by John Keegan \n - A comprehensive analysis of the causes, battles, and consequences of WWI by one of the foremost military historians.\n\n3. \"A World Undone: The Story of the Great War, 1914 to 1918\" by G.J. Meyer \n - An accessible and detailed account covering the entire war, including social and political impacts.\n\n4. \"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark \n - Explores the complex political landscape and decisions that led to the outbreak of the war.\n\n5. \"The Pity of War: Explaining World War I\" by Niall Ferguson \n - Offers a controversial interpretation of the war’s causes and outcomes.\n\n6. \"World War I: A Very Short Introduction\" by Michael Howard \n - A brief but thorough overview of WWI, including its military and political aspects.\n\n7. \"Goodbye to All That\" by Robert Graves \n - A personal memoir of a British officer’s experiences - during the war, highlighting the human side.\n\n8. \"Storm of Steel\" by Ernst Jünger \n - A German soldier’s firsthand account of combat on the Western Front.\n\n9. \"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan \n - Focuses on the lead-up to WWI and the political tensions that culminated in the conflict.\n\n10. 
\"The First World War: An Illustrated History\" by Hew Strachan \n - Combines detailed research with rich illustrations to provide a comprehensive history of the war.\n\nThese books provide a range of perspectives, from military history and political analysis to personal memoirs and social impact, offering comprehensive information about the First World War.\n\n Guardrail:\n Ensure the authors are from Italy\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4.1-mini"}' + body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent. + You are a expert at validating the output of a task. By providing effective + feedback if the output is not valid.\\nYour personal goal is: Validate the output + of the task\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure + the following task result complies with the given guardrail.\\n\\n Task + result:\\n Here is a list of available books on the First World War:\\n\\n1. + \\\"The Guns of August\\\" by Barbara W. Tuchman\\n2. \\\"A World Undone: The + Story of the Great War, 1914 to 1918\\\" by G.J. Meyer\\n3. \\\"The First World + War\\\" by John Keegan\\n4. \\\"The Sleepwalkers: How Europe Went to War in + 1914\\\" by Christopher Clark\\n5. \\\"To End All Wars: A Story of Loyalty and + Rebellion, 1914-1918\\\" by Adam Hochschild\\n6. \\\"The Pity of War\\\" by + Niall Ferguson\\n7. \\\"Catastrophe 1914: Europe Goes to War\\\" by Max Hastings\\n8. + \\\"World War I: The Definitive Visual History\\\" by R.G. Grant\\n9. \\\"The + War That Ended Peace: The Road to 1914\\\" by Margaret MacMillan\\n10. 
\\\"Paris + 1919: Six Months That Changed the World\\\" by Margaret MacMillan\\n\\nThese + books cover various aspects of the First World War, including its causes, key + battles, political consequences, and personal experiences.\\n\\n Guardrail:\\n + \ Ensure the authors are from Italy\\n\\n Your task:\\n - + Confirm if the Task result complies with the guardrail.\\n - If not, + provide clear feedback explaining what is wrong (e.g., by how much it violates + the rule, or what specific part fails).\\n - Focus only on identifying + issues \u2014 do not propose corrections.\\n - If the Task result complies + with the guardrail, saying that is valid\\n \\n\\nProvide your complete + response:\"}],\"model\":\"gpt-4.1-mini\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"valid\":{\"description\":\"Whether + the task output complies with the guardrail\",\"title\":\"Valid\",\"type\":\"boolean\"},\"feedback\":{\"anyOf\":[{\"type\":\"string\"},{\"type\":\"null\"}],\"description\":\"A + feedback about the task output if it is not valid\",\"title\":\"Feedback\"}},\"required\":[\"valid\",\"feedback\"],\"title\":\"LLMGuardrailResult\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"LLMGuardrailResult\",\"strict\":true}},\"stream\":false}" headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '4210' + - '2271' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.109.1 - x-stainless-read-timeout: - - '600' - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - 
x-stainless-runtime-version: - - 3.12.9 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-CYgCSORvqXRvHk9WrMZInBh6xp6DI\",\n \"object\": \"chat.completion\",\n \"created\": 1762381404,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\n \\\"valid\\\": false,\\n \\\"feedback\\\": \\\"None of the listed authors are from Italy; all authors mentioned are from various other countries such as the United States, United Kingdom, Germany, and Canada. This does not comply with the guardrail which requires authors to be from Italy.\\\"\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 878,\n \"completion_tokens\": 60,\n \"total_tokens\": 938,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \ - \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" - headers: - CF-RAY: - - REDACTED-RAY - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Wed, 05 Nov 2025 22:23:25 GMT - Server: - - cloudflare - Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - nosniff - access-control-expose-headers: - - X-Request-ID - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q - openai-processing-ms: - - '920' - openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '931' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - '500' - x-ratelimit-limit-tokens: 
- - '200000' - x-ratelimit-remaining-requests: - - '499' - x-ratelimit-remaining-tokens: - - '199013' - x-ratelimit-reset-requests: - - 120ms - x-ratelimit-reset-tokens: - - 296ms - x-request-id: - - req_REDACTED - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": - false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\n \"valid\": false,\n \"feedback\": \"None of the listed authors are from Italy; all authors mentioned are from various other countries such as the United States, United Kingdom, Germany, and Canada. 
This does not comply with the guardrail which requires authors to be from Italy.\"\n}"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate, zstd - connection: - - keep-alive - content-length: - - '2020' - content-type: - - application/json - cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 - x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-helper-method: - - chat.completions.parse + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYgCTfEoC2Q5aZFEAXbsW4zQq0Ihu\",\n \"object\": \"chat.completion\",\n \"created\": 1762381405,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The output indicates that none of the authors mentioned are from Italy, while the guardrail requires authors to be from Italy. 
Therefore, the output does not comply with the guardrail.\\\"}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 397,\n \"completion_tokens\": 44,\n \"total_tokens\": 441,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\"\ - : 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGAImuBOpJTAf2UGXPUCFcRcYXua\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052774,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"None + of the authors listed are from Italy. The guardrail requires that the authors + be from Italy, but all the authors mentioned, such as Barbara W. Tuchman, + G.J. Meyer, John Keegan, Christopher Clark, Adam Hochschild, Niall Ferguson, + Max Hastings, R.G. Grant, and Margaret MacMillan, are not Italian. 
Therefore, + the task result does not comply with the guardrail.\\\"}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 500,\n \"completion_tokens\": 93,\n \"total_tokens\": 593,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_d96d8e3578\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:23:26 GMT + - Wed, 25 Feb 2026 20:52:56 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '1214' + - '1401' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '1251' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199670' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 99ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: 
'{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour personal goal is: Test Goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Gather information about available books on the First World War\n\nThis is the expected criteria for your final answer: A list of available books on the First World War\nyou MUST return the actual complete content as the final answer, not a summary.\n\nThis is the context you''re working with:\n### Previous attempt failed validation: The output indicates that none of the authors mentioned are from Italy, while the guardrail requires authors to be from Italy. Therefore, the output does not comply with the guardrail.\n\n\n### Previous - result:\nList of available books on the First World War:\n\n1. \"The Guns of August\" by Barbara W. Tuchman \n - A detailed narrative of the first month of World War I, focusing on the political and military events that set the stage for the conflict.\n\n2. \"The First World War\" by John Keegan \n - A comprehensive analysis of the causes, battles, and consequences of WWI by one of the foremost military historians.\n\n3. \"A World Undone: The Story of the Great War, 1914 to 1918\" by G.J. Meyer \n - An accessible and detailed account covering the entire war, including social and political impacts.\n\n4. \"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark \n - Explores the complex political landscape and decisions that led to the outbreak of the war.\n\n5. \"The Pity of War: Explaining World War I\" by Niall Ferguson \n - Offers a controversial interpretation of the war’s causes and outcomes.\n\n6. 
\"World War I: A Very Short Introduction\" by Michael - Howard \n - A brief but thorough overview of WWI, including its military and political aspects.\n\n7. \"Goodbye to All That\" by Robert Graves \n - A personal memoir of a British officer’s experiences during the war, highlighting the human side.\n\n8. \"Storm of Steel\" by Ernst Jünger \n - A German soldier’s firsthand account of combat on the Western Front.\n\n9. \"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan \n - Focuses on the lead-up to WWI and the political tensions that culminated in the conflict.\n\n10. \"The First World War: An Illustrated History\" by Hew Strachan \n - Combines detailed research with rich illustrations to provide a comprehensive history of the war.\n\nThese books provide a range of perspectives, from military history and political analysis to personal memoirs and social impact, offering comprehensive information about the First World War.\n\n\nTry again, making sure to address the validation error.\n\nBegin! This is VERY - important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour + personal goal is: Test Goal"},{"role":"user","content":"\nCurrent Task: Gather + information about available books on the First World War\n\nThis is the expected + criteria for your final answer: A list of available books on the First World + War\nyou MUST return the actual complete content as the final answer, not a + summary.\n\nProvide your complete response:"},{"role":"assistant","content":"Here + is a list of available books on the First World War:\n\n1. \"The Guns of August\" + by Barbara W. Tuchman\n2. \"A World Undone: The Story of the Great War, 1914 + to 1918\" by G.J. Meyer\n3. \"The First World War\" by John Keegan\n4. \"The + Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark\n5. 
\"To + End All Wars: A Story of Loyalty and Rebellion, 1914-1918\" by Adam Hochschild\n6. + \"The Pity of War\" by Niall Ferguson\n7. \"Catastrophe 1914: Europe Goes to + War\" by Max Hastings\n8. \"World War I: The Definitive Visual History\" by + R.G. Grant\n9. \"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan\n10. + \"Paris 1919: Six Months That Changed the World\" by Margaret MacMillan\n\nThese + books cover various aspects of the First World War, including its causes, key + battles, political consequences, and personal experiences."},{"role":"system","content":"You + are Test Agent. Test Backstory\nYour personal goal is: Test Goal"},{"role":"user","content":"\nCurrent + Task: Gather information about available books on the First World War\n\nThis + is the expected criteria for your final answer: A list of available books on + the First World War\nyou MUST return the actual complete content as the final + answer, not a summary.\n\nThis is the context you''re working with:\n### Previous + attempt failed validation: None of the authors listed are from Italy. The guardrail + requires that the authors be from Italy, but all the authors mentioned, such + as Barbara W. Tuchman, G.J. Meyer, John Keegan, Christopher Clark, Adam Hochschild, + Niall Ferguson, Max Hastings, R.G. Grant, and Margaret MacMillan, are not Italian. + Therefore, the task result does not comply with the guardrail.\n\n\n### Previous + result:\nHere is a list of available books on the First World War:\n\n1. \"The + Guns of August\" by Barbara W. Tuchman\n2. \"A World Undone: The Story of the + Great War, 1914 to 1918\" by G.J. Meyer\n3. \"The First World War\" by John + Keegan\n4. \"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher + Clark\n5. \"To End All Wars: A Story of Loyalty and Rebellion, 1914-1918\" by + Adam Hochschild\n6. \"The Pity of War\" by Niall Ferguson\n7. \"Catastrophe + 1914: Europe Goes to War\" by Max Hastings\n8. 
\"World War I: The Definitive + Visual History\" by R.G. Grant\n9. \"The War That Ended Peace: The Road to 1914\" + by Margaret MacMillan\n10. \"Paris 1919: Six Months That Changed the World\" + by Margaret MacMillan\n\nThese books cover various aspects of the First World + War, including its causes, key battles, political consequences, and personal + experiences.\n\n\nTry again, making sure to address the validation error.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '3139' + - '3185' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYgCUFGx7P3dG6mkKENsAdMSjbDP2\",\n \"object\": \"chat.completion\",\n \"created\": 1762381406,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: List of available books on the First World War by Italian authors or published in Italy:\\n\\n1. \\\"La Grande Guerra\\\" by Alessandro Barbero \\n - A comprehensive history of the First World War with a focus on Italy’s involvement and the impact on Italian society and politics.\\n\\n2. 
\\\"La guerra bianca: Come gli Italiani hanno combattuto la Prima guerra mondiale\\\" by Alessandro Barbero \\n - Explores the harsh conditions of mountain warfare endured by Italian soldiers, with vivid descriptions of battles and daily life on the Alpine front.\\n\\n3. \\\"La Prima Guerra Mondiale\\\" by Paolo Mieli \\n - Provides a broad overview\ - \ of the war, including Italy's role and the broader European context, combining historical facts with cultural insights.\\n\\n4. \\\"1915-1918: La guerra italiana sul fronte interno\\\" by Giovanni De Luna \\n - Focuses on the social and political effects of the war within Italy, examining public opinion, propaganda, and the home front.\\n\\n5. \\\"Il Giorno della Vittoria: 4 Novembre 1918\\\" by Paolo Gaspari \\n - Detailed analysis of Italy’s final victories and the aftermath of the war, including the political and social changes that followed.\\n\\n6. \\\"La Guerra prima della Grande Guerra\\\" by Alessandro Barbero \\n - Discusses the international tensions preceding the First World War with an Italian perspective on diplomatic and military developments.\\n\\n7. \\\"La guerra dimenticata: Il fronte orientale, 1915-1917\\\" by Luca Micheletti \\n - Covers Italy’s campaigns on the Eastern Front, less frequently discussed in general histories of the war, providing new\ - \ insights into Italy’s military efforts.\\n\\n8. \\\"Tornare a casa. Grande Guerra 1914-1918\\\" by Marco Balzano \\n - A novelized account based on testimonies and letters from Italian soldiers, blending historical documentation with personal narrative.\\n\\n9. \\\"I cento giorni: La battaglia finale della Grande Guerra\\\" by Giuseppe Cultrera \\n - Concentrates on the decisive Italian military operations in the war’s final phase, highlighting strategy and key personalities.\\n\\n10. 
\\\"La Grande Guerra sul Carso\\\" by Mario Isnenghi \\n - Focuses specifically on the Carso front, a crucial and brutal theater of war for Italian forces, combining military history with human stories.\\n\\nThese titles emphasize the Italian experience of the First World War, authored by prominent Italian historians and writers, thus fulfilling the requirement for Italian authorship and perspective.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\"\ - : null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 654,\n \"completion_tokens\": 516,\n \"total_tokens\": 1170,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGAKo0MLHbMnvDBrDJ43r7OO4uh8\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052776,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Here is a list of available books on + the First World War by Italian authors:\\n\\n1. \\\"La Grande Guerra\\\" by + Alessandro Barbero \\n2. \\\"Il grande nulla: La prima guerra mondiale\\\" + by Emilio Gentile \\n3. \\\"La guerra italiana 1915-1918\\\" by Paolo Piccione + \ \\n4. \\\"La prima guerra mondiale\\\" by Andrea di Robilant \\n5. \\\"1914-1918: + La guerra e la memoria\\\" edited by Paolo G. 
Minuto \\n\\nThese books provide + various perspectives on the First World War from Italian historians and authors.\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 763,\n \"completion_tokens\": 117,\n \"total_tokens\": 880,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:23:34 GMT + - Wed, 25 Feb 2026 20:52:58 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '7917' + - '1725' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '7943' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199255' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 223ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: 
OK - request: - body: '{"messages":[{"role":"system","content":"You are Guardrail Agent. You are a expert at validating the output of a task. By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": - [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n List of available books on the First World War by Italian authors or published in Italy:\n\n1. \"La Grande Guerra\" by Alessandro Barbero \n - A comprehensive history of the First World War - with a focus on Italy’s involvement and the impact on Italian society and politics.\n\n2. 
\"La guerra bianca: Come gli Italiani hanno combattuto la Prima guerra mondiale\" by Alessandro Barbero \n - Explores the harsh conditions of mountain warfare endured by Italian soldiers, with vivid descriptions of battles and daily life on the Alpine front.\n\n3. \"La Prima Guerra Mondiale\" by Paolo Mieli \n - Provides a broad overview of the war, including Italy''s role and the broader European context, combining historical facts with cultural insights.\n\n4. \"1915-1918: La guerra italiana sul fronte interno\" by Giovanni De Luna \n - Focuses on the social and political effects of the war within Italy, examining public opinion, propaganda, and the home front.\n\n5. \"Il Giorno della Vittoria: 4 Novembre 1918\" by Paolo Gaspari \n - Detailed analysis of Italy’s final victories and the aftermath of the war, including the political and social changes that followed.\n\n6. \"La Guerra - prima della Grande Guerra\" by Alessandro Barbero \n - Discusses the international tensions preceding the First World War with an Italian perspective on diplomatic and military developments.\n\n7. \"La guerra dimenticata: Il fronte orientale, 1915-1917\" by Luca Micheletti \n - Covers Italy’s campaigns on the Eastern Front, less frequently discussed in general histories of the war, providing new insights into Italy’s military efforts.\n\n8. \"Tornare a casa. Grande Guerra 1914-1918\" by Marco Balzano \n - A novelized account based on testimonies and letters from Italian soldiers, blending historical documentation with personal narrative.\n\n9. \"I cento giorni: La battaglia finale della Grande Guerra\" by Giuseppe Cultrera \n - Concentrates on the decisive Italian military operations in the war’s final phase, highlighting strategy and key personalities.\n\n10. 
\"La Grande Guerra sul Carso\" by Mario Isnenghi \n - Focuses specifically on the Carso front, a crucial and - brutal theater of war for Italian forces, combining military history with human stories.\n\nThese titles emphasize the Italian experience of the First World War, authored by prominent Italian historians and writers, thus fulfilling the requirement for Italian authorship and perspective.\n\n Guardrail:\n Ensure the authors are from Italy\n\n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues — do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}],"model":"gpt-4.1-mini"}' + body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent. + You are a expert at validating the output of a task. By providing effective + feedback if the output is not valid.\\nYour personal goal is: Validate the output + of the task\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure + the following task result complies with the given guardrail.\\n\\n Task + result:\\n Here is a list of available books on the First World War by + Italian authors:\\n\\n1. \\\"La Grande Guerra\\\" by Alessandro Barbero \\n2. + \\\"Il grande nulla: La prima guerra mondiale\\\" by Emilio Gentile \\n3. \\\"La + guerra italiana 1915-1918\\\" by Paolo Piccione \\n4. \\\"La prima guerra mondiale\\\" + by Andrea di Robilant \\n5. \\\"1914-1918: La guerra e la memoria\\\" edited + by Paolo G. 
Minuto \\n\\nThese books provide various perspectives on the First + World War from Italian historians and authors.\\n\\n Guardrail:\\n Ensure + the authors are from Italy\\n\\n Your task:\\n - Confirm if the + Task result complies with the guardrail.\\n - If not, provide clear feedback + explaining what is wrong (e.g., by how much it violates the rule, or what specific + part fails).\\n - Focus only on identifying issues \u2014 do not propose + corrections.\\n - If the Task result complies with the guardrail, saying + that is valid\\n \\n\\nProvide your complete response:\"}],\"model\":\"gpt-4.1-mini\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"valid\":{\"description\":\"Whether + the task output complies with the guardrail\",\"title\":\"Valid\",\"type\":\"boolean\"},\"feedback\":{\"anyOf\":[{\"type\":\"string\"},{\"type\":\"null\"}],\"description\":\"A + feedback about the task output if it is not valid\",\"title\":\"Feedback\"}},\"required\":[\"valid\",\"feedback\"],\"title\":\"LLMGuardrailResult\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"LLMGuardrailResult\",\"strict\":true}},\"stream\":false}" headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '4782' + - '1896' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython 
x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYgCc8ZK4YyFBqgK1P1mOpdaCPfC7\",\n \"object\": \"chat.completion\",\n \"created\": 1762381414,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\n \\\"valid\\\": true,\\n \\\"feedback\\\": null\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 962,\n \"completion_tokens\": 14,\n \"total_tokens\": 976,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGAMhqLQH0xSQdFBe8xOEyjBIa59\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052778,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 377,\n \"completion_tokens\": 9,\n \"total_tokens\": 386,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_d96d8e3578\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 
2025 22:23:35 GMT + - Wed, 25 Feb 2026 20:52:58 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '395' - openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - '420' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - '500' - x-ratelimit-limit-tokens: - - '200000' - x-ratelimit-remaining-requests: - - '499' - x-ratelimit-remaining-tokens: - - '198710' - x-ratelimit-reset-requests: - - 120ms - x-ratelimit-reset-tokens: - - 386ms - x-request-id: - - req_REDACTED - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": {\n \"properties\": {\n \"valid\": {\n \"description\": \"Whether the task output complies with the guardrail\",\n \"title\": \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": \"null\"\n }\n ],\n \"default\": null,\n \"description\": \"A feedback about the task output if it is not valid\",\n \"title\": \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": - false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. 
Ensure the final output does not include any code block markers like ```json or ```python."},{"role":"user","content":"{\n \"valid\": true,\n \"feedback\": null\n}"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate, zstd - connection: - - keep-alive - content-length: - - '1777' - content-type: - - application/json - cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-helper-method: - - chat.completions.parse - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.109.1 - x-stainless-read-timeout: - - '600' - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.12.9 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-CYgCdfjTW26WehWv9HjK3NHOT5e4l\",\n \"object\": \"chat.completion\",\n \"created\": 1762381415,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 351,\n \"completion_tokens\": 9,\n \"total_tokens\": 360,\n \"prompt_tokens_details\": {\n 
\"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" - headers: - CF-RAY: - - REDACTED-RAY - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Wed, 05 Nov 2025 22:23:35 GMT - Server: - - cloudflare - Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - nosniff - access-control-expose-headers: - - X-Request-ID - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q - openai-processing-ms: - - '424' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '450' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199730' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 81ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages":[{"role":"system","content":"You are Test Agent. 
Test Backstory\nYour personal goal is: Test Goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Test task\n\nThis is the expected criteria for your final answer: Output\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour + personal goal is: Test Goal"},{"role":"user","content":"\nCurrent Task: Gather + information about available books on the First World War\n\nThis is the expected + criteria for your final answer: A list of available books on the First World + War\nyou MUST return the actual complete content as the final answer, not a + summary.\n\nProvide your complete response:"},{"role":"assistant","content":"Here + is a list of available books on the First World War:\n\n1. \"The Guns of August\" + by Barbara W. Tuchman\n2. \"A World Undone: The Story of the Great War, 1914 + to 1918\" by G.J. Meyer\n3. \"The First World War\" by John Keegan\n4. \"The + Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark\n5. \"To + End All Wars: A Story of Loyalty and Rebellion, 1914-1918\" by Adam Hochschild\n6. + \"The Pity of War\" by Niall Ferguson\n7. \"Catastrophe 1914: Europe Goes to + War\" by Max Hastings\n8. \"World War I: The Definitive Visual History\" by + R.G. Grant\n9. \"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan\n10. 
+ \"Paris 1919: Six Months That Changed the World\" by Margaret MacMillan\n\nThese + books cover various aspects of the First World War, including its causes, key + battles, political consequences, and personal experiences."},{"role":"system","content":"You + are Test Agent. Test Backstory\nYour personal goal is: Test Goal"},{"role":"user","content":"\nCurrent + Task: Gather information about available books on the First World War\n\nThis + is the expected criteria for your final answer: A list of available books on + the First World War\nyou MUST return the actual complete content as the final + answer, not a summary.\n\nThis is the context you''re working with:\n### Previous + attempt failed validation: None of the authors listed are from Italy. The guardrail + requires that the authors be from Italy, but all the authors mentioned, such + as Barbara W. Tuchman, G.J. Meyer, John Keegan, Christopher Clark, Adam Hochschild, + Niall Ferguson, Max Hastings, R.G. Grant, and Margaret MacMillan, are not Italian. + Therefore, the task result does not comply with the guardrail.\n\n\n### Previous + result:\nHere is a list of available books on the First World War:\n\n1. \"The + Guns of August\" by Barbara W. Tuchman\n2. \"A World Undone: The Story of the + Great War, 1914 to 1918\" by G.J. Meyer\n3. \"The First World War\" by John + Keegan\n4. \"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher + Clark\n5. \"To End All Wars: A Story of Loyalty and Rebellion, 1914-1918\" by + Adam Hochschild\n6. \"The Pity of War\" by Niall Ferguson\n7. \"Catastrophe + 1914: Europe Goes to War\" by Max Hastings\n8. \"World War I: The Definitive + Visual History\" by R.G. Grant\n9. \"The War That Ended Peace: The Road to 1914\" + by Margaret MacMillan\n10. 
\"Paris 1919: Six Months That Changed the World\" + by Margaret MacMillan\n\nThese books cover various aspects of the First World + War, including its causes, key battles, political consequences, and personal + experiences.\n\n\nTry again, making sure to address the validation error.\n\nProvide + your complete response:"},{"role":"assistant","content":"Here is a list of available + books on the First World War by Italian authors:\n\n1. \"La Grande Guerra\" + by Alessandro Barbero \n2. \"Il grande nulla: La prima guerra mondiale\" by + Emilio Gentile \n3. \"La guerra italiana 1915-1918\" by Paolo Piccione \n4. + \"La prima guerra mondiale\" by Andrea di Robilant \n5. \"1914-1918: La guerra + e la memoria\" edited by Paolo G. Minuto \n\nThese books provide various perspectives + on the First World War from Italian historians and authors."},{"role":"system","content":"You + are Test Agent. Test Backstory\nYour personal goal is: Test Goal"},{"role":"user","content":"\nCurrent + Task: Test task\n\nThis is the expected criteria for your final answer: Output\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '770' + - '4036' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 
3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYgCdUkawcuGKhJQYVfLlwbbDwljX\",\n \"object\": \"chat.completion\",\n \"created\": 1762381415,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: Output\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 153,\n \"completion_tokens\": 14,\n \"total_tokens\": 167,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGAMOb5FXfsZwEhCqnUP11VAcuqF\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052778,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Output\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 948,\n \"completion_tokens\": + 1,\n \"total_tokens\": 949,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:23:36 GMT + - Wed, 25 Feb 2026 20:52:59 GMT Server: - cloudflare 
Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '441' + - '347' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '462' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199832' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 50ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_guardrail_when_an_error_occurs.yaml b/lib/crewai/tests/cassettes/test_guardrail_when_an_error_occurs.yaml index 1ac34dfe5..af0bc5aaf 100644 --- a/lib/crewai/tests/cassettes/test_guardrail_when_an_error_occurs.yaml +++ b/lib/crewai/tests/cassettes/test_guardrail_when_an_error_occurs.yaml @@ -1,189 +1,121 @@ interactions: - request: - body: '{"messages": [{"role": "system", "content": "You are Test Agent. 
Test Backstory\nYour personal goal is: Test Goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Gather information about available books on the First World War\n\nThis is the expected criteria for your final answer: A list of available books on the First World War\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}' + body: '{"messages":[{"role":"system","content":"You are Test Agent. Test Backstory\nYour + personal goal is: Test Goal"},{"role":"user","content":"\nCurrent Task: Gather + information about available books on the First World War\n\nThis is the expected + criteria for your final answer: A list of available books on the First World + War\nyou MUST return the actual complete content as the final answer, not a + summary.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '903' + - '465' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.68.2 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.68.2 - x-stainless-raw-response: - - 'true' + - 1.83.0 x-stainless-read-timeout: - - '600.0' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - 
'0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-BReRV6HdeL9wUgmKwfAZfVjuGdpAo\",\n \"object\": \"chat.completion\",\n \"created\": 1745930017,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: Here is a comprehensive list of available books on the First World War:\\n\\n1. **\\\"The Sleepwalkers: How Europe Went to War in 1914\\\" by Christopher Clark** \\n This book delves into the complex factors that led to the outbreak of the war, offering insights into the political and social dynamics of early 20th century Europe.\\n\\n2. **\\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer** \\n Meyer's expansive narrative covers the entire war with a focus on both military strategies and the human experiences endured by soldiers and civilians alike.\\n\\n3. **\\\"All Quiet on the Western Front\\\" by Erich Maria\ - \ Remarque** \\n A poignant novel that captures the resilience and trauma experienced by German soldiers during World War I, based on the author's own experiences.\\n\\n4. **\\\"The First World War\\\" by John Keegan** \\n Keegan provides a detailed military history of the war, featuring insights on battles, strategies, and the overall impact on global affairs.\\n\\n5. **\\\"Goodbye to All That\\\" by Robert Graves** \\n This autobiography recounts the author's experiences as a soldier during the war, offering a personal and critical perspective on the conflicts and the post-war era.\\n\\n6. **\\\"Catastrophe 1914: Europe Goes to War\\\" by Max Hastings** \\n Hastings chronicles the events leading up to World War I and the early battles, detailing the war's initial impact on European societies.\\n\\n7. 
**\\\"The War That Ended Peace: The Road to 1914\\\" by Margaret MacMillan** \\n MacMillan explores the political and historical factors that contributed to the outbreak\ - \ of war, emphasizing the decisions made by leaders across Europe.\\n\\n8. **\\\"The First World War: A Complete History\\\" by Martin Gilbert** \\n This complete history takes readers through the entirety of the war, from its causes to its aftermath, using a wide range of sources.\\n\\n9. **\\\"1914: The Year the World Ended\\\" by Paul Ham** \\n Ham focuses on the pivotal year of 1914 and the early war's devastation, analyzing its long-lasting effects on the world.\\n\\n10. **\\\"War Horse\\\" by Michael Morpurgo** \\n This children's novel tells the story of a horse and his experiences during the war, highlighting the bond between animals and humans amidst the chaos.\\n\\nEach of these books offers unique perspectives and rich details about the First World War, making them valuable resources for anyone interested in this pivotal period in history.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\"\ - : \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 170,\n \"completion_tokens\": 534,\n \"total_tokens\": 704,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_dbaca60df0\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDGA6ArRnT0S8ME2I1R4x9Mo4JyGJ\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052762,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Here is a list of available books on + the First World War:\\n\\n1. \\\"The Guns of August\\\" by Barbara W. Tuchman\\n2. 
+ \\\"A World Undone: The Story of the Great War, 1914 to 1918\\\" by G.J. Meyer\\n3. + \\\"The First World War\\\" by John Keegan\\n4. \\\"The Sleepwalkers: How + Europe Went to War in 1914\\\" by Christopher Clark\\n5. \\\"To End All Wars: + A Story of Loyalty and Rebellion, 1914-1918\\\" by Adam Hochschild\\n6. \\\"World + War I: The Definitive Visual History\\\" by R.G. Grant\\n7. \\\"Catastrophe + 1914: Europe Goes to War\\\" by Max Hastings\\n8. \\\"The Great War and Modern + Memory\\\" by Paul Fussell\\n9. \\\"Paris 1919: Six Months That Changed the + World\\\" by Margaret MacMillan\\n10. \\\"The Pity of War: Explaining World + War I\\\" by Niall Ferguson\\n\\nIf you need further details on any of these + titles, feel free to ask.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 84,\n \"completion_tokens\": + 230,\n \"total_tokens\": 314,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - 937ed42dee2e621f-GRU + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 29 Apr 2025 12:33:48 GMT + - Wed, 25 Feb 2026 20:52:46 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=mLRCnpdB3n_6medIZWHnUu8MNRGZsD6riaRhN47PK74-1745930028-1.0.1.1-M2lDM1_V9hNCK0MZrBnFalF3lndC3JkS8zhDOGww_LmOrgdpU9fZLpNZUmyinCQOnlCjDjDYJUECM82ffT1anqBiO1NoDeNp91EPKiK7s.8; path=/; expires=Tue, 29-Apr-25 13:03:48 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=eTrj_ZhCx2XuylS5vYROwUlPrJBwOyrbS2Ki.msl45E-1745930028010-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - 
STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '10856' + - '3250' + openai-project: + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - strict-transport-security: - - max-age=31536000; includeSubDomains; preload + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 x-ratelimit-limit-requests: - - '30000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '150000000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '29999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '149999807' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 2ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 0s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_bc2d62d8325b2bdd3e98544a66389132 - status: - code: 200 - message: OK -- request: - body: '{"messages": [{"role": "system", "content": "You are Guardrail Agent. You are a expert at validating the output of a task. 
By providing effective feedback if the output is not valid.\nYour personal goal is: Validate the output of the task\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {\n \"valid\": bool,\n \"feedback\": str | None\n}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python."}, {"role": "user", "content": "\n Ensure the following task result complies with the given guardrail.\n\n Task result:\n Here is a comprehensive list of available books on - the First World War:\n\n1. **\"The Sleepwalkers: How Europe Went to War in 1914\" by Christopher Clark** \n This book delves into the complex factors that led to the outbreak of the war, offering insights into the political and social dynamics of early 20th century Europe.\n\n2. **\"A World Undone: The Story of the Great War, 1914 to 1918\" by G.J. Meyer** \n Meyer''s expansive narrative covers the entire war with a focus on both military strategies and the human experiences endured by soldiers and civilians alike.\n\n3. **\"All Quiet on the Western Front\" by Erich Maria Remarque** \n A poignant novel that captures the resilience and trauma experienced by German soldiers during World War I, based on the author''s own experiences.\n\n4. **\"The First World War\" by John Keegan** \n Keegan provides a detailed military history of the war, featuring insights on battles, strategies, and the overall impact on global affairs.\n\n5. 
**\"Goodbye to All That\" by Robert Graves** \n This - autobiography recounts the author''s experiences as a soldier during the war, offering a personal and critical perspective on the conflicts and the post-war era.\n\n6. **\"Catastrophe 1914: Europe Goes to War\" by Max Hastings** \n Hastings chronicles the events leading up to World War I and the early battles, detailing the war''s initial impact on European societies.\n\n7. **\"The War That Ended Peace: The Road to 1914\" by Margaret MacMillan** \n MacMillan explores the political and historical factors that contributed to the outbreak of war, emphasizing the decisions made by leaders across Europe.\n\n8. **\"The First World War: A Complete History\" by Martin Gilbert** \n This complete history takes readers through the entirety of the war, from its causes to its aftermath, using a wide range of sources.\n\n9. **\"1914: The Year the World Ended\" by Paul Ham** \n Ham focuses on the pivotal year of 1914 and the early war''s devastation, analyzing its long-lasting effects - on the world.\n\n10. 
**\"War Horse\" by Michael Morpurgo** \n This children''s novel tells the story of a horse and his experiences during the war, highlighting the bond between animals and humans amidst the chaos.\n\nEach of these books offers unique perspectives and rich details about the First World War, making them valuable resources for anyone interested in this pivotal period in history.\n\n Guardrail:\n Ensure the authors are from Italy\n \n Your task:\n - Confirm if the Task result complies with the guardrail.\n - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\n - Focus only on identifying issues \u2014 do not propose corrections.\n - If the Task result complies with the guardrail, saying that is valid\n "}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '3917' - content-type: - - application/json - cookie: - - __cf_bm=mLRCnpdB3n_6medIZWHnUu8MNRGZsD6riaRhN47PK74-1745930028-1.0.1.1-M2lDM1_V9hNCK0MZrBnFalF3lndC3JkS8zhDOGww_LmOrgdpU9fZLpNZUmyinCQOnlCjDjDYJUECM82ffT1anqBiO1NoDeNp91EPKiK7s.8; _cfuvid=eTrj_ZhCx2XuylS5vYROwUlPrJBwOyrbS2Ki.msl45E-1745930028010-0.0.1.1-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.68.2 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.68.2 - x-stainless-raw-response: - - 'true' - x-stainless-read-timeout: - - '600.0' - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.12.9 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-BReTBRCAvSDG5VMdtF9ZjByy7lqSJ\",\n \"object\": \"chat.completion\",\n \"created\": 1745930121,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n 
\"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: {\\n \\\"valid\\\": false,\\n \\\"feedback\\\": \\\"None of the authors listed in the task result are from Italy. All the authors mentioned are from other countries, such as Germany, the UK, and the US.\\\"\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 797,\n \"completion_tokens\": 60,\n \"total_tokens\": 857,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"\ - audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_0392822090\"\n}\n" - headers: - CF-RAY: - - 937ed6bd68faa435-GRU - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Tue, 29 Apr 2025 12:35:23 GMT - Server: - - cloudflare - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - nosniff - access-control-expose-headers: - - X-Request-ID - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - crewai-iuxna1 - openai-processing-ms: - - '1138' - openai-version: - - '2020-10-01' - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - x-ratelimit-limit-requests: - - '30000' - x-ratelimit-limit-tokens: - - '150000000' - x-ratelimit-remaining-requests: - - '29999' - x-ratelimit-remaining-tokens: - - '149999072' - x-ratelimit-reset-requests: - - 2ms - x-ratelimit-reset-tokens: - - 0s - x-request-id: - - req_2ba1be014a5974ba354aff564e26516a + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_json_property_without_output_json.yaml b/lib/crewai/tests/cassettes/test_json_property_without_output_json.yaml index df5205161..f7089e7d2 100644 --- 
a/lib/crewai/tests/cassettes/test_json_property_without_output_json.yaml +++ b/lib/crewai/tests/cassettes/test_json_property_without_output_json.yaml @@ -1,11 +1,14 @@ interactions: - request: - body: '{"trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T07:25:08.937105+00:00"}, "ephemeral_trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582"}' + body: '{"trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.2.1", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-10-31T07:25:08.937105+00:00"}, + "ephemeral_trace_id": "4ced1ade-0d34-4d28-a47d-61011b1f3582"}' headers: Accept: - '*/*' - Accept-Encoding: - - gzip, deflate, zstd Connection: - keep-alive Content-Length: @@ -13,11 +16,13 @@ interactions: Content-Type: - application/json User-Agent: - - CrewAI-CLI/1.2.1 + - X-USER-AGENT-XXX X-Crewai-Organization-Id: - 73c2b193-f579-422c-84c7-76a39a1da77f X-Crewai-Version: - 1.2.1 + accept-encoding: + - ACCEPT-ENCODING-XXX method: POST uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches response: @@ -35,46 +40,60 @@ interactions: cache-control: - no-store content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com 
https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 
https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' + - CSP-FILTERED etag: - - W/"684f9dff2cfefa325ac69ea38dba2309" + - ETAG-XXX expires: - '0' permissions-policy: - - camera=(), microphone=(self), geolocation=() + - PERMISSIONS-POLICY-XXX pragma: - no-cache referrer-policy: - - strict-origin-when-cross-origin + - REFERRER-POLICY-XXX strict-transport-security: - - max-age=63072000; includeSubDomains + - STS-XXX vary: - Accept x-content-type-options: - - nosniff + - X-CONTENT-TYPE-XXX x-frame-options: - - SAMEORIGIN + - X-FRAME-OPTIONS-XXX x-permitted-cross-domain-policies: - - none + - X-PERMITTED-XXX x-request-id: - - 630cda16-c991-4ed0-b534-16c03eb2ffca + - X-REQUEST-ID-XXX x-runtime: - - '0.072382' + - X-RUNTIME-XXX x-xss-protection: - - 1; mode=block + - X-XSS-PROTECTION-XXX status: code: 201 message: Created - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nEnsure your final answer contains only + the content in the following format: {\n \"properties\": {\n \"score\": + {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nEnsure the final output does not include any code block markers + like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the + tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX connection: - keep-alive content-length: @@ -83,20 +102,18 @@ interactions: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -107,10 +124,21 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CWdnRkRPYTVe5JfVO7aC1cdVfqIdd\",\n \"object\": \"chat.completion\",\n \"created\": 1761895509,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\n{\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 281,\n \"completion_tokens\": 19,\n \"total_tokens\": 300,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-CWdnRkRPYTVe5JfVO7aC1cdVfqIdd\",\n \"object\": + \"chat.completion\",\n \"created\": 1761895509,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + 
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\n{\\n + \ \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 281,\n \"completion_tokens\": + 19,\n \"total_tokens\": 300,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" headers: CF-RAY: - - 99716ab4788dea35-FCO + - CF-RAY-XXX Connection: - keep-alive Content-Type: @@ -120,26 +148,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=S.q8_0ONHDHBHNOJdMZHwJDue9lKhWQHpKuP2lsspx4-1761895510-1.0.1.1-QUDxMm9SVfRT2R188bLcvxUd6SXIBmZgnz3D35UF95nNg8zX5Gzdg2OmU.uo29rqaGatjupcLPNMyhfOqeoyhNQ28Zz1ESSQLq0y70x3IvM; path=/; expires=Fri, 31-Oct-25 07:55:10 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=TvP4GePeQO8E5c_xWNGzJb84f940MFRG_lZ_0hWAc5M-1761895510432-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - '569' openai-project: - - proj_xitITlrFeen7zjNSzML82h9x + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -149,40 +176,119 @@ interactions: x-ratelimit-limit-project-tokens: - '150000000' x-ratelimit-limit-requests: - - '30000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - 
'150000000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-project-tokens: - '149999700' x-ratelimit-remaining-requests: - - '29999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '149999700' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-project-tokens: - 0s x-ratelimit-reset-requests: - - 2ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 0s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_393e029e99d54ab0b4e7c69c5cba099f + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"events": [{"event_id": "ea607d3f-c9ff-4aa8-babb-a84eb6d16663", "timestamp": "2025-10-31T07:25:08.935640+00:00", "type": "crew_kickoff_started", "event_data": {"timestamp": "2025-10-31T07:25:08.935640+00:00", "type": "crew_kickoff_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": "crew", "crew": null, "inputs": null}}, {"event_id": "8e792d78-fe9c-4601-a7b4-7b105fa8fb40", "timestamp": "2025-10-31T07:25:08.937816+00:00", "type": "task_started", "event_data": {"task_description": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "expected_output": "The score of the title.", "task_name": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "context": "", "agent_role": "Scorer", "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7"}}, {"event_id": "a2fcdfee-a395-4dc8-99b8-ba3d8d843a70", - "timestamp": "2025-10-31T07:25:08.938816+00:00", "type": "agent_execution_started", "event_data": {"agent_role": "Scorer", "agent_goal": "Score the title", "agent_backstory": "You''re an expert scorer, specialized in scoring titles."}}, {"event_id": "b0ba7582-6ea0-4b66-a64a-0a1e38d57502", "timestamp": "2025-10-31T07:25:08.938996+00:00", "type": "llm_call_started", "event_data": {"timestamp": 
"2025-10-31T07:25:08.938996+00:00", "type": "llm_call_started", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "task_name": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "agent_id": "8d6e3481-36fa-4fca-9665-977e6d76a969", "agent_role": "Scorer", "from_task": null, "from_agent": null, "model": "gpt-4.1-mini", "messages": [{"role": "system", "content": "You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score - the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nEnsure the final output does not include any - code block markers like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks": [""], "available_functions": null}}, {"event_id": "ab6b168b-d954-494f-ae58-d9ef7a1941dc", "timestamp": "2025-10-31T07:25:10.466669+00:00", "type": "llm_call_completed", "event_data": {"timestamp": "2025-10-31T07:25:10.466669+00:00", "type": "llm_call_completed", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "task_name": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "agent_id": "8d6e3481-36fa-4fca-9665-977e6d76a969", "agent_role": "Scorer", "from_task": null, "from_agent": null, "messages": [{"role": "system", "content": "You are Scorer. You''re an expert - scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": - false\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "response": "Thought: I now can give a great answer\n{\n \"score\": 4\n}", "call_type": "", "model": "gpt-4.1-mini"}}, {"event_id": "0b8a17b6-e7d2-464d-a969-56dd705a40ef", "timestamp": "2025-10-31T07:25:10.466933+00:00", "type": "agent_execution_completed", "event_data": {"agent_role": "Scorer", "agent_goal": "Score the title", "agent_backstory": "You''re an expert scorer, specialized in scoring titles."}}, {"event_id": "b835b8e7-992b-4364-9ff8-25c81203ef77", "timestamp": "2025-10-31T07:25:10.467175+00:00", "type": "task_completed", "event_data": {"task_description": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "task_name": "Give me an integer score - between 1-5 for the following title: ''The impact of AI in the future of work''", "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "output_raw": "Thought: I now can give a great answer\n{\n \"score\": 4\n}", "output_format": "OutputFormat.PYDANTIC", "agent_role": "Scorer"}}, {"event_id": "a9973b74-9ca6-46c3-b219-0b11ffa9e210", "timestamp": "2025-10-31T07:25:10.469421+00:00", "type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-10-31T07:25:10.469421+00:00", "type": "crew_kickoff_completed", "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "name": "Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''", "expected_output": "The score of the title.", - "summary": "Give me an integer score between 1-5 for the following...", "raw": "Thought: I now can give a great answer\n{\n \"score\": 4\n}", 
"pydantic": {}, "json_dict": null, "agent": "Scorer", "output_format": "pydantic"}, "total_tokens": 300}}], "batch_metadata": {"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}' + body: '{"events": [{"event_id": "ea607d3f-c9ff-4aa8-babb-a84eb6d16663", "timestamp": + "2025-10-31T07:25:08.935640+00:00", "type": "crew_kickoff_started", "event_data": + {"timestamp": "2025-10-31T07:25:08.935640+00:00", "type": "crew_kickoff_started", + "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, + "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": + "crew", "crew": null, "inputs": null}}, {"event_id": "8e792d78-fe9c-4601-a7b4-7b105fa8fb40", + "timestamp": "2025-10-31T07:25:08.937816+00:00", "type": "task_started", "event_data": + {"task_description": "Give me an integer score between 1-5 for the following + title: ''The impact of AI in the future of work''", "expected_output": "The + score of the title.", "task_name": "Give me an integer score between 1-5 for + the following title: ''The impact of AI in the future of work''", "context": + "", "agent_role": "Scorer", "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7"}}, + {"event_id": "a2fcdfee-a395-4dc8-99b8-ba3d8d843a70", "timestamp": "2025-10-31T07:25:08.938816+00:00", + "type": "agent_execution_started", "event_data": {"agent_role": "Scorer", "agent_goal": + "Score the title", "agent_backstory": "You''re an expert scorer, specialized + in scoring titles."}}, {"event_id": "b0ba7582-6ea0-4b66-a64a-0a1e38d57502", + "timestamp": "2025-10-31T07:25:08.938996+00:00", "type": "llm_call_started", + "event_data": {"timestamp": "2025-10-31T07:25:08.938996+00:00", "type": "llm_call_started", + "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, + "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "task_name": "Give me an + integer score between 1-5 for the following title: ''The impact of AI in the + future of work''", "agent_id": 
"8d6e3481-36fa-4fca-9665-977e6d76a969", "agent_role": + "Scorer", "from_task": null, "from_agent": null, "model": "gpt-4.1-mini", "messages": + [{"role": "system", "content": "You are Scorer. You''re an expert scorer, specialized + in scoring titles.\nYour personal goal is: Score the title\nTo give my best + complete final answer to the task respond using the exact following format:\n\nThought: + I now can give a great answer\nFinal Answer: Your final answer must be the great + and the most complete as possible, it must be outcome described.\n\nI MUST use + these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nEnsure your final answer contains only + the content in the following format: {\n \"properties\": {\n \"score\": + {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nEnsure the final output does not include any code block markers + like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the + tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], + "tools": null, "callbacks": [""], "available_functions": null}}, {"event_id": "ab6b168b-d954-494f-ae58-d9ef7a1941dc", + "timestamp": "2025-10-31T07:25:10.466669+00:00", "type": "llm_call_completed", + "event_data": {"timestamp": "2025-10-31T07:25:10.466669+00:00", "type": "llm_call_completed", + "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, + "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "task_name": "Give me an + integer score between 1-5 for the following title: ''The impact of AI in the + future of work''", "agent_id": "8d6e3481-36fa-4fca-9665-977e6d76a969", "agent_role": + "Scorer", "from_task": null, "from_agent": null, "messages": [{"role": "system", + "content": "You are Scorer. You''re an expert scorer, specialized in scoring + titles.\nYour personal goal is: Score the title\nTo give my best complete final + answer to the task respond using the exact following format:\n\nThought: I now + can give a great answer\nFinal Answer: Your final answer must be the great and + the most complete as possible, it must be outcome described.\n\nI MUST use these + formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: + Give me an integer score between 1-5 for the following title: ''The impact of + AI in the future of work''\n\nThis is the expected criteria for your final answer: + The score of the title.\nyou MUST return the actual complete content as the + final answer, not a summary.\nEnsure your final answer contains only the content + in the following format: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nEnsure + the final output does not include any code block markers like ```json or 
```python.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}], "response": "Thought: I now + can give a great answer\n{\n \"score\": 4\n}", "call_type": "", "model": "gpt-4.1-mini"}}, {"event_id": "0b8a17b6-e7d2-464d-a969-56dd705a40ef", + "timestamp": "2025-10-31T07:25:10.466933+00:00", "type": "agent_execution_completed", + "event_data": {"agent_role": "Scorer", "agent_goal": "Score the title", "agent_backstory": + "You''re an expert scorer, specialized in scoring titles."}}, {"event_id": "b835b8e7-992b-4364-9ff8-25c81203ef77", + "timestamp": "2025-10-31T07:25:10.467175+00:00", "type": "task_completed", "event_data": + {"task_description": "Give me an integer score between 1-5 for the following + title: ''The impact of AI in the future of work''", "task_name": "Give me an + integer score between 1-5 for the following title: ''The impact of AI in the + future of work''", "task_id": "677cf2dd-96a9-4eac-9140-0ecaba9609f7", "output_raw": + "Thought: I now can give a great answer\n{\n \"score\": 4\n}", "output_format": + "OutputFormat.PYDANTIC", "agent_role": "Scorer"}}, {"event_id": "a9973b74-9ca6-46c3-b219-0b11ffa9e210", + "timestamp": "2025-10-31T07:25:10.469421+00:00", "type": "crew_kickoff_completed", + "event_data": {"timestamp": "2025-10-31T07:25:10.469421+00:00", "type": "crew_kickoff_completed", + "source_fingerprint": null, "source_type": null, "fingerprint_metadata": null, + "task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name": + "crew", "crew": null, "output": {"description": "Give me an integer score between + 1-5 for the following title: ''The impact of AI in the future of work''", "name": + "Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''", "expected_output": "The score of the title.", + "summary": "Give me an integer score between 1-5 for the following...", "raw": + 
"Thought: I now can give a great answer\n{\n \"score\": 4\n}", "pydantic": + {}, "json_dict": null, "agent": "Scorer", "output_format": "pydantic"}, "total_tokens": + 300}}], "batch_metadata": {"events_count": 8, "batch_sequence": 1, "is_final_batch": + false}}' headers: Accept: - '*/*' - Accept-Encoding: - - gzip, deflate, zstd Connection: - keep-alive Content-Length: @@ -190,11 +296,13 @@ interactions: Content-Type: - application/json User-Agent: - - CrewAI-CLI/1.2.1 + - X-USER-AGENT-XXX X-Crewai-Organization-Id: - 73c2b193-f579-422c-84c7-76a39a1da77f X-Crewai-Version: - 1.2.1 + accept-encoding: + - ACCEPT-ENCODING-XXX method: POST uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/4ced1ade-0d34-4d28-a47d-61011b1f3582/events response: @@ -212,35 +320,33 @@ interactions: cache-control: - no-store content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev 
https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' + - CSP-FILTERED etag: - - W/"be223998b84365d3a863f942c880adfb" + - ETAG-XXX expires: - '0' permissions-policy: - - camera=(), microphone=(self), geolocation=() + - PERMISSIONS-POLICY-XXX pragma: - no-cache referrer-policy: - - strict-origin-when-cross-origin + - REFERRER-POLICY-XXX strict-transport-security: - - max-age=63072000; includeSubDomains + - STS-XXX 
vary: - Accept x-content-type-options: - - nosniff + - X-CONTENT-TYPE-XXX x-frame-options: - - SAMEORIGIN + - X-FRAME-OPTIONS-XXX x-permitted-cross-domain-policies: - - none + - X-PERMITTED-XXX x-request-id: - - 9c19d6df-9190-4764-afed-f3444939d2e4 + - X-REQUEST-ID-XXX x-runtime: - - '0.123911' + - X-RUNTIME-XXX x-xss-protection: - - 1; mode=block + - X-XSS-PROTECTION-XXX status: code: 200 message: OK @@ -249,8 +355,6 @@ interactions: headers: Accept: - '*/*' - Accept-Encoding: - - gzip, deflate, zstd Connection: - keep-alive Content-Length: @@ -258,11 +362,13 @@ interactions: Content-Type: - application/json User-Agent: - - CrewAI-CLI/1.2.1 + - X-USER-AGENT-XXX X-Crewai-Organization-Id: - 73c2b193-f579-422c-84c7-76a39a1da77f X-Crewai-Version: - 1.2.1 + accept-encoding: + - ACCEPT-ENCODING-XXX method: PATCH uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/4ced1ade-0d34-4d28-a47d-61011b1f3582/finalize response: @@ -280,35 +386,167 @@ interactions: cache-control: - no-store content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com 
https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' + - CSP-FILTERED etag: - - W/"bff97e21bd1971750dcfdb102fba9dcd" + - ETAG-XXX expires: - '0' permissions-policy: - - camera=(), microphone=(self), geolocation=() + - PERMISSIONS-POLICY-XXX 
pragma: - no-cache referrer-policy: - - strict-origin-when-cross-origin + - REFERRER-POLICY-XXX strict-transport-security: - - max-age=63072000; includeSubDomains + - STS-XXX vary: - Accept x-content-type-options: - - nosniff + - X-CONTENT-TYPE-XXX x-frame-options: - - SAMEORIGIN + - X-FRAME-OPTIONS-XXX x-permitted-cross-domain-policies: - - none + - X-PERMITTED-XXX x-request-id: - - 2b6cd38d-78fa-4676-94ff-80e3bcf48a03 + - X-REQUEST-ID-XXX x-runtime: - - '0.064858' + - X-RUNTIME-XXX x-xss-protection: - - 1; mode=block + - X-XSS-PROTECTION-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"system","content":"You are Scorer. 
You''re + an expert scorer, specialized in scoring titles.\nYour personal goal is: Score + the title"},{"role":"user","content":"\nCurrent Task: Give me an integer score + between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis + is the expected criteria for your final answer: The score of the title.\nyou + MUST return the actual complete content as the final answer, not a summary.\nFormat + your final answer according to the following OpenAPI schema: {\n \"properties\": + {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2541' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + 
x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDE0D15NvBLDvn8Wy68ZscARhqMaX\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044461,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 513,\n \"completion_tokens\": 5,\n \"total_tokens\": 518,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:21 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '477' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - 
X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_output_json_dict_hierarchical.yaml b/lib/crewai/tests/cassettes/test_output_json_dict_hierarchical.yaml index 59fc8a1e9..472fc68e7 100644 --- a/lib/crewai/tests/cassettes/test_output_json_dict_hierarchical.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_dict_hierarchical.yaml @@ -426,4 +426,121 @@ interactions: status: code: 200 message: OK +- request: + body: '{"messages":[{"role":"user","content":"Thought: I now can give a great + answer\nFinal Answer: I would assign a score of 4 to the title \"The impact + of AI in the future of work.\" The title is very relevant and timely, as artificial + intelligence is a major transformative force affecting the labor market and + employment trends. It is clear and concise, effectively highlighting the focus + on AI''s influence on the future of work. However, while it is engaging and + implies substantial potential impact, it could be slightly more specific or + dynamic to reach an excellent level. 
Overall, it meets very good standards for + potential impact, engagement, relevance, and clarity."}],"model":"gpt-4o","tool_choice":{"type":"function","function":{"name":"ScoreOutput"}},"tools":[{"type":"function","function":{"name":"ScoreOutput","description":"Correctly + extracted `ScoreOutput` with all the required parameters with correct types","parameters":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1034' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDE0G4tjiC8Je3BD8xhWMey7kZF66\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044464,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_x95I7UxdCvFccZ87imExKzu9\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n + \ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n + \ ],\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 188,\n \"completion_tokens\": 5,\n + \ \"total_tokens\": 193,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n 
\"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_64dfa806c7\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:24 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '385' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK version: 1 diff --git a/lib/crewai/tests/cassettes/test_output_json_dict_sequential.yaml b/lib/crewai/tests/cassettes/test_output_json_dict_sequential.yaml index 47a55673a..875005ced 100644 --- a/lib/crewai/tests/cassettes/test_output_json_dict_sequential.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_dict_sequential.yaml @@ -1,98 +1,120 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1394' + - '1421' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: 
- - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0UpOvDuMqlqYkt9WW8lQSkyatz\",\n \"object\": \"chat.completion\",\n \"created\": 1762380662,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDE5QUOVeJDiOh6TuObUjh32f7Q0g\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044784,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - 
REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:11:02 GMT + - Wed, 25 Feb 2026 18:39:44 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:41:02 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '864' + - '303' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '3087' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml b/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml index 3596e3334..eff290b18 100644 --- a/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_hierarchical.yaml @@ -427,4 +427,122 @@ interactions: status: code: 200 message: OK +- request: + body: 
'{"messages":[{"role":"user","content":"Thought: The title \"The impact + of AI in the future of work\" is highly relevant given the widespread and ongoing + discussions about AI''s role in transforming workplaces globally. It is clear + and concise, directly indicating the subject and scope, which helps the reader + understand what to expect. In terms of engagement, it has strong potential to + attract interest from professionals, researchers, and the general public curious + about how AI will shape jobs and employment trends. Although it is somewhat + broad and could be more specific to a particular aspect of work or type of AI, + it remains focused enough to be effective as a general overview title.\n\nFinal + Answer: 4"}],"model":"gpt-4o","tool_choice":{"type":"function","function":{"name":"ScoreOutput"}},"tools":[{"type":"function","function":{"name":"ScoreOutput","description":"Correctly + extracted `ScoreOutput` with all the required parameters with correct types","parameters":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1077' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDE0FPRrXCbAAssWcvT9wUojN8yPa\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044463,\n \"model\": 
\"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_237IZJqLGcX4N5MZYEd6Wz2n\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"ScoreOutput\",\n + \ \"arguments\": \"{\\\"score\\\":4}\"\n }\n }\n + \ ],\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 191,\n \"completion_tokens\": 5,\n + \ \"total_tokens\": 196,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_64dfa806c7\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:23 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '365' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK 
version: 1 diff --git a/lib/crewai/tests/cassettes/test_output_json_sequential.yaml b/lib/crewai/tests/cassettes/test_output_json_sequential.yaml index 34d52b658..397f46fea 100644 --- a/lib/crewai/tests/cassettes/test_output_json_sequential.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_sequential.yaml @@ -1,12 +1,29 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nEnsure your final answer strictly adheres + to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! 
+ This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX connection: - keep-alive content-length: @@ -15,20 +32,18 @@ interactions: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -39,11 +54,21 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0P4wugCaRcXw9kmLG3BAMBmkA0\",\n \"object\": \"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-CYg0P4wugCaRcXw9kmLG3BAMBmkA0\",\n \"object\": + \"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n 
\"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": + 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: @@ -53,26 +78,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:57 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '537' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -80,19 +104,153 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX 
x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"system","content":"You are Scorer. 
You''re + an expert scorer, specialized in scoring titles.\nYour personal goal is: Score + the title"},{"role":"user","content":"\nCurrent Task: Give me an integer score + between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis + is the expected criteria for your final answer: The score of the title.\nyou + MUST return the actual complete content as the final answer, not a summary.\nFormat + your final answer according to the following OpenAPI schema: {\n \"properties\": + {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2541' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + 
x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDDzz40VXTe9AsmG5ZSlL0IufvYKz\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044447,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 513,\n \"completion_tokens\": 5,\n \"total_tokens\": 518,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:07 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '426' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - 
X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_output_json_to_another_task.yaml b/lib/crewai/tests/cassettes/test_output_json_to_another_task.yaml index 575e9f85f..dac267822 100644 --- a/lib/crewai/tests/cassettes/test_output_json_to_another_task.yaml +++ b/lib/crewai/tests/cassettes/test_output_json_to_another_task.yaml @@ -1,194 +1,254 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1394' + - '1421' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: 
- - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0M3aPReBrUikkn7QiHFyZG8ETn\",\n \"object\": \"chat.completion\",\n \"created\": 1762380654,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDE5OBoRr3j1NGXkef0waj9TCBmLb\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044782,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - 
REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:54 GMT + - Wed, 25 Feb 2026 18:39:42 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:54 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '730' + - '435' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '754' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Given the score the title ''The impact of AI in the future of work'' got, give me an integer score between 1-5 for the following title: ''Return of the Jedi''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": - [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nThis is the context you''re working with:\n{\n \"score\": 4\n}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"assistant","content":"{\"score\":4}"},{"role":"system","content":"You + are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal + goal is: Score the title"},{"role":"user","content":"\nCurrent Task: Given the + score the title ''The impact of AI in the future of work'' got, give me an integer + score between 1-5 for the following title: ''Return of the Jedi''\n\nThis is + the expected criteria for your final answer: The score of the title.\nyou MUST + return the actual complete content as the final answer, not a summary.\nFormat + your final answer according to the following OpenAPI schema: {\n \"properties\": + {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. 
Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nThis is the context you''re working with:\n{\"score\":4}\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1512' + - '2699' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0MEYp1MebCu2eCMBqCwXtNYTbD\",\n \"object\": \"chat.completion\",\n \"created\": 1762380654,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 3\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n 
\"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\": 22,\n \"total_tokens\": 346,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDE5OEawexwaazoOAgn4QD9W8roe6\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044782,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":3}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 552,\n \"completion_tokens\": 5,\n \"total_tokens\": 557,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:55 GMT + - Wed, 25 Feb 2026 18:39:43 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '983' + - '309' openai-project: - - 
proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '1002' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199659' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 102ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_output_pydantic_hierarchical.yaml b/lib/crewai/tests/cassettes/test_output_pydantic_hierarchical.yaml index f48000904..7ee93fe76 100644 --- a/lib/crewai/tests/cassettes/test_output_pydantic_hierarchical.yaml +++ b/lib/crewai/tests/cassettes/test_output_pydantic_hierarchical.yaml @@ -10,28 +10,29 @@ interactions: Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content - as the final answer, not a summary.\nEnsure your final answer strictly adheres - to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. 
Only structure it to match the schema format.\n\nDo not include the OpenAPI schema in the final output. Ensure the final output - does not include any code block markers like ```json or ```python.\n\nThis is - VERY important to you, your job depends on it!"}],"model":"gpt-4o","tool_choice":"auto","tools":[{"type":"function","function":{"name":"Delegate_work_to_coworker","description":"Delegate + does not include any code block markers like ```json or ```python."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false,"tool_choice":"auto","tools":[{"type":"function","function":{"name":"delegate_work_to_coworker","description":"Delegate a specific task to one of the following coworkers: Scorer\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolutely - everything you know, don''t reference things but instead explain them.","parameters":{"properties":{"task":{"description":"The + everything you know, don''t reference things but instead explain them.","strict":true,"parameters":{"properties":{"task":{"description":"The task to delegate","title":"Task","type":"string"},"context":{"description":"The context for the task","title":"Context","type":"string"},"coworker":{"description":"The - role/name of the coworker to delegate to","title":"Coworker","type":"string"}},"required":["task","context","coworker"],"type":"object"}}},{"type":"function","function":{"name":"Ask_question_to_coworker","description":"Ask + role/name of the coworker to delegate 
to","title":"Coworker","type":"string"}},"required":["task","context","coworker"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"ask_question_to_coworker","description":"Ask a specific question to one of the following coworkers: Scorer\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolutely everything you know, don''t reference things but instead - explain them.","parameters":{"properties":{"question":{"description":"The question - to ask","title":"Question","type":"string"},"context":{"description":"The context - for the question","title":"Context","type":"string"},"coworker":{"description":"The - role/name of the coworker to ask","title":"Coworker","type":"string"}},"required":["question","context","coworker"],"type":"object"}}}]}' + explain them.","strict":true,"parameters":{"properties":{"question":{"description":"The + question to ask","title":"Question","type":"string"},"context":{"description":"The + context for the question","title":"Context","type":"string"},"coworker":{"description":"The + role/name of the coworker to ask","title":"Coworker","type":"string"}},"required":["question","context","coworker"],"type":"object","additionalProperties":false}}}]}' headers: User-Agent: - X-USER-AGENT-XXX @@ -44,7 +45,7 @@ interactions: connection: - keep-alive content-length: - - '2959' + - '3415' content-type: - application/json host: @@ -53,6 +54,8 @@ interactions: - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: @@ -66,31 +69,33 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.13.3 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-D0u1dSPVqe5art2HXWibsPOp3SOti\",\n \"object\": - 
\"chat.completion\",\n \"created\": 1769107733,\n \"model\": \"gpt-4o-2024-08-06\",\n + string: "{\n \"id\": \"chatcmpl-DDG9wKD6IRmnAwBS1tw4NMVccsPnZ\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052752,\n \"model\": \"gpt-4o-2024-08-06\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n - \ \"id\": \"call_AEHe6pv1NqguBRA5q9CHVSn3\",\n \"type\": - \"function\",\n \"function\": {\n \"name\": \"Delegate_work_to_coworker\",\n - \ \"arguments\": \"{\\\"task\\\":\\\"Provide an integer score - between 1-5 for the title 'The impact of AI in the future of work'. The score - should reflect how engaging, relevant, and thought-provoking the title is.\\\",\\\"context\\\":\\\"You - need to evaluate how well the title 'The impact of AI in the future of work' - meets the criteria of being engaging, relevant, and thought-provoking in the - context of emerging technologies and their implications on future work environments.\\\",\\\"coworker\\\":\\\"Scorer\\\"}\"\n - \ }\n }\n ],\n \"refusal\": null,\n \"annotations\": - []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n - \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 562,\n \"completion_tokens\": - 111,\n \"total_tokens\": 673,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + \ \"id\": \"call_VzfUuCi89kzEC9gJgiMCz5B2\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"delegate_work_to_coworker\",\n + \ \"arguments\": \"{\\\"task\\\":\\\"Evaluate the title 'The impact + of AI in the future of work' and give an integer score between 1-5 based on + how compelling or effective the title is.\\\",\\\"context\\\":\\\"You are + asked to evaluate a title 'The impact of AI in the future of work' and provide + an integer score between 1-5. The criteria for evaluation include how informative, + engaging, relevant, and clear the title is. 
Additionally, consider how the + title may attract the intended audience's interest and its potential impact + on readers.\\\",\\\"coworker\\\":\\\"Scorer\\\"}\"\n }\n }\n + \ ],\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 613,\n \"completion_tokens\": + 127,\n \"total_tokens\": 740,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n" + \"default\",\n \"system_fingerprint\": \"fp_64dfa806c7\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -99,11 +104,9 @@ interactions: Content-Type: - application/json Date: - - Thu, 22 Jan 2026 18:48:56 GMT + - Wed, 25 Feb 2026 20:52:34 GMT Server: - cloudflare - Set-Cookie: - - SET-COOKIE-XXX Strict-Transport-Security: - STS-XXX Transfer-Encoding: @@ -119,146 +122,13 @@ interactions: openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '3849' + - '2259' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '3973' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert - scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo - give my best complete final answer to the task respond using the exact following - format:\n\nThought: I now can give a great answer\nFinal Answer: Your final - answer must be the great and the most complete as possible, it must be outcome - described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent - Task: Provide an integer score between 1-5 for the title ''The impact of AI - in the future of work''. The score should reflect how engaging, relevant, and - thought-provoking the title is.\n\nThis is the expected criteria for your final - answer: Your best answer to your coworker asking you this, accounting for the - context shared.\nyou MUST return the actual complete content as the final answer, - not a summary.\n\nThis is the context you''re working with:\nYou need to evaluate - how well the title ''The impact of AI in the future of work'' meets the criteria - of being engaging, relevant, and thought-provoking in the context of emerging - technologies and their implications on future work environments.\n\nBegin! 
This - is VERY important to you, use the tools available and give your best Final Answer, - your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1348' - content-type: - - application/json - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-D0u1hKGQrrJVYOcW1tAlQMgAjcaDX\",\n \"object\": - \"chat.completion\",\n \"created\": 1769107737,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal - Answer: The title 'The impact of AI in the future of work' is highly relevant - given the current and growing significance of artificial intelligence in transforming - work environments across industries. It is engaging because AI's influence - on future employment is a topic of widespread interest and concern, prompting - readers to explore its implications. Furthermore, it is thought-provoking - as it invites consideration of both the opportunities and challenges AI presents - for the workforce, including changes in job roles, skills, and economic structures. - However, the title could be more captivating or specific to heighten curiosity - and emphasize particular aspects of AI's impact. 
Overall, it effectively meets - the criteria but could be slightly enhanced for maximum engagement. Considering - all factors, I would score it a 4 out of 5.\",\n \"refusal\": null,\n - \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": - \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 264,\n \"completion_tokens\": - 160,\n \"total_tokens\": 424,\n \"prompt_tokens_details\": {\n \"cached_tokens\": - 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": - {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": - 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Thu, 22 Jan 2026 18:49:00 GMT - Server: - - cloudflare - Set-Cookie: + set-cookie: - SET-COOKIE-XXX - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '3273' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '3299' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: @@ -289,47 +159,29 @@ interactions: Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content - as the final answer, not a summary.\nEnsure your final answer strictly adheres - to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n 
\"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo not include the OpenAPI schema in the final output. Ensure the final output - does not include any code block markers like ```json or ```python.\n\nThis is - VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_AEHe6pv1NqguBRA5q9CHVSn3","type":"function","function":{"name":"Delegate_work_to_coworker","arguments":"{\"task\":\"Provide - an integer score between 1-5 for the title ''The impact of AI in the future - of work''. The score should reflect how engaging, relevant, and thought-provoking - the title is.\",\"context\":\"You need to evaluate how well the title ''The - impact of AI in the future of work'' meets the criteria of being engaging, relevant, - and thought-provoking in the context of emerging technologies and their implications - on future work environments.\",\"coworker\":\"Scorer\"}"}}]},{"role":"tool","tool_call_id":"call_AEHe6pv1NqguBRA5q9CHVSn3","content":"The - title ''The impact of AI in the future of work'' is highly relevant given the - current and growing significance of artificial intelligence in transforming - work environments across industries. It is engaging because AI''s influence - on future employment is a topic of widespread interest and concern, prompting - readers to explore its implications. Furthermore, it is thought-provoking as - it invites consideration of both the opportunities and challenges AI presents - for the workforce, including changes in job roles, skills, and economic structures. 
- However, the title could be more captivating or specific to heighten curiosity - and emphasize particular aspects of AI''s impact. Overall, it effectively meets - the criteria but could be slightly enhanced for maximum engagement. Considering - all factors, I would score it a 4 out of 5."},{"role":"user","content":"Analyze - the tool result. If requirements are met, provide the Final Answer. Otherwise, - call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4o","tool_choice":"auto","tools":[{"type":"function","function":{"name":"Delegate_work_to_coworker","description":"Delegate + does not include any code block markers like ```json or ```python."}],"model":"gpt-4o","tool_choice":"auto","tools":[{"type":"function","function":{"name":"delegate_work_to_coworker","description":"Delegate a specific task to one of the following coworkers: Scorer\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolutely - everything you know, don''t reference things but instead explain them.","parameters":{"properties":{"task":{"description":"The + everything you know, don''t reference things but instead explain them.","strict":true,"parameters":{"properties":{"task":{"description":"The task to delegate","title":"Task","type":"string"},"context":{"description":"The context for the task","title":"Context","type":"string"},"coworker":{"description":"The - role/name of the coworker to delegate to","title":"Coworker","type":"string"}},"required":["task","context","coworker"],"type":"object"}}},{"type":"function","function":{"name":"Ask_question_to_coworker","description":"Ask + role/name of the coworker to delegate to","title":"Coworker","type":"string"}},"required":["task","context","coworker"],"type":"object","additionalProperties":false}}},{"type":"function","function":{"name":"ask_question_to_coworker","description":"Ask a specific 
question to one of the following coworkers: Scorer\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolutely everything you know, don''t reference things but instead - explain them.","parameters":{"properties":{"question":{"description":"The question - to ask","title":"Question","type":"string"},"context":{"description":"The context - for the question","title":"Context","type":"string"},"coworker":{"description":"The - role/name of the coworker to ask","title":"Coworker","type":"string"}},"required":["question","context","coworker"],"type":"object"}}}]}' + explain them.","strict":true,"parameters":{"properties":{"question":{"description":"The + question to ask","title":"Question","type":"string"},"context":{"description":"The + context for the question","title":"Context","type":"string"},"coworker":{"description":"The + role/name of the coworker to ask","title":"Coworker","type":"string"}},"required":["question","context","coworker"],"type":"object","additionalProperties":false}}}]}' headers: User-Agent: - X-USER-AGENT-XXX @@ -342,7 +194,7 @@ interactions: connection: - keep-alive content-length: - - '4694' + - '3151' content-type: - application/json cookie: @@ -366,22 +218,31 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.13.3 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-D0u1kZrAEdxxk1GHhh8iEvvddrv5C\",\n \"object\": - \"chat.completion\",\n \"created\": 1769107740,\n \"model\": \"gpt-4o-2024-08-06\",\n + string: "{\n \"id\": \"chatcmpl-DDG9zJ5ZtuBIJLBxuTBqV4pYyaAf3\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052755,\n \"model\": \"gpt-4o-2024-08-06\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": - null,\n 
\"annotations\": []\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 868,\n \"completion_tokens\": 6,\n \"total_tokens\": 874,\n \"prompt_tokens_details\": - {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_IdyahKEb4Ez9fWTlL0SWNU97\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"ask_question_to_coworker\",\n + \ \"arguments\": \"{\\\"question\\\":\\\"What score would you + give between 1-5 to the following title: 'The impact of AI in the future of + work' and why?\\\",\\\"context\\\":\\\"Your task is to evaluate the title + based on its ability to intrigue, its clarity, and relevance. You need to + provide an integer score between 1 and 5 for this title, considering these + aspects.\\\",\\\"coworker\\\":\\\"Scorer\\\"}\"\n }\n }\n + \ ],\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 581,\n \"completion_tokens\": + 97,\n \"total_tokens\": 678,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n" + \"default\",\n \"system_fingerprint\": \"fp_18e61aa3bc\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -390,7 +251,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 22 Jan 2026 18:49:00 GMT + - Wed, 25 Feb 2026 20:52:36 GMT Server: - cloudflare Strict-Transport-Security: @@ -408,13 +269,299 @@ interactions: openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '480' + - '1686' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: 
+ - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: What score would you give between 1-5 to the following title: ''The impact + of AI in the future of work'' and why?\n\nThis is the expected criteria for + your final answer: Your best answer to your coworker asking you this, accounting + for the context shared.\nyou MUST return the actual complete content as the + final answer, not a summary.\n\nThis is the context you''re working with:\nYour + task is to evaluate the title based on its ability to intrigue, its clarity, + and relevance. 
You need to provide an integer score between 1 and 5 for this + title, considering these aspects.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '831' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDGA1eLxVsUvh5Ptopxsrctx3s8fF\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052757,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"I would give the title \\\"The impact + of AI in the future of work\\\" a score of 4 out of 5.\\n\\nHere's why:\\n\\n- + **Clarity:** The title is clear and straightforward; it immediately tells + the reader that the focus is on how AI will influence the work landscape going + forward. There is no ambiguity about the subject matter.\\n\\n- **Relevance:** + The topic is highly relevant in today's context, as AI technologies are rapidly + transforming industries and workplace dynamics. This makes the title timely + and likely to attract interest from professionals, academics, and anyone curious + about technological impacts on employment.\\n\\n- **Intrigue:** While the + title is clear and relevant, it lacks a bit of punch or uniqueness that might + make it stand out more. 
It's somewhat generic\u2014many articles use similar + phrasing. Adding an element that hints at specific insights or a fresh perspective + could increase intrigue.\\n\\nOverall, the title effectively conveys the subject + and relevance but could be slightly improved with more compelling language + to boost interest.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 164,\n \"completion_tokens\": + 198,\n \"total_tokens\": 362,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 20:52:41 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '4344' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: 
"{\"messages\":[{\"role\":\"system\",\"content\":\"You are Crew Manager. + You are a seasoned manager with a knack for getting the best out of your team.\\nYou + are also known for your ability to delegate work to the right people, and to + ask the right questions to get the best out of your team.\\nEven though you + don't perform tasks by yourself, you have a lot of experience in the field, + which allows you to properly evaluate the work of your team members.\\nYour + personal goal is: Manage the team to complete the task in the best way possible.\"},{\"role\":\"user\",\"content\":\"\\nCurrent + Task: Give me an integer score between 1-5 for the following title: 'The impact + of AI in the future of work'\\n\\nThis is the expected criteria for your final + answer: The score of the title.\\nyou MUST return the actual complete content + as the final answer, not a summary.\\nFormat your final answer according to + the following OpenAPI schema: {\\n \\\"properties\\\": {\\n \\\"score\\\": + {\\n \\\"title\\\": \\\"Score\\\",\\n \\\"type\\\": \\\"integer\\\"\\n + \ }\\n },\\n \\\"required\\\": [\\n \\\"score\\\"\\n ],\\n \\\"title\\\": + \\\"ScoreOutput\\\",\\n \\\"type\\\": \\\"object\\\",\\n \\\"additionalProperties\\\": + false\\n}\\n\\nIMPORTANT: Preserve the original content exactly as-is. Do NOT + rewrite, paraphrase, or modify the meaning of the content. Only structure it + to match the schema format.\\n\\nDo not include the OpenAPI schema in the final + output. 
Ensure the final output does not include any code block markers like + ```json or ```python.\"},{\"role\":\"assistant\",\"content\":null,\"tool_calls\":[{\"id\":\"call_IdyahKEb4Ez9fWTlL0SWNU97\",\"type\":\"function\",\"function\":{\"name\":\"ask_question_to_coworker\",\"arguments\":\"{\\\"question\\\":\\\"What + score would you give between 1-5 to the following title: 'The impact of AI in + the future of work' and why?\\\",\\\"context\\\":\\\"Your task is to evaluate + the title based on its ability to intrigue, its clarity, and relevance. You + need to provide an integer score between 1 and 5 for this title, considering + these aspects.\\\",\\\"coworker\\\":\\\"Scorer\\\"}\"}}]},{\"role\":\"tool\",\"tool_call_id\":\"call_IdyahKEb4Ez9fWTlL0SWNU97\",\"name\":\"ask_question_to_coworker\",\"content\":\"I + would give the title \\\"The impact of AI in the future of work\\\" a score + of 4 out of 5.\\n\\nHere's why:\\n\\n- **Clarity:** The title is clear and straightforward; + it immediately tells the reader that the focus is on how AI will influence the + work landscape going forward. There is no ambiguity about the subject matter.\\n\\n- + **Relevance:** The topic is highly relevant in today's context, as AI technologies + are rapidly transforming industries and workplace dynamics. This makes the title + timely and likely to attract interest from professionals, academics, and anyone + curious about technological impacts on employment.\\n\\n- **Intrigue:** While + the title is clear and relevant, it lacks a bit of punch or uniqueness that + might make it stand out more. It's somewhat generic\u2014many articles use similar + phrasing. Adding an element that hints at specific insights or a fresh perspective + could increase intrigue.\\n\\nOverall, the title effectively conveys the subject + and relevance but could be slightly improved with more compelling language to + boost interest.\"},{\"role\":\"user\",\"content\":\"Analyze the tool result. 
+ If requirements are met, provide the Final Answer. Otherwise, call the next + tool. Deliver only the answer without meta-commentary.\"}],\"model\":\"gpt-4o\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"score\":{\"title\":\"Score\",\"type\":\"integer\"}},\"required\":[\"score\"],\"title\":\"ScoreOutput\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"ScoreOutput\",\"strict\":true}},\"stream\":false,\"tool_choice\":\"auto\",\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"delegate_work_to_coworker\",\"description\":\"Delegate + a specific task to one of the following coworkers: Scorer\\nThe input to this + tool should be the coworker, the task you want them to do, and ALL necessary + context to execute the task, they know nothing about the task, so share absolutely + everything you know, don't reference things but instead explain them.\",\"strict\":true,\"parameters\":{\"properties\":{\"task\":{\"description\":\"The + task to delegate\",\"title\":\"Task\",\"type\":\"string\"},\"context\":{\"description\":\"The + context for the task\",\"title\":\"Context\",\"type\":\"string\"},\"coworker\":{\"description\":\"The + role/name of the coworker to delegate to\",\"title\":\"Coworker\",\"type\":\"string\"}},\"required\":[\"task\",\"context\",\"coworker\"],\"type\":\"object\",\"additionalProperties\":false}}},{\"type\":\"function\",\"function\":{\"name\":\"ask_question_to_coworker\",\"description\":\"Ask + a specific question to one of the following coworkers: Scorer\\nThe input to + this tool should be the coworker, the question you have for them, and ALL necessary + context to ask the question properly, they know nothing about the question, + so share absolutely everything you know, don't reference things but instead + explain them.\",\"strict\":true,\"parameters\":{\"properties\":{\"question\":{\"description\":\"The + question to 
ask\",\"title\":\"Question\",\"type\":\"string\"},\"context\":{\"description\":\"The + context for the question\",\"title\":\"Context\",\"type\":\"string\"},\"coworker\":{\"description\":\"The + role/name of the coworker to ask\",\"title\":\"Coworker\",\"type\":\"string\"}},\"required\":[\"question\",\"context\",\"coworker\"],\"type\":\"object\",\"additionalProperties\":false}}}]}" + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '5297' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDGA5qDbleuzKoN7uVs5MFOC6X5DG\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052761,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 956,\n \"completion_tokens\": 10,\n \"total_tokens\": 966,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n 
\"system_fingerprint\": \"fp_64dfa806c7\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 20:52:42 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '508' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '503' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: diff --git a/lib/crewai/tests/cassettes/test_output_pydantic_sequential.yaml b/lib/crewai/tests/cassettes/test_output_pydantic_sequential.yaml index c8e210ed2..c1a86e1f0 100644 --- a/lib/crewai/tests/cassettes/test_output_pydantic_sequential.yaml +++ b/lib/crewai/tests/cassettes/test_output_pydantic_sequential.yaml @@ -1,98 +1,120 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1394' + - '1421' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: 
- - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0ICsr8nVjoOoVFpnOLUh71LgfJ\",\n \"object\": \"chat.completion\",\n \"created\": 1762380650,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDDxMk9AEzSz8xZnza3XoSeijSI5R\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044284,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: CF-RAY: - - 
REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:50 GMT + - Wed, 25 Feb 2026 18:31:25 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:50 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '482' + - '385' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '495' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_output_pydantic_to_another_task.yaml b/lib/crewai/tests/cassettes/test_output_pydantic_to_another_task.yaml index 5eec1ed00..4fd91fc44 100644 --- a/lib/crewai/tests/cassettes/test_output_pydantic_to_another_task.yaml +++ b/lib/crewai/tests/cassettes/test_output_pydantic_to_another_task.yaml @@ -1,192 +1,254 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You 
are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1388' + - '1415' content-type: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - 
'600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0Jd7eOJXIC6Yc1xB0F6Ve3KK1M\",\n \"object\": \"chat.completion\",\n \"created\": 1762380651,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer\\nFinal Answer: {\\\"score\\\": 4}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 17,\n \"total_tokens\": 311,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_65564d8ba5\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDDxNULmWtIUe1SAGHcArDXYSifV8\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044285,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 276,\n \"completion_tokens\": 5,\n \"total_tokens\": 281,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_9e0d253e63\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX 
Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:52 GMT + - Wed, 25 Feb 2026 18:31:26 GMT Server: - cloudflare - Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:52 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '1337' + - '364' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '1487' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '30000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '29687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 626ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Given the score the title ''The impact of AI in the future of work'' got, give me an integer score between 1-5 for the following title: ''Return of the Jedi'', you MUST give it a score, use your best judgment\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": - \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nThis is the context you''re working with:\n{\"score\": 4}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"assistant","content":"{\"score\":4}"},{"role":"system","content":"You + are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal + goal is: Score the title"},{"role":"user","content":"\nCurrent Task: Given the + score the title ''The impact of AI in the future of work'' got, give me an integer + score between 1-5 for the following title: ''Return of the Jedi'', you MUST + give it a score, use your best judgment\n\nThis is the expected criteria for + your final answer: The score of the title.\nyou MUST return the actual complete + content as the final answer, not a summary.\nFormat your final answer according + to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. 
Ensure the final output + does not include any code block markers like ```json or ```python.\n\nThis is + the context you''re working with:\n{\"score\":4}\n\nProvide your complete response:"}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1550' + - '2743' content-type: - application/json cookie: - - __cf_bm=REDACTED; _cfuvid=REDACTED + - COOKIE-XXX host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.109.1 + - 1.83.0 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0KidOU2tphhqhW69ygSBSubHBQ\",\n \"object\": \"chat.completion\",\n \"created\": 1762380652,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer\\nFinal Answer: {\\\"score\\\": 5}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 333,\n \"completion_tokens\": 17,\n \"total_tokens\": 350,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n 
\"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_a788c5aef0\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDDxOIf7hV4pRmOxmlsA7bO8L2z5w\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044286,\n \"model\": \"gpt-4o-2024-08-06\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":5}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 564,\n \"completion_tokens\": 5,\n \"total_tokens\": 569,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_9e0d253e63\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Wed, 05 Nov 2025 22:10:53 GMT + - Wed, 25 Feb 2026 18:31:27 GMT Server: - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - - '1009' + - '393' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '1106' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX 
x-ratelimit-limit-tokens: - - '30000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '29647' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 706ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_save_task_json_output.yaml b/lib/crewai/tests/cassettes/test_save_task_json_output.yaml index 2692841e2..d7449fa08 100644 --- a/lib/crewai/tests/cassettes/test_save_task_json_output.yaml +++ b/lib/crewai/tests/cassettes/test_save_task_json_output.yaml @@ -1,11 +1,14 @@ interactions: - request: - body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:10:38.307164+00:00"}, "ephemeral_trace_id": "00000000-0000-0000-0000-000000000000"}' + body: '{"trace_id": "00000000-0000-0000-0000-000000000000", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-05T22:10:38.307164+00:00"}, + "ephemeral_trace_id": "00000000-0000-0000-0000-000000000000"}' headers: Accept: - '*/*' - Accept-Encoding: - - gzip, deflate, zstd Connection: - keep-alive Content-Length: @@ -13,14 +16,18 @@ interactions: Content-Type: - application/json User-Agent: - - 
CrewAI-CLI/1.3.0 + - X-USER-AGENT-XXX X-Crewai-Version: - 1.3.0 + accept-encoding: + - ACCEPT-ENCODING-XXX method: POST uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches response: body: - string: '{"id": "00000000-0000-0000-0000-000000000000","ephemeral_trace_id": "00000000-0000-0000-0000-000000000000","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-05T22:10:38.904Z","updated_at":"2025-11-05T22:10:38.904Z","access_code": "TRACE-0000000000","user_identifier":null}' + string: '{"id": "00000000-0000-0000-0000-000000000000","ephemeral_trace_id": + "00000000-0000-0000-0000-000000000000","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-05T22:10:38.904Z","updated_at":"2025-11-05T22:10:38.904Z","access_code": + "TRACE-0000000000","user_identifier":null}' headers: Connection: - keep-alive @@ -33,46 +40,61 @@ interactions: cache-control: - no-store content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com 
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com 
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' + - CSP-FILTERED etag: - - W/"06db9ad73130a1da388846e83fc98135" + - ETAG-XXX expires: - '0' permissions-policy: - - camera=(), microphone=(self), geolocation=() + - PERMISSIONS-POLICY-XXX pragma: - no-cache referrer-policy: - - strict-origin-when-cross-origin + - REFERRER-POLICY-XXX strict-transport-security: - - max-age=63072000; includeSubDomains + - STS-XXX vary: - Accept x-content-type-options: - - nosniff + - X-CONTENT-TYPE-XXX x-frame-options: - - SAMEORIGIN + - X-FRAME-OPTIONS-XXX x-permitted-cross-domain-policies: - - none + - X-PERMITTED-XXX x-request-id: - - 34f34729-198e-482e-8c87-163a997bc3f4 + - X-REQUEST-ID-XXX x-runtime: - - '0.239932' + - X-RUNTIME-XXX x-xss-protection: - - 1; mode=block + - X-XSS-PROTECTION-XXX status: code: 201 message: Created - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nEnsure your final answer strictly adheres + to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! 
+ This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX connection: - keep-alive content-length: @@ -81,20 +103,18 @@ interactions: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -105,11 +125,21 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0656gofDPbkHnqVBtb4a5cX4I0\",\n \"object\": \"chat.completion\",\n \"created\": 1762380638,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-CYg0656gofDPbkHnqVBtb4a5cX4I0\",\n \"object\": + \"chat.completion\",\n \"created\": 1762380638,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n 
\"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": + 22,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: @@ -119,26 +149,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:39 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '491' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -146,19 +175,153 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX 
x-ratelimit-reset-requests: - - 120ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"system","content":"You are Scorer. 
You''re + an expert scorer, specialized in scoring titles.\nYour personal goal is: Score + the title"},{"role":"user","content":"\nCurrent Task: Give me an integer score + between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis + is the expected criteria for your final answer: The score of the title.\nyou + MUST return the actual complete content as the final answer, not a summary.\nFormat + your final answer according to the following OpenAPI schema: {\n \"properties\": + {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2541' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + 
x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDDzycCKiyLb7UfPI2tKGyQAw8LGi\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044446,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 513,\n \"completion_tokens\": 5,\n \"total_tokens\": 518,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:07 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '497' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - 
X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_save_task_output.yaml b/lib/crewai/tests/cassettes/test_save_task_output.yaml index ddd2caa05..ddf434235 100644 --- a/lib/crewai/tests/cassettes/test_save_task_output.yaml +++ b/lib/crewai/tests/cassettes/test_save_task_output.yaml @@ -1,106 +1,110 @@ interactions: - request: - body: '{"messages": [{"role": "system", "content": "You are Scorer. You''re an - expert scorer, specialized in scoring titles.\nYour personal goal is: Score - the title\nTo give my best complete final answer to the task use the exact following - format:\n\nThought: I now can give a great answer\nFinal Answer: Your final - answer must be the great and the most complete as possible, it must be outcome - described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", - "content": "\nCurrent Task: Give me an integer score between 1-5 for the following - title: ''The impact of AI in the future of work''\n\nThis is the expect criteria - for your final answer: The score of the title.\nyou MUST return the actual complete - content as the final answer, not a summary.\n\nBegin! This is VERY important - to you, use the tools available and give your best Final Answer, your job depends - on it!\n\nThought:"}], "model": "gpt-4o"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '915' + - '522' content-type: - application/json - cookie: - - __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g; - _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000 host: - api.openai.com - user-agent: - - OpenAI/Python 1.47.0 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.47.0 - x-stainless-raw-response: - - 'true' + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.11.7 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-AB7gKOb785BSjHMwGUL7QpXJHDfmJ\",\n \"object\"\ - : \"chat.completion\",\n \"created\": 1727214500,\n \"model\": \"gpt-4o-2024-05-13\"\ - ,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \ - \ \"role\": \"assistant\",\n \"content\": \"Thought: I now can\ - \ give a great answer\\nFinal Answer: 4\",\n \"refusal\": null\n \ - \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n \ - 
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 186,\n \"completion_tokens\"\ - : 15,\n \"total_tokens\": 201,\n \"completion_tokens_details\": {\n\ - \ \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"\ - fp_52a7f40b0b\"\n}\n" + string: "{\n \"id\": \"chatcmpl-DDG9vqGZskrNpGfY0XnTHvzJGDu5u\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052751,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"4\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 101,\n \"completion_tokens\": + 1,\n \"total_tokens\": 102,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" headers: - CF-Cache-Status: - - DYNAMIC CF-RAY: - - 8c85fa63ed091cf3-GRU + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Tue, 24 Sep 2024 21:48:21 GMT + - Wed, 25 Feb 2026 20:52:32 GMT Server: - cloudflare + Strict-Transport-Security: + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '199' + - '276' + openai-project: + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - strict-transport-security: - - max-age=31536000; includeSubDomains; preload + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 x-ratelimit-limit-requests: - - '10000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - 
'30000000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '9999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '29999781' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 6ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 0s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_93411fed8e9bb5607df0dbc5d178f2cb + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cassettes/test_save_task_pydantic_output.yaml b/lib/crewai/tests/cassettes/test_save_task_pydantic_output.yaml index f854903fa..6959ae12f 100644 --- a/lib/crewai/tests/cassettes/test_save_task_pydantic_output.yaml +++ b/lib/crewai/tests/cassettes/test_save_task_pydantic_output.yaml @@ -1,12 +1,29 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expected criteria for your final answer: The score of the title.\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer strictly adheres to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": - \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo not include the OpenAPI schema in the final output. 
Ensure the final output does not include any code block markers like ```json or ```python.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Scorer. You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title\nTo + give my best complete final answer to the task respond using the exact following + format:\n\nThought: I now can give a great answer\nFinal Answer: Your final + answer must be the great and the most complete as possible, it must be outcome + described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nEnsure your final answer strictly adheres + to the following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nBegin! 
+ This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate, zstd + - ACCEPT-ENCODING-XXX connection: - keep-alive content-length: @@ -15,20 +32,18 @@ interactions: - application/json host: - api.openai.com - user-agent: - - OpenAI/Python 1.109.1 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - 1.109.1 x-stainless-read-timeout: - - '600' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: @@ -39,11 +54,25 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CYg0PI2q4kRtIkqoIwCl9TVmZiD0o\",\n \"object\": \"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: The title \\\"The impact of AI in the future of work\\\" is clear, relevant, and concise. It accurately reflects a significant and current topic that is likely to attract interest. However, it could be more specific about the type of impact or scope to make it more compelling. 
Overall, it is a strong and effective title.\\n\\nFinal Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": 80,\n \"total_tokens\": 374,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \ - \ \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" + string: "{\n \"id\": \"chatcmpl-CYg0PI2q4kRtIkqoIwCl9TVmZiD0o\",\n \"object\": + \"chat.completion\",\n \"created\": 1762380657,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: The title \\\"The impact of + AI in the future of work\\\" is clear, relevant, and concise. It accurately + reflects a significant and current topic that is likely to attract interest. + However, it could be more specific about the type of impact or scope to make + it more compelling. 
Overall, it is a strong and effective title.\\n\\nFinal + Answer: {\\n \\\"score\\\": 4\\n}\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 294,\n \"completion_tokens\": + 80,\n \"total_tokens\": 374,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_4c2851f862\"\n}\n" headers: CF-RAY: - - REDACTED-RAY + - CF-RAY-XXX Connection: - keep-alive Content-Type: @@ -53,26 +82,25 @@ interactions: Server: - cloudflare Set-Cookie: - - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - SET-COOKIE-XXX Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - user-hortuttj2f3qtmxyik2zxf4q + - OPENAI-ORG-XXX openai-processing-ms: - '1476' openai-project: - - proj_fL4UBWR1CMpAAdgzaSKqsVvA + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' x-envoy-upstream-service-time: @@ -80,29 +108,32 @@ interactions: x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: - - '500' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '200000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '499' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '199687' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 120ms + - 
X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 93ms + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_REDACTED + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd", "execution_type": "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T21:20:09.431751+00:00"}, "ephemeral_trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd"}' + body: '{"trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd", "execution_type": + "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null, + "crew_name": "crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level": + "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count": + 0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T21:20:09.431751+00:00"}, + "ephemeral_trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd"}' headers: Accept: - '*/*' - Accept-Encoding: - - gzip, deflate Connection: - keep-alive Content-Length: @@ -110,11 +141,13 @@ interactions: Content-Type: - application/json User-Agent: - - CrewAI-CLI/1.4.1 + - X-USER-AGENT-XXX X-Crewai-Organization-Id: - 73c2b193-f579-422c-84c7-76a39a1da77f X-Crewai-Version: - 1.4.1 + accept-encoding: + - ACCEPT-ENCODING-XXX method: POST uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches response: @@ -132,36 +165,168 @@ interactions: cache-control: - no-store content-security-policy: - - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com https://run.pstmn.io https://apis.google.com 
https://apis.google.com/js/api.js https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/ https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net https://js.hscollectedforms.net - https://js.usemessages.com https://snap.licdn.com https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data: *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com; connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/* https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com https://api.hubspot.com - https://forms.hscollectedforms.net https://api.hubapi.com https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509 https://google.com/ccm/form-data/16713662509 
https://www.google.com/ccm/collect https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self'' *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com https://drive.google.com https://slides.google.com https://accounts.google.com https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/ https://www.youtube.com https://share.descript.com' + - CSP-FILTERED etag: - - W/"e8d1e903c8c6ec2f765163c0c03bed79" + - ETAG-XXX expires: - '0' permissions-policy: - - camera=(), microphone=(self), geolocation=() + - PERMISSIONS-POLICY-XXX pragma: - no-cache referrer-policy: - - strict-origin-when-cross-origin + - REFERRER-POLICY-XXX strict-transport-security: - - max-age=63072000; includeSubDomains + - STS-XXX vary: - Accept x-content-type-options: - - nosniff + - X-CONTENT-TYPE-XXX x-frame-options: - - SAMEORIGIN + - X-FRAME-OPTIONS-XXX x-permitted-cross-domain-policies: - - none + - X-PERMITTED-XXX x-request-id: - - 5ea5f513-c359-4a92-a84a-08ad44d9857b + - X-REQUEST-ID-XXX x-runtime: - - '0.044665' + - X-RUNTIME-XXX x-xss-protection: - - 1; mode=block + - X-XSS-PROTECTION-XXX status: code: 201 message: Created +- request: + body: '{"messages":[{"role":"system","content":"You are Scorer. 
You''re an expert + scorer, specialized in scoring titles.\nYour personal goal is: Score the title"},{"role":"user","content":"\nCurrent + Task: Give me an integer score between 1-5 for the following title: ''The impact + of AI in the future of work''\n\nThis is the expected criteria for your final + answer: The score of the title.\nyou MUST return the actual complete content + as the final answer, not a summary.\nFormat your final answer according to the + following OpenAPI schema: {\n \"properties\": {\n \"score\": {\n \"title\": + \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"score\"\n ],\n \"title\": + \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\n\nIMPORTANT: + Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or + modify the meaning of the content. Only structure it to match the schema format.\n\nDo + not include the OpenAPI schema in the final output. Ensure the final output + does not include any code block markers like ```json or ```python.\n\nProvide + your complete response:"},{"role":"system","content":"You are Scorer. You''re + an expert scorer, specialized in scoring titles.\nYour personal goal is: Score + the title"},{"role":"user","content":"\nCurrent Task: Give me an integer score + between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis + is the expected criteria for your final answer: The score of the title.\nyou + MUST return the actual complete content as the final answer, not a summary.\nFormat + your final answer according to the following OpenAPI schema: {\n \"properties\": + {\n \"score\": {\n \"title\": \"Score\",\n \"type\": \"integer\"\n }\n },\n \"required\": + [\n \"score\"\n ],\n \"title\": \"ScoreOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. 
Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"score":{"title":"Score","type":"integer"}},"required":["score"],"title":"ScoreOutput","type":"object","additionalProperties":false},"name":"ScoreOutput","strict":true}},"stream":false}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '2541' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-DDE0GSLDtGruDzwtl2bwlAXUmvmHG\",\n \"object\": + \"chat.completion\",\n \"created\": 1772044464,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"score\\\":4}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 513,\n \"completion_tokens\": 5,\n \"total_tokens\": 518,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 
0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_a391f2cee0\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 25 Feb 2026 18:34:25 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '530' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK version: 1 diff --git a/lib/crewai/tests/cassettes/test_task_guardrail_process_output.yaml b/lib/crewai/tests/cassettes/test_task_guardrail_process_output.yaml index 26e54a516..d9454ccd2 100644 --- a/lib/crewai/tests/cassettes/test_task_guardrail_process_output.yaml +++ b/lib/crewai/tests/cassettes/test_task_guardrail_process_output.yaml @@ -3,11 +3,7 @@ interactions: body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent. You are a expert at validating the output of a task. 
By providing effective feedback if the output is not valid.\\nYour personal goal is: Validate the output - of the task\\nTo give my best complete final answer to the task respond using - the exact following format:\\n\\nThought: I now can give a great answer\\nFinal - Answer: Your final answer must be the great and the most complete as possible, - it must be outcome described.\\n\\nI MUST use these formats, my job depends - on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure + of the task\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure the following task result complies with the given guardrail.\\n\\n Task result:\\n \\n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy @@ -17,8 +13,9 @@ interactions: what is wrong (e.g., by how much it violates the rule, or what specific part fails).\\n - Focus only on identifying issues \u2014 do not propose corrections.\\n \ - If the Task result complies with the guardrail, saying that is valid\\n - \ \\n\\nBegin! 
This is VERY important to you, use the tools available - and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}" + \ \\n\\nProvide your complete response:\"}],\"model\":\"gpt-4o\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"valid\":{\"description\":\"Whether + the task output complies with the guardrail\",\"title\":\"Valid\",\"type\":\"boolean\"},\"feedback\":{\"anyOf\":[{\"type\":\"string\"},{\"type\":\"null\"}],\"description\":\"A + feedback about the task output if it is not valid\",\"title\":\"Feedback\"}},\"required\":[\"valid\",\"feedback\"],\"title\":\"LLMGuardrailResult\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"LLMGuardrailResult\",\"strict\":true}},\"stream\":false}" headers: User-Agent: - X-USER-AGENT-XXX @@ -31,7 +28,7 @@ interactions: connection: - keep-alive content-length: - - '1467' + - '1567' content-type: - application/json host: @@ -40,142 +37,6 @@ interactions: - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-Cy7yHRYTZi8yzRbcODnKr92keLKCb\",\n \"object\": - \"chat.completion\",\n \"created\": 1768446357,\n \"model\": \"gpt-4o-2024-08-06\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"The task result provided has more than - 10 words. I will count the words to verify this.\\n\\nThe task result is the - following text:\\n\\\"Lorem Ipsum is simply dummy text of the printing and - typesetting industry. 
Lorem Ipsum has been the industry's standard dummy text - ever\\\"\\n\\nCounting the words:\\n\\n1. Lorem \\n2. Ipsum \\n3. is \\n4. - simply \\n5. dummy \\n6. text \\n7. of \\n8. the \\n9. printing \\n10. and - \\n11. typesetting \\n12. industry. \\n13. Lorem \\n14. Ipsum \\n15. has \\n16. - been \\n17. the \\n18. industry's \\n19. standard \\n20. dummy \\n21. text - \\n22. ever\\n\\nThe total word count is 22.\\n\\nThought: I now can give - a great answer\\nFinal Answer: The task result does not comply with the guardrail. - It contains 22 words, which exceeds the limit of 10 words.\",\n \"refusal\": - null,\n \"annotations\": []\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 285,\n \"completion_tokens\": 195,\n \"total_tokens\": 480,\n \"prompt_tokens_details\": - {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": - {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": - 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Thu, 15 Jan 2026 03:05:59 GMT - Server: - - cloudflare - Set-Cookie: - - SET-COOKIE-XXX - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - content-length: - - '1557' - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '2130' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '2147' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - 
x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly - adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": - {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": - {\n \"properties\": {\n \"valid\": {\n \"description\": - \"Whether the task output complies with the guardrail\",\n \"title\": - \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": - {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": - \"null\"\n }\n ],\n \"default\": null,\n \"description\": - \"A feedback about the task output if it is not valid\",\n \"title\": - \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": - \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": - false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. - Ensure the final output does not include any code block markers like ```json - or ```python."},{"role":"user","content":"The task result does not comply with - the guardrail. 
It contains 22 words, which exceeds the limit of 10 words."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether - the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A - feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1835' - content-type: - - application/json - cookie: - - COOKIE-XXX - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' x-stainless-helper-method: - beta.chat.completions.parse x-stainless-lang: @@ -191,23 +52,24 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.13.3 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-Cy7yJiPCk4fXuogyT5e8XeGRLCSf8\",\n \"object\": - \"chat.completion\",\n \"created\": 1768446359,\n \"model\": \"gpt-4o-2024-08-06\",\n + string: "{\n \"id\": \"chatcmpl-DDGANa7LCEtvfCZsEly4mNksTjCX3\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052779,\n \"model\": \"gpt-4o-2024-08-06\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":false,\\\"feedback\\\":\\\"The - task output exceeds the word limit of 10 words by containing 22 words.\\\"}\",\n - \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": - null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 363,\n \"completion_tokens\": 25,\n 
\"total_tokens\": 388,\n \"prompt_tokens_details\": - {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + task result contains more than 10 words. Specifically, it has 20 words, which + exceeds the guardrail limit by 10 words.\\\"}\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 290,\n \"completion_tokens\": + 37,\n \"total_tokens\": 327,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n" + \"default\",\n \"system_fingerprint\": \"fp_64dfa806c7\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -216,7 +78,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 15 Jan 2026 03:05:59 GMT + - Wed, 25 Feb 2026 20:53:00 GMT Server: - cloudflare Strict-Transport-Security: @@ -231,18 +93,16 @@ interactions: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC - content-length: - - '913' openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '488' + - '1108' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '507' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: @@ -266,11 +126,7 @@ interactions: body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Guardrail Agent. You are a expert at validating the output of a task. 
By providing effective feedback if the output is not valid.\\nYour personal goal is: Validate the output - of the task\\nTo give my best complete final answer to the task respond using - the exact following format:\\n\\nThought: I now can give a great answer\\nFinal - Answer: Your final answer must be the great and the most complete as possible, - it must be outcome described.\\n\\nI MUST use these formats, my job depends - on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure + of the task\"},{\"role\":\"user\",\"content\":\"\\nCurrent Task: \\n Ensure the following task result complies with the given guardrail.\\n\\n Task result:\\n \\n Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy @@ -280,8 +136,9 @@ interactions: explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).\\n - Focus only on identifying issues \u2014 do not propose corrections.\\n - If the Task result complies with the guardrail, saying - that is valid\\n \\n\\nBegin! 
This is VERY important to you, use the - tools available and give your best Final Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4o\"}" + that is valid\\n \\n\\nProvide your complete response:\"}],\"model\":\"gpt-4o\",\"response_format\":{\"type\":\"json_schema\",\"json_schema\":{\"schema\":{\"properties\":{\"valid\":{\"description\":\"Whether + the task output complies with the guardrail\",\"title\":\"Valid\",\"type\":\"boolean\"},\"feedback\":{\"anyOf\":[{\"type\":\"string\"},{\"type\":\"null\"}],\"description\":\"A + feedback about the task output if it is not valid\",\"title\":\"Feedback\"}},\"required\":[\"valid\",\"feedback\"],\"title\":\"LLMGuardrailResult\",\"type\":\"object\",\"additionalProperties\":false},\"name\":\"LLMGuardrailResult\",\"strict\":true}},\"stream\":false}" headers: User-Agent: - X-USER-AGENT-XXX @@ -294,7 +151,7 @@ interactions: connection: - keep-alive content-length: - - '1468' + - '1568' content-type: - application/json host: @@ -303,144 +160,6 @@ interactions: - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-Cy7yKa0rmi2YoTLpyXt9hjeLt2rTI\",\n \"object\": - \"chat.completion\",\n \"created\": 1768446360,\n \"model\": \"gpt-4o-2024-08-06\",\n - \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": - \"assistant\",\n \"content\": \"First, I'll count the number of words - in the Task result to ensure it complies with the guardrail. \\n\\nThe Task - result is: \\\"Lorem Ipsum is simply dummy text of the printing and typesetting - industry. 
Lorem Ipsum has been the industry's standard dummy text ever.\\\"\\n\\nBy - counting the words: \\n1. Lorem\\n2. Ipsum\\n3. is\\n4. simply\\n5. dummy\\n6. - text\\n7. of\\n8. the\\n9. printing\\n10. and\\n11. typesetting\\n12. industry\\n13. - Lorem\\n14. Ipsum\\n15. has\\n16. been\\n17. the\\n18. industry's\\n19. standard\\n20. - dummy\\n21. text\\n22. ever\\n\\nThere are 22 words total in the Task result.\\n\\nI - need to verify if the count of 22 words is less than the guardrail limit of - 500 words.\\n\\nThought: I now can give a great answer\\nFinal Answer: The - Task result complies with the guardrail as it contains 22 words, which is - less than the 500-word limit. Therefore, the output is valid.\",\n \"refusal\": - null,\n \"annotations\": []\n },\n \"logprobs\": null,\n - \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 285,\n \"completion_tokens\": 227,\n \"total_tokens\": 512,\n \"prompt_tokens_details\": - {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": - {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": - 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_deacdd5f6f\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Thu, 15 Jan 2026 03:06:02 GMT - Server: - - cloudflare - Set-Cookie: - - SET-COOKIE-XXX - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - content-length: - - '1668' - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '2502' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '2522' - x-openai-proxy-wasm: - - v0.1 - 
x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"Ensure your final answer strictly - adheres to the following OpenAPI schema: {\n \"type\": \"json_schema\",\n \"json_schema\": - {\n \"name\": \"LLMGuardrailResult\",\n \"strict\": true,\n \"schema\": - {\n \"properties\": {\n \"valid\": {\n \"description\": - \"Whether the task output complies with the guardrail\",\n \"title\": - \"Valid\",\n \"type\": \"boolean\"\n },\n \"feedback\": - {\n \"anyOf\": [\n {\n \"type\": \"string\"\n },\n {\n \"type\": - \"null\"\n }\n ],\n \"default\": null,\n \"description\": - \"A feedback about the task output if it is not valid\",\n \"title\": - \"Feedback\"\n }\n },\n \"required\": [\n \"valid\",\n \"feedback\"\n ],\n \"title\": - \"LLMGuardrailResult\",\n \"type\": \"object\",\n \"additionalProperties\": - false\n }\n }\n}\n\nDo not include the OpenAPI schema in the final output. - Ensure the final output does not include any code block markers like ```json - or ```python."},{"role":"user","content":"The Task result complies with the - guardrail as it contains 22 words, which is less than the 500-word limit. 
Therefore, - the output is valid."}],"model":"gpt-4o","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"valid":{"description":"Whether - the task output complies with the guardrail","title":"Valid","type":"boolean"},"feedback":{"anyOf":[{"type":"string"},{"type":"null"}],"description":"A - feedback about the task output if it is not valid","title":"Feedback"}},"required":["valid","feedback"],"title":"LLMGuardrailResult","type":"object","additionalProperties":false},"name":"LLMGuardrailResult","strict":true}},"stream":false}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1864' - content-type: - - application/json - cookie: - - COOKIE-XXX - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' x-stainless-helper-method: - beta.chat.completions.parse x-stainless-lang: @@ -456,22 +175,22 @@ interactions: x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.13.3 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-Cy7yMAjNYSCz2foZPEcSVCuapzF8y\",\n \"object\": - \"chat.completion\",\n \"created\": 1768446362,\n \"model\": \"gpt-4o-2024-08-06\",\n + string: "{\n \"id\": \"chatcmpl-DDGAO7HbV6K3Iy0lQA058TOzTDoVa\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052780,\n \"model\": \"gpt-4o-2024-08-06\",\n \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"{\\\"valid\\\":true,\\\"feedback\\\":null}\",\n \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": - 369,\n \"completion_tokens\": 9,\n \"total_tokens\": 378,\n \"prompt_tokens_details\": + 290,\n \"completion_tokens\": 9,\n \"total_tokens\": 299,\n 
\"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": - \"default\",\n \"system_fingerprint\": \"fp_a0e9480a2f\"\n}\n" + \"default\",\n \"system_fingerprint\": \"fp_1d6b4c17c3\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -480,7 +199,7 @@ interactions: Content-Type: - application/json Date: - - Thu, 15 Jan 2026 03:06:03 GMT + - Wed, 25 Feb 2026 20:53:01 GMT Server: - cloudflare Strict-Transport-Security: @@ -495,18 +214,16 @@ interactions: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC - content-length: - - '837' openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '413' + - '386' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '650' + set-cookie: + - SET-COOKIE-XXX x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: diff --git a/lib/crewai/tests/cassettes/utilities/test_crew_emits_test_kickoff_type_event.yaml b/lib/crewai/tests/cassettes/utilities/test_crew_emits_test_kickoff_type_event.yaml index 89b172a9e..c103d8b5b 100644 --- a/lib/crewai/tests/cassettes/utilities/test_crew_emits_test_kickoff_type_event.yaml +++ b/lib/crewai/tests/cassettes/utilities/test_crew_emits_test_kickoff_type_event.yaml @@ -1,266 +1,240 @@ interactions: - request: - body: null - headers: {} - method: GET - uri: https://pypi.org/pypi/agentops/json + body: '{"messages":[{"role":"system","content":"You are base_agent. 
You are a + helpful assistant that just says hi\nYour personal goal is: Just say hi"},{"role":"user","content":"\nCurrent + Task: Just say hi\n\nThis is the expected criteria for your final answer: hi\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nProvide + your complete response:"}],"model":"gpt-4o-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '399' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.12 + method: POST + uri: https://api.openai.com/v1/chat/completions response: body: - string: '{"info":{"author":null,"author_email":"Alex Reibman , Shawn Qiu , Braelyn Boynton , Howard Gil , Constantin Teodorescu , Pratyush Shukla , Travis Dent , Dwij Patel ","bugtrack_url":null,"classifiers":["License :: OSI Approved :: MIT License","Operating System :: OS Independent","Programming Language :: Python :: 3","Programming Language :: Python :: 3.10","Programming Language :: Python :: 3.11","Programming Language :: Python :: 3.12","Programming Language :: Python :: 3.13","Programming Language :: Python :: 
3.9"],"description":"","description_content_type":null,"docs_url":null,"download_url":null,"downloads":{"last_day":-1,"last_month":-1,"last_week":-1},"dynamic":null,"home_page":null,"keywords":null,"license":null,"license_expression":null,"license_files":["LICENSE"],"maintainer":null,"maintainer_email":null,"name":"agentops","package_url":"https://pypi.org/project/agentops/","platform":null,"project_url":"https://pypi.org/project/agentops/","project_urls":{"Homepage":"https://github.com/AgentOps-AI/agentops","Issues":"https://github.com/AgentOps-AI/agentops/issues"},"provides_extra":null,"release_url":"https://pypi.org/project/agentops/0.4.12/","requires_dist":["httpx<0.29.0,>=0.24.0","opentelemetry-api==1.29.0; - python_version < \"3.10\"","opentelemetry-api>1.29.0; python_version >= \"3.10\"","opentelemetry-exporter-otlp-proto-http==1.29.0; python_version < \"3.10\"","opentelemetry-exporter-otlp-proto-http>1.29.0; python_version >= \"3.10\"","opentelemetry-instrumentation==0.50b0; python_version < \"3.10\"","opentelemetry-instrumentation>=0.50b0; python_version >= \"3.10\"","opentelemetry-sdk==1.29.0; python_version < \"3.10\"","opentelemetry-sdk>1.29.0; python_version >= \"3.10\"","opentelemetry-semantic-conventions==0.50b0; python_version < \"3.10\"","opentelemetry-semantic-conventions>=0.50b0; python_version >= \"3.10\"","ordered-set<5.0.0,>=4.0.0","packaging<25.0,>=21.0","psutil<6.1.0,>=5.9.8","pyyaml<7.0,>=5.3","requests<3.0.0,>=2.0.0","termcolor<2.5.0,>=2.3.0","wrapt<2.0.0,>=1.0.0"],"requires_python":"<3.14,>=3.9","summary":"Observability and DevTool Platform for AI 
Agents","version":"0.4.12","yanked":false,"yanked_reason":null},"last_serial":29075100,"releases":{"0.0.1":[{"comment_text":"","digests":{"blake2b_256":"9b4641d084346e88671acc02e3a0049d3e0925fe99edd88c8b82700dc3c04d01","md5":"2b491f3b3dd01edd4ee37c361087bb46","sha256":"f2cb9d59a0413e7977a44a23dbd6a9d89cda5309b63ed08f5c346c7488acf645"},"downloads":-1,"filename":"agentops-0.0.1-py3-none-any.whl","has_sig":false,"md5_digest":"2b491f3b3dd01edd4ee37c361087bb46","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":10328,"upload_time":"2023-08-21T18:33:47","upload_time_iso_8601":"2023-08-21T18:33:47.827866Z","url":"https://files.pythonhosted.org/packages/9b/46/41d084346e88671acc02e3a0049d3e0925fe99edd88c8b82700dc3c04d01/agentops-0.0.1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"b280bf609d98778499bd42df723100a8e910d9b9827cbd00b804cf0b13bb3c87","md5":"ff218fc16d45cf72f73d50ee9a0afe82","sha256":"5c3d4311b9dde0c71cb475ec99d2963a71604c78d468b333f55e81364f4fe79e"},"downloads":-1,"filename":"agentops-0.0.1.tar.gz","has_sig":false,"md5_digest":"ff218fc16d45cf72f73d50ee9a0afe82","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":11452,"upload_time":"2023-08-21T18:33:49","upload_time_iso_8601":"2023-08-21T18:33:49.613830Z","url":"https://files.pythonhosted.org/packages/b2/80/bf609d98778499bd42df723100a8e910d9b9827cbd00b804cf0b13bb3c87/agentops-0.0.1.tar.gz","yanked":false,"yanked_reason":null}],"0.0.10":[{"comment_text":"","digests":{"blake2b_256":"92933862af53105332cb524db237138d3284b5d6abcc7df5fd4406e382372d94","md5":"8bdea319b5579775eb88efac72e70cd6","sha256":"e8a333567458c1df35538d626bc596f3ba7b8fa2aac5015bc378f3f7f8850669"},"downloads":-1,"filename":"agentops-0.0.10-py3-none-any.whl","has_sig":false,"md5_digest":"8bdea319b5579775eb88efac72e70cd6","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":14752,"upload_time":"2023-12-16T01
:40:40","upload_time_iso_8601":"2023-12-16T01:40:40.867657Z","url":"https://files.pythonhosted.org/packages/92/93/3862af53105332cb524db237138d3284b5d6abcc7df5fd4406e382372d94/agentops-0.0.10-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"c63136b1f2e508b67f92ddb5f51f2acf5abdf2bf4b32d5b355d8018b368dc854","md5":"87bdcd4d7469d22ce922234d4f0b2b98","sha256":"5fbc567bece7b218fc35ce70d208e88e89bb399a9dbf84ab7ad59a2aa559648c"},"downloads":-1,"filename":"agentops-0.0.10.tar.gz","has_sig":false,"md5_digest":"87bdcd4d7469d22ce922234d4f0b2b98","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":15099,"upload_time":"2023-12-16T01:40:42","upload_time_iso_8601":"2023-12-16T01:40:42.281826Z","url":"https://files.pythonhosted.org/packages/c6/31/36b1f2e508b67f92ddb5f51f2acf5abdf2bf4b32d5b355d8018b368dc854/agentops-0.0.10.tar.gz","yanked":false,"yanked_reason":null}],"0.0.11":[{"comment_text":"","digests":{"blake2b_256":"7125ed114f918332cda824092f620b1002fd76ab6b538dd83711b31c93907139","md5":"83ba7e621f01412144aa38306fc1e04c","sha256":"cb80823e065d17dc26bdc8fe951ea7e04b23677ef2b4da939669c6fe1b2502bf"},"downloads":-1,"filename":"agentops-0.0.11-py3-none-any.whl","has_sig":false,"md5_digest":"83ba7e621f01412144aa38306fc1e04c","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":16627,"upload_time":"2023-12-21T19:50:28","upload_time_iso_8601":"2023-12-21T19:50:28.595886Z","url":"https://files.pythonhosted.org/packages/71/25/ed114f918332cda824092f620b1002fd76ab6b538dd83711b31c93907139/agentops-0.0.11-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"9e037750b04398cda2548bbf3d84ce554c4009592095c060c4904e773f3a43da","md5":"5bbb120cc9a5f5ff6fb5dd45691ba279","sha256":"cbf0f39768d47e32be448a3ff3ded665fce64ff8a90c0e10692fd7a3ab4790ee"},"downloads":-1,"filename":"agentops-0.0.11.tar.gz","has_sig":false,"md5_digest":"5bbb120cc9a5f5ff6fb5
dd45691ba279","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":16794,"upload_time":"2023-12-21T19:50:29","upload_time_iso_8601":"2023-12-21T19:50:29.881561Z","url":"https://files.pythonhosted.org/packages/9e/03/7750b04398cda2548bbf3d84ce554c4009592095c060c4904e773f3a43da/agentops-0.0.11.tar.gz","yanked":false,"yanked_reason":null}],"0.0.12":[{"comment_text":"","digests":{"blake2b_256":"adf5cc3e93b2328532ea80b8b36450b8b48a8199ebbe1f75ebb490e57a926b88","md5":"694ba49ca8841532039bdf8dc0250b85","sha256":"9a2c773efbe3353f60d1b86da12333951dad288ba54839615a53b57e5965bea8"},"downloads":-1,"filename":"agentops-0.0.12-py3-none-any.whl","has_sig":false,"md5_digest":"694ba49ca8841532039bdf8dc0250b85","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18602,"upload_time":"2024-01-03T03:47:07","upload_time_iso_8601":"2024-01-03T03:47:07.184203Z","url":"https://files.pythonhosted.org/packages/ad/f5/cc3e93b2328532ea80b8b36450b8b48a8199ebbe1f75ebb490e57a926b88/agentops-0.0.12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"7eb0633ecd30c74a0613c7330ececf0303286622ce429f08ce0daa9ee8cc4ecf","md5":"025daef9622472882a1fa58b6c1fddb5","sha256":"fbb4c38711a7dff3ab08004591451b5a5c33bea5e496fa71fac668c7284513d2"},"downloads":-1,"filename":"agentops-0.0.12.tar.gz","has_sig":false,"md5_digest":"025daef9622472882a1fa58b6c1fddb5","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19826,"upload_time":"2024-01-03T03:47:08","upload_time_iso_8601":"2024-01-03T03:47:08.942790Z","url":"https://files.pythonhosted.org/packages/7e/b0/633ecd30c74a0613c7330ececf0303286622ce429f08ce0daa9ee8cc4ecf/agentops-0.0.12.tar.gz","yanked":false,"yanked_reason":null}],"0.0.13":[{"comment_text":"","digests":{"blake2b_256":"3a0f9c1500adb4191531374db4d7920c51aba92c5472d13d172108e881c36948","md5":"f0a3b78c15af3ab467778f94fb50bf4a","sha256":"3379a231f37a375bda421114a5626643263e84
ce951503d0bdff8411149946e0"},"downloads":-1,"filename":"agentops-0.0.13-py3-none-any.whl","has_sig":false,"md5_digest":"f0a3b78c15af3ab467778f94fb50bf4a","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18709,"upload_time":"2024-01-07T08:57:57","upload_time_iso_8601":"2024-01-07T08:57:57.456769Z","url":"https://files.pythonhosted.org/packages/3a/0f/9c1500adb4191531374db4d7920c51aba92c5472d13d172108e881c36948/agentops-0.0.13-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"cbf9a3824bd30d7107aaca8d409165c0a3574a879efd7ca0fea755e903623b61","md5":"0ebceb6aad82c0622adcd4c2633fc677","sha256":"5e6adf68c2a533496648ea3fabb6e791f39ce810d18dbc1354d118b195fd8556"},"downloads":-1,"filename":"agentops-0.0.13.tar.gz","has_sig":false,"md5_digest":"0ebceb6aad82c0622adcd4c2633fc677","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19933,"upload_time":"2024-01-07T08:57:59","upload_time_iso_8601":"2024-01-07T08:57:59.146933Z","url":"https://files.pythonhosted.org/packages/cb/f9/a3824bd30d7107aaca8d409165c0a3574a879efd7ca0fea755e903623b61/agentops-0.0.13.tar.gz","yanked":false,"yanked_reason":null}],"0.0.14":[{"comment_text":"","digests":{"blake2b_256":"252b1d8ee3b4ab02215eb1a52865a9f2c209d6d4cbf4a3444fb7faf23b02ca66","md5":"a8ba77b0ec0d25072b2e0535a135cc40","sha256":"d5bb4661642daf8fc63a257ef0f04ccc5c79a73e73d57ea04190e74d9a3e6df9"},"downloads":-1,"filename":"agentops-0.0.14-py3-none-any.whl","has_sig":false,"md5_digest":"a8ba77b0ec0d25072b2e0535a135cc40","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18710,"upload_time":"2024-01-08T21:52:28","upload_time_iso_8601":"2024-01-08T21:52:28.340899Z","url":"https://files.pythonhosted.org/packages/25/2b/1d8ee3b4ab02215eb1a52865a9f2c209d6d4cbf4a3444fb7faf23b02ca66/agentops-0.0.14-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"bf3a1fdf85563
c47c2fc6571a1406aecb772f644d53a2adabf4981012971587a","md5":"1ecf7177ab57738c6663384de20887e5","sha256":"c54cee1c9ed1b5b7829fd80d5d01278b1efb50e977e5a890627f4688d0f2afb2"},"downloads":-1,"filename":"agentops-0.0.14.tar.gz","has_sig":false,"md5_digest":"1ecf7177ab57738c6663384de20887e5","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19932,"upload_time":"2024-01-08T21:52:29","upload_time_iso_8601":"2024-01-08T21:52:29.988596Z","url":"https://files.pythonhosted.org/packages/bf/3a/1fdf85563c47c2fc6571a1406aecb772f644d53a2adabf4981012971587a/agentops-0.0.14.tar.gz","yanked":false,"yanked_reason":null}],"0.0.15":[{"comment_text":"","digests":{"blake2b_256":"0c5374cbe5c78db9faa7c939d1a91eff111c4d3f13f4d8d18920ddd48f89f335","md5":"c4528a66151e76c7b1abdcac3c3eaf52","sha256":"aa8034dc9a0e9e56014a06fac521fc2a63a968d34f73e4d4c9bef4b0e87f8241"},"downloads":-1,"filename":"agentops-0.0.15-py3-none-any.whl","has_sig":false,"md5_digest":"c4528a66151e76c7b1abdcac3c3eaf52","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18734,"upload_time":"2024-01-23T08:43:24","upload_time_iso_8601":"2024-01-23T08:43:24.651479Z","url":"https://files.pythonhosted.org/packages/0c/53/74cbe5c78db9faa7c939d1a91eff111c4d3f13f4d8d18920ddd48f89f335/agentops-0.0.15-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"da56c7d8189f4accc182be6729bc44a8006d981173e721ff4751ab784bbadfb3","md5":"cd27bff6c943c6fcbed33ed8280ab5ea","sha256":"71b0e048d2f1b86744105509436cbb6fa51e6b418a50a8253849dc6cdeda6cca"},"downloads":-1,"filename":"agentops-0.0.15.tar.gz","has_sig":false,"md5_digest":"cd27bff6c943c6fcbed33ed8280ab5ea","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19985,"upload_time":"2024-01-23T08:43:26","upload_time_iso_8601":"2024-01-23T08:43:26.316265Z","url":"https://files.pythonhosted.org/packages/da/56/c7d8189f4accc182be6729bc44a8006d981173e721ff4751ab784bbadfb3/
agentops-0.0.15.tar.gz","yanked":false,"yanked_reason":null}],"0.0.16":[{"comment_text":"","digests":{"blake2b_256":"b694d78d43f49688829cab72b7326db1d9e3f436f71eed113f26d402fefa6856","md5":"657c2cad11b3c8b97469524bff19b916","sha256":"e9633dcbc419a47db8de13bd0dc4f5d55f0a50ef3434ffe8e1f8a3468561bd60"},"downloads":-1,"filename":"agentops-0.0.16-py3-none-any.whl","has_sig":false,"md5_digest":"657c2cad11b3c8b97469524bff19b916","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18736,"upload_time":"2024-01-23T09:03:05","upload_time_iso_8601":"2024-01-23T09:03:05.799496Z","url":"https://files.pythonhosted.org/packages/b6/94/d78d43f49688829cab72b7326db1d9e3f436f71eed113f26d402fefa6856/agentops-0.0.16-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"ec353005c98c1e2642d61510a9977c2118d3baa72f50e3c45ef6a341bfd9a3b0","md5":"2f9b28dd0953fdd2da606e19b9131006","sha256":"469588d72734fc6e90c66cf9658613baf2a0b94c933a23cab16820435576c61f"},"downloads":-1,"filename":"agentops-0.0.16.tar.gz","has_sig":false,"md5_digest":"2f9b28dd0953fdd2da606e19b9131006","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19986,"upload_time":"2024-01-23T09:03:07","upload_time_iso_8601":"2024-01-23T09:03:07.645949Z","url":"https://files.pythonhosted.org/packages/ec/35/3005c98c1e2642d61510a9977c2118d3baa72f50e3c45ef6a341bfd9a3b0/agentops-0.0.16.tar.gz","yanked":false,"yanked_reason":null}],"0.0.17":[{"comment_text":"","digests":{"blake2b_256":"f3b2eff27fc5373097fc4f4d3d90f4d0fad1c3be7b923a6213750fe1cb022e6e","md5":"20325afd9b9d9633b120b63967d4ae85","sha256":"1a7c8d8fc8821e2e7eedbbe2683e076bfaca3434401b0d1ca6b830bf3230e61e"},"downloads":-1,"filename":"agentops-0.0.17-py3-none-any.whl","has_sig":false,"md5_digest":"20325afd9b9d9633b120b63967d4ae85","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18827,"upload_time":"2024-01-23T17:12:19","upload_time_iso_8601":"
2024-01-23T17:12:19.300806Z","url":"https://files.pythonhosted.org/packages/f3/b2/eff27fc5373097fc4f4d3d90f4d0fad1c3be7b923a6213750fe1cb022e6e/agentops-0.0.17-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"ac2a2cb7548cce5b009bee9e6f9b46b26df1cca777830231e2d1603b83740053","md5":"4ac65e38fa45946f1d382ce290b904e9","sha256":"cc1e7f796a84c66a29b271d8f0faa4999c152c80195911b817502da002a3ae02"},"downloads":-1,"filename":"agentops-0.0.17.tar.gz","has_sig":false,"md5_digest":"4ac65e38fa45946f1d382ce290b904e9","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":20063,"upload_time":"2024-01-23T17:12:20","upload_time_iso_8601":"2024-01-23T17:12:20.558647Z","url":"https://files.pythonhosted.org/packages/ac/2a/2cb7548cce5b009bee9e6f9b46b26df1cca777830231e2d1603b83740053/agentops-0.0.17.tar.gz","yanked":false,"yanked_reason":null}],"0.0.18":[{"comment_text":"","digests":{"blake2b_256":"321102c865df2245ab8cfaeb48a72ef7011a7bbbe1553a43791d68295ff7c20d","md5":"ad10ec2bf28bf434d3d2f11500f5a396","sha256":"df241f6a62368aa645d1599bb6885688fba0d49dcc26f97f7f65ab29a6af1a2a"},"downloads":-1,"filename":"agentops-0.0.18-py3-none-any.whl","has_sig":false,"md5_digest":"ad10ec2bf28bf434d3d2f11500f5a396","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18860,"upload_time":"2024-01-24T04:39:06","upload_time_iso_8601":"2024-01-24T04:39:06.952175Z","url":"https://files.pythonhosted.org/packages/32/11/02c865df2245ab8cfaeb48a72ef7011a7bbbe1553a43791d68295ff7c20d/agentops-0.0.18-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"7831bd4249dcf9a0cdcad5451ca62aa83187295bb9c16fd1b3034999bff7ceaf","md5":"76dc30c0a2e68f09c0411c23dd5e3a36","sha256":"47e071424247dbbb1b9aaf07ff60a7e376ae01666478d0305d62a9068d61c1c1"},"downloads":-1,"filename":"agentops-0.0.18.tar.gz","has_sig":false,"md5_digest":"76dc30c0a2e68f09c0411c23dd5e3a36","packagetype":"sdi
st","python_version":"source","requires_python":">=3.7","size":20094,"upload_time":"2024-01-24T04:39:09","upload_time_iso_8601":"2024-01-24T04:39:09.795862Z","url":"https://files.pythonhosted.org/packages/78/31/bd4249dcf9a0cdcad5451ca62aa83187295bb9c16fd1b3034999bff7ceaf/agentops-0.0.18.tar.gz","yanked":false,"yanked_reason":null}],"0.0.19":[{"comment_text":"","digests":{"blake2b_256":"9d48292d743b748eddc01b51747e1dac4b62dea0eb5f240877bae821c0049572","md5":"a26178cdf9d5fc5b466a30e5990c16a1","sha256":"0e663e26aad41bf0288d250685e88130430dd087d03ffc69aa7f43e587921b59"},"downloads":-1,"filename":"agentops-0.0.19-py3-none-any.whl","has_sig":false,"md5_digest":"a26178cdf9d5fc5b466a30e5990c16a1","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18380,"upload_time":"2024-01-24T07:58:38","upload_time_iso_8601":"2024-01-24T07:58:38.440021Z","url":"https://files.pythonhosted.org/packages/9d/48/292d743b748eddc01b51747e1dac4b62dea0eb5f240877bae821c0049572/agentops-0.0.19-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"dfe6f3b3fc53b050ec70de947e27227d0ea1e7a75037d082fc5f4d914178d12f","md5":"c62a69951acd19121b059215cf0ddb8b","sha256":"3d46faabf2dad44bd4705279569c76240ab5c71f03f511ba9d363dfd033d453e"},"downloads":-1,"filename":"agentops-0.0.19.tar.gz","has_sig":false,"md5_digest":"c62a69951acd19121b059215cf0ddb8b","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19728,"upload_time":"2024-01-24T07:58:41","upload_time_iso_8601":"2024-01-24T07:58:41.352463Z","url":"https://files.pythonhosted.org/packages/df/e6/f3b3fc53b050ec70de947e27227d0ea1e7a75037d082fc5f4d914178d12f/agentops-0.0.19.tar.gz","yanked":false,"yanked_reason":null}],"0.0.2":[{"comment_text":"","digests":{"blake2b_256":"e593e3863d3c61a75e43a347d423f754bc57559989773af6a9c7bc696ff1d6b4","md5":"8ff77b84c32a4e846ce50c6844664b49","sha256":"3bea2bdd8a26c190675aaf2775d97bc2e3c52d7da05c04ae8ec46fed959e0c6e"},"dow
nloads":-1,"filename":"agentops-0.0.2-py3-none-any.whl","has_sig":false,"md5_digest":"8ff77b84c32a4e846ce50c6844664b49","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":10452,"upload_time":"2023-08-28T23:14:23","upload_time_iso_8601":"2023-08-28T23:14:23.488523Z","url":"https://files.pythonhosted.org/packages/e5/93/e3863d3c61a75e43a347d423f754bc57559989773af6a9c7bc696ff1d6b4/agentops-0.0.2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"82dbea7088c3ba71d9882a8d09d896d8529100f3103d1fe58ff4b890f9d616f1","md5":"02c4fed5ca014de524e5c1dfe3ec2dd2","sha256":"dc183d28965a9514cb33d916b29b3159189f5be64c4a7d943be0cad1a00379f9"},"downloads":-1,"filename":"agentops-0.0.2.tar.gz","has_sig":false,"md5_digest":"02c4fed5ca014de524e5c1dfe3ec2dd2","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":11510,"upload_time":"2023-08-28T23:14:24","upload_time_iso_8601":"2023-08-28T23:14:24.882664Z","url":"https://files.pythonhosted.org/packages/82/db/ea7088c3ba71d9882a8d09d896d8529100f3103d1fe58ff4b890f9d616f1/agentops-0.0.2.tar.gz","yanked":false,"yanked_reason":null}],"0.0.20":[{"comment_text":"","digests":{"blake2b_256":"ad68d8cc6d631618e04ec6988d0c3f4462a74b0b5849719b8373c2470cf9d533","md5":"09b2866043abc3e5cb5dfc17b80068cb","sha256":"ba20fc48902434858f28e3c4a7febe56d275a28bd33378868e7fcde2f53f2430"},"downloads":-1,"filename":"agentops-0.0.20-py3-none-any.whl","has_sig":false,"md5_digest":"09b2866043abc3e5cb5dfc17b80068cb","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18367,"upload_time":"2024-01-25T07:12:48","upload_time_iso_8601":"2024-01-25T07:12:48.514177Z","url":"https://files.pythonhosted.org/packages/ad/68/d8cc6d631618e04ec6988d0c3f4462a74b0b5849719b8373c2470cf9d533/agentops-0.0.20-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"0ba37435a8ce7125c7d75b931a373a188acf1c9e793be28db1
b5c5e5a57d7a10","md5":"fb700178ad44a4697b696ecbd28d115c","sha256":"d50623b03b410c8c88718c29ea271304681e1305b5c05ba824edb92d18aab4f8"},"downloads":-1,"filename":"agentops-0.0.20.tar.gz","has_sig":false,"md5_digest":"fb700178ad44a4697b696ecbd28d115c","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19707,"upload_time":"2024-01-25T07:12:49","upload_time_iso_8601":"2024-01-25T07:12:49.915462Z","url":"https://files.pythonhosted.org/packages/0b/a3/7435a8ce7125c7d75b931a373a188acf1c9e793be28db1b5c5e5a57d7a10/agentops-0.0.20.tar.gz","yanked":false,"yanked_reason":null}],"0.0.21":[{"comment_text":"","digests":{"blake2b_256":"9182ceb8c12e05c0e56ea6c5ba7395c57764ffc5a8134fd045b247793873c172","md5":"ce428cf01a0c1066d3f1f3c8ca6b4f9b","sha256":"fdefe50d945ad669b33c90bf526f9af0e7dc4792b4443aeb907b0a36de2be186"},"downloads":-1,"filename":"agentops-0.0.21-py3-none-any.whl","has_sig":false,"md5_digest":"ce428cf01a0c1066d3f1f3c8ca6b4f9b","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18483,"upload_time":"2024-02-22T03:07:14","upload_time_iso_8601":"2024-02-22T03:07:14.032143Z","url":"https://files.pythonhosted.org/packages/91/82/ceb8c12e05c0e56ea6c5ba7395c57764ffc5a8134fd045b247793873c172/agentops-0.0.21-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"acbb361e3d7ed85fc4207ffbbe44ddfa7ee3b8f96b76c3712d4153d63ebb45e2","md5":"360f00d330fa37ad10f687906e31e219","sha256":"ec10f8e64c553a1c400f1d5c792c3daef383cd718747cabb8e5abc9ef685f25d"},"downloads":-1,"filename":"agentops-0.0.21.tar.gz","has_sig":false,"md5_digest":"360f00d330fa37ad10f687906e31e219","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19787,"upload_time":"2024-02-22T03:07:15","upload_time_iso_8601":"2024-02-22T03:07:15.546312Z","url":"https://files.pythonhosted.org/packages/ac/bb/361e3d7ed85fc4207ffbbe44ddfa7ee3b8f96b76c3712d4153d63ebb45e2/agentops-0.0.21.tar.gz","yanked":fals
e,"yanked_reason":null}],"0.0.22":[{"comment_text":"","digests":{"blake2b_256":"b9da29a808d5bd3045f80b5652737e94695056b4a7cf7830ed7de037b1fe941c","md5":"d9e04a68f0b143432b9e34341e4f0a17","sha256":"fbcd962ff08a2e216637341c36c558be74368fbfda0b2408e55388e4c96474ca"},"downloads":-1,"filename":"agentops-0.0.22-py3-none-any.whl","has_sig":false,"md5_digest":"d9e04a68f0b143432b9e34341e4f0a17","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":18485,"upload_time":"2024-02-29T21:16:00","upload_time_iso_8601":"2024-02-29T21:16:00.124986Z","url":"https://files.pythonhosted.org/packages/b9/da/29a808d5bd3045f80b5652737e94695056b4a7cf7830ed7de037b1fe941c/agentops-0.0.22-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"4d842d1c5d80c69e6c9b8f3fd925c2f2fd084ad6eb29d93fdeadbdeca79e5eda","md5":"8f3b286fd01c2c43f7f7b1e4aebe3594","sha256":"397544ce90474fee59f1e8561c92f4923e9034842be593f1ac41437c5fca5841"},"downloads":-1,"filename":"agentops-0.0.22.tar.gz","has_sig":false,"md5_digest":"8f3b286fd01c2c43f7f7b1e4aebe3594","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":19784,"upload_time":"2024-02-29T21:16:01","upload_time_iso_8601":"2024-02-29T21:16:01.909583Z","url":"https://files.pythonhosted.org/packages/4d/84/2d1c5d80c69e6c9b8f3fd925c2f2fd084ad6eb29d93fdeadbdeca79e5eda/agentops-0.0.22.tar.gz","yanked":false,"yanked_reason":null}],"0.0.3":[{"comment_text":"","digests":{"blake2b_256":"324eda261865c2042eeb5da9827a350760e435896855d5480b8f3136212c3f65","md5":"07a9f9f479a14e65b82054a145514e8d","sha256":"35351701e3caab900243771bda19d6613bdcb84cc9ef2e1adde431a775c09af8"},"downloads":-1,"filename":"agentops-0.0.3-py3-none-any.whl","has_sig":false,"md5_digest":"07a9f9f479a14e65b82054a145514e8d","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":11872,"upload_time":"2023-09-13T23:03:34","upload_time_iso_8601":"2023-09-13T23:03:34.300564Z","url":"htt
ps://files.pythonhosted.org/packages/32/4e/da261865c2042eeb5da9827a350760e435896855d5480b8f3136212c3f65/agentops-0.0.3-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"643485e455d4f411b56bef2a99c40e32f35f456c93deda0a3915231f1da92e56","md5":"c637ee3cfa358b65ed14cfc20d5f803f","sha256":"45a57492e4072f3f27b5e851f6e501b54c796f6ace5f65ecf70e51dbe18ca1a8"},"downloads":-1,"filename":"agentops-0.0.3.tar.gz","has_sig":false,"md5_digest":"c637ee3cfa358b65ed14cfc20d5f803f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":12455,"upload_time":"2023-09-13T23:03:35","upload_time_iso_8601":"2023-09-13T23:03:35.513682Z","url":"https://files.pythonhosted.org/packages/64/34/85e455d4f411b56bef2a99c40e32f35f456c93deda0a3915231f1da92e56/agentops-0.0.3.tar.gz","yanked":false,"yanked_reason":null}],"0.0.4":[{"comment_text":"","digests":{"blake2b_256":"20cc12cf2391854ed588eaf6cdc87f60048f84e8dc7d15792850b7e90a0406b8","md5":"7a3c11004517e22dc7cde83cf6d8d5e8","sha256":"5a5cdcbe6e32c59237521182b83768e650b4519416b42f4e13929a115a0f20ee"},"downloads":-1,"filename":"agentops-0.0.4-py3-none-any.whl","has_sig":false,"md5_digest":"7a3c11004517e22dc7cde83cf6d8d5e8","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":13520,"upload_time":"2023-09-22T09:23:52","upload_time_iso_8601":"2023-09-22T09:23:52.896099Z","url":"https://files.pythonhosted.org/packages/20/cc/12cf2391854ed588eaf6cdc87f60048f84e8dc7d15792850b7e90a0406b8/agentops-0.0.4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"98d2d9f9932d17711dd5d98af674c868686bdbdd9aaae9b8d69e9eecfd4c68f4","md5":"712d3bc3b28703963f8f398845b1d17a","sha256":"97743c6420bc5ba2655ac690041d5f5732fb950130cf61ab25ef6d44be6ecfb2"},"downloads":-1,"filename":"agentops-0.0.4.tar.gz","has_sig":false,"md5_digest":"712d3bc3b28703963f8f398845b1d17a","packagetype":"sdist","python_version":"source","requires_python
":">=3.7","size":14050,"upload_time":"2023-09-22T09:23:54","upload_time_iso_8601":"2023-09-22T09:23:54.315467Z","url":"https://files.pythonhosted.org/packages/98/d2/d9f9932d17711dd5d98af674c868686bdbdd9aaae9b8d69e9eecfd4c68f4/agentops-0.0.4.tar.gz","yanked":false,"yanked_reason":null}],"0.0.5":[{"comment_text":"","digests":{"blake2b_256":"e900cd903074a01932ded9a05dac7849a16c5850ed20c027b954b1eccfba54c1","md5":"1bd4fd6cca14dac4947ecc6c4e3fe0a1","sha256":"e39e1051ba8c58f222f3495196eb939ccc53f04bd279372ae01e694973dd25d6"},"downloads":-1,"filename":"agentops-0.0.5-py3-none-any.whl","has_sig":false,"md5_digest":"1bd4fd6cca14dac4947ecc6c4e3fe0a1","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":14107,"upload_time":"2023-10-07T00:22:48","upload_time_iso_8601":"2023-10-07T00:22:48.714074Z","url":"https://files.pythonhosted.org/packages/e9/00/cd903074a01932ded9a05dac7849a16c5850ed20c027b954b1eccfba54c1/agentops-0.0.5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"08d5c29068ce4df9c85865b45e1cdb7be1df06e54fce087fad18ec390a7aea54","md5":"4d8fc5553e3199fe24d6118337884a2b","sha256":"8f3662e600ba57e9a102c6bf86a6a1e16c0e53e1f38a84fa1b9c01cc07ca4990"},"downloads":-1,"filename":"agentops-0.0.5.tar.gz","has_sig":false,"md5_digest":"4d8fc5553e3199fe24d6118337884a2b","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":14724,"upload_time":"2023-10-07T00:22:50","upload_time_iso_8601":"2023-10-07T00:22:50.304226Z","url":"https://files.pythonhosted.org/packages/08/d5/c29068ce4df9c85865b45e1cdb7be1df06e54fce087fad18ec390a7aea54/agentops-0.0.5.tar.gz","yanked":false,"yanked_reason":null}],"0.0.6":[{"comment_text":"","digests":{"blake2b_256":"2f5b5f3bd8a5b2d96b6417fd4a3fc72ed484e3a4ffacac49035f17bb8df1dd5b","md5":"b7e701ff7953ecca01ceec3a6b9374b2","sha256":"05dea1d06f8f8d06a8f460d18d302febe91f4dad2e3fc0088d05b7017765f3b6"},"downloads":-1,"filename":"agentops-0.0.6-py3-none-any.w
hl","has_sig":false,"md5_digest":"b7e701ff7953ecca01ceec3a6b9374b2","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":14236,"upload_time":"2023-10-27T06:56:14","upload_time_iso_8601":"2023-10-27T06:56:14.029277Z","url":"https://files.pythonhosted.org/packages/2f/5b/5f3bd8a5b2d96b6417fd4a3fc72ed484e3a4ffacac49035f17bb8df1dd5b/agentops-0.0.6-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"4af43743bf40518545c8906687038e5717b1bd33db7ba300a084ec4f6c9c59e0","md5":"0a78dcafcbc6292cf0823181cdc226a7","sha256":"0057cb5d6dc0dd2c444f3371faef40c844a1510700b31824a4fccf5302713361"},"downloads":-1,"filename":"agentops-0.0.6.tar.gz","has_sig":false,"md5_digest":"0a78dcafcbc6292cf0823181cdc226a7","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":14785,"upload_time":"2023-10-27T06:56:15","upload_time_iso_8601":"2023-10-27T06:56:15.069192Z","url":"https://files.pythonhosted.org/packages/4a/f4/3743bf40518545c8906687038e5717b1bd33db7ba300a084ec4f6c9c59e0/agentops-0.0.6.tar.gz","yanked":false,"yanked_reason":null}],"0.0.7":[{"comment_text":"","digests":{"blake2b_256":"3cb1d15c39bbc95f66c64d01cca304f9b4b0c3503509ad92ef29f926c9163599","md5":"f494f6c256899103a80666be68d136ad","sha256":"6984429ca1a9013fd4386105516cb36a46dd7078f7ac81e0a4701f1700bd25b5"},"downloads":-1,"filename":"agentops-0.0.7-py3-none-any.whl","has_sig":false,"md5_digest":"f494f6c256899103a80666be68d136ad","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":14370,"upload_time":"2023-11-02T06:37:36","upload_time_iso_8601":"2023-11-02T06:37:36.480189Z","url":"https://files.pythonhosted.org/packages/3c/b1/d15c39bbc95f66c64d01cca304f9b4b0c3503509ad92ef29f926c9163599/agentops-0.0.7-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"ba709ae02fc635cab51b237dcc3657ec69aac61ee67ea5f903cfae07de19abc8","md5":"b163eaaf9cbafbbd19ec3f91b2b56969
","sha256":"a6f36d94a82d8e481b406f040790cefd4d939f07108737c696327d97c0ccdaf4"},"downloads":-1,"filename":"agentops-0.0.7.tar.gz","has_sig":false,"md5_digest":"b163eaaf9cbafbbd19ec3f91b2b56969","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":14895,"upload_time":"2023-11-02T06:37:37","upload_time_iso_8601":"2023-11-02T06:37:37.698159Z","url":"https://files.pythonhosted.org/packages/ba/70/9ae02fc635cab51b237dcc3657ec69aac61ee67ea5f903cfae07de19abc8/agentops-0.0.7.tar.gz","yanked":false,"yanked_reason":null}],"0.0.8":[{"comment_text":"","digests":{"blake2b_256":"8147fa3ee8807ad961aa50a773b6567e3a624000936d3cc1a578af72d83e02e7","md5":"20cffb5534b4545fa1e8b24a6a24b1da","sha256":"5d50b2ab18a203dbb4555a2cd482dae8df5bf2aa3e771a9758ee28b540330da3"},"downloads":-1,"filename":"agentops-0.0.8-py3-none-any.whl","has_sig":false,"md5_digest":"20cffb5534b4545fa1e8b24a6a24b1da","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":14391,"upload_time":"2023-11-23T06:17:56","upload_time_iso_8601":"2023-11-23T06:17:56.154712Z","url":"https://files.pythonhosted.org/packages/81/47/fa3ee8807ad961aa50a773b6567e3a624000936d3cc1a578af72d83e02e7/agentops-0.0.8-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"707473dc640a3fecfbe84ab7da230f7c862f72f231514a2a488b43a896146ed6","md5":"bba7e74b58849f15d50f4e1270cbd23f","sha256":"3a625d2acc922d99563ce71c5032b0b3b0db57d1c6fade319cf1bb636608eca0"},"downloads":-1,"filename":"agentops-0.0.8.tar.gz","has_sig":false,"md5_digest":"bba7e74b58849f15d50f4e1270cbd23f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":14775,"upload_time":"2023-11-23T06:17:58","upload_time_iso_8601":"2023-11-23T06:17:58.768877Z","url":"https://files.pythonhosted.org/packages/70/74/73dc640a3fecfbe84ab7da230f7c862f72f231514a2a488b43a896146ed6/agentops-0.0.8.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0":[{"comment_text":"","digests"
:{"blake2b_256":"c2a41dc8456edc9bccc0c560967cfdce23a4d7ab8162946be288b54391d80f7c","md5":"5fb09f82b7eeb270c6644dcd3656953f","sha256":"b480fd51fbffc76ae13bb885c2adb1236a7d3b0095b4dafb4a992f6e25647433"},"downloads":-1,"filename":"agentops-0.1.0-py3-none-any.whl","has_sig":false,"md5_digest":"5fb09f82b7eeb270c6644dcd3656953f","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25045,"upload_time":"2024-04-03T02:01:56","upload_time_iso_8601":"2024-04-03T02:01:56.936873Z","url":"https://files.pythonhosted.org/packages/c2/a4/1dc8456edc9bccc0c560967cfdce23a4d7ab8162946be288b54391d80f7c/agentops-0.1.0-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"a81756443f28de774cb7c863a2856e1b07658a9a772ba86dfb1cfbb19bc08fe3","md5":"b93c602c1d1da5d8f7a2dcdaa70f8e21","sha256":"22d3dc87dedf93b3b78a0dfdef8c685b2f3bff9fbab32016360e298a24d311dc"},"downloads":-1,"filename":"agentops-0.1.0.tar.gz","has_sig":false,"md5_digest":"b93c602c1d1da5d8f7a2dcdaa70f8e21","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24685,"upload_time":"2024-04-03T02:01:58","upload_time_iso_8601":"2024-04-03T02:01:58.623055Z","url":"https://files.pythonhosted.org/packages/a8/17/56443f28de774cb7c863a2856e1b07658a9a772ba86dfb1cfbb19bc08fe3/agentops-0.1.0.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b1":[{"comment_text":"","digests":{"blake2b_256":"c03a329c59f001f50701e9e541775c79304a5ce4ffe34d717b1d2af555362e9e","md5":"7c7e84b3b4448580bf5a7e9c08012477","sha256":"825ab57ac5f7840f5a7f8ac195f4af75ec07a9c0972b17d1a57a595420d06208"},"downloads":-1,"filename":"agentops-0.1.0b1-py3-none-any.whl","has_sig":false,"md5_digest":"7c7e84b3b4448580bf5a7e9c08012477","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":23258,"upload_time":"2024-03-18T18:51:08","upload_time_iso_8601":"2024-03-18T18:51:08.693772Z","url":"https://files.pythonhosted.org/packages/c0/3a/329c59f001f50701e9e5
41775c79304a5ce4ffe34d717b1d2af555362e9e/agentops-0.1.0b1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"026ee44f1d5a49924867475f7d101abe40170c0674b4b395f28ce88552c1ba71","md5":"9cf6699fe45f13f1893c8992405e7261","sha256":"f5ce4b34999fe4b21a4ce3643980253d30f8ea9c55f01d96cd35631355fc7ac3"},"downloads":-1,"filename":"agentops-0.1.0b1.tar.gz","has_sig":false,"md5_digest":"9cf6699fe45f13f1893c8992405e7261","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":23842,"upload_time":"2024-03-18T18:51:10","upload_time_iso_8601":"2024-03-18T18:51:10.250127Z","url":"https://files.pythonhosted.org/packages/02/6e/e44f1d5a49924867475f7d101abe40170c0674b4b395f28ce88552c1ba71/agentops-0.1.0b1.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b2":[{"comment_text":"","digests":{"blake2b_256":"6a25e9282f81c3f2615ef6543a0b5ca49dd14b03f311fc5a108ad1aff4f0b720","md5":"1d3e736ef44c0ad8829c50f036ac807b","sha256":"485362b9a68d2327da250f0681b30a9296f0b41e058672b023ae2a8ed924b4d3"},"downloads":-1,"filename":"agentops-0.1.0b2-py3-none-any.whl","has_sig":false,"md5_digest":"1d3e736ef44c0ad8829c50f036ac807b","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":23477,"upload_time":"2024-03-21T23:31:20","upload_time_iso_8601":"2024-03-21T23:31:20.022797Z","url":"https://files.pythonhosted.org/packages/6a/25/e9282f81c3f2615ef6543a0b5ca49dd14b03f311fc5a108ad1aff4f0b720/agentops-0.1.0b2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"3165f702684da6e01f8df74a4291be2914c382ec4cb6f8ed2c3dc6d5a9f177ff","md5":"0d51a6f6bf7cb0d3651574404c9c703c","sha256":"cf9a8b54cc4f76592b6380729c03ec7adfe2256e6b200876d7595e50015f5d62"},"downloads":-1,"filename":"agentops-0.1.0b2.tar.gz","has_sig":false,"md5_digest":"0d51a6f6bf7cb0d3651574404c9c703c","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":23659,"upload_time":"2024-03-21T
23:31:21","upload_time_iso_8601":"2024-03-21T23:31:21.330837Z","url":"https://files.pythonhosted.org/packages/31/65/f702684da6e01f8df74a4291be2914c382ec4cb6f8ed2c3dc6d5a9f177ff/agentops-0.1.0b2.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b3":[{"comment_text":"","digests":{"blake2b_256":"2e64bfe82911b8981ce57f86154915d53b45fffa83ccb9cd6cf4cc71af3f796b","md5":"470bc56525c114dddd908628dcb4f267","sha256":"45b5aaa9f38989cfbfcc4f64e3041050df6d417177874316839225085e60d18d"},"downloads":-1,"filename":"agentops-0.1.0b3-py3-none-any.whl","has_sig":false,"md5_digest":"470bc56525c114dddd908628dcb4f267","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":23522,"upload_time":"2024-03-25T19:34:58","upload_time_iso_8601":"2024-03-25T19:34:58.102867Z","url":"https://files.pythonhosted.org/packages/2e/64/bfe82911b8981ce57f86154915d53b45fffa83ccb9cd6cf4cc71af3f796b/agentops-0.1.0b3-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"0858e4b718e30a6bbe27d32b7128398cb3884f83f89b4121e36cbb7f979466ca","md5":"8ddb13824d3636d841739479e02a12e6","sha256":"9020daab306fe8c7ed0a98a9edcad9772eb1df0eacce7f936a5ed6bf0f7d2af1"},"downloads":-1,"filename":"agentops-0.1.0b3.tar.gz","has_sig":false,"md5_digest":"8ddb13824d3636d841739479e02a12e6","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":23641,"upload_time":"2024-03-25T19:35:01","upload_time_iso_8601":"2024-03-25T19:35:01.119334Z","url":"https://files.pythonhosted.org/packages/08/58/e4b718e30a6bbe27d32b7128398cb3884f83f89b4121e36cbb7f979466ca/agentops-0.1.0b3.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b4":[{"comment_text":"","digests":{"blake2b_256":"67f860440d18b674b06c5a9f4f334bf1f1656dca9f6763d5dd3a2be9e5d2c256","md5":"b11f47108926fb46964bbf28675c3e35","sha256":"93a1f241c3fd7880c3d29ab64baa0661d9ba84e2071092aecb3e4fc574037900"},"downloads":-1,"filename":"agentops-0.1.0b4-py3-none-any.whl","has_sig":false,"md5_digest":
"b11f47108926fb46964bbf28675c3e35","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":23512,"upload_time":"2024-03-26T01:14:54","upload_time_iso_8601":"2024-03-26T01:14:54.986869Z","url":"https://files.pythonhosted.org/packages/67/f8/60440d18b674b06c5a9f4f334bf1f1656dca9f6763d5dd3a2be9e5d2c256/agentops-0.1.0b4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"10feabb836b04b7eae44383f5616ed1c4c6e9aee9beecc3df4617f69f7e3adc5","md5":"fa4512f74baf9909544ebab021862740","sha256":"4716b4e2a627d7a3846ddee3d334c8f5e8a1a2d231ec5286379c0f22920a2a9d"},"downloads":-1,"filename":"agentops-0.1.0b4.tar.gz","has_sig":false,"md5_digest":"fa4512f74baf9909544ebab021862740","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":23668,"upload_time":"2024-03-26T01:14:56","upload_time_iso_8601":"2024-03-26T01:14:56.921017Z","url":"https://files.pythonhosted.org/packages/10/fe/abb836b04b7eae44383f5616ed1c4c6e9aee9beecc3df4617f69f7e3adc5/agentops-0.1.0b4.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b5":[{"comment_text":"","digests":{"blake2b_256":"3ac591c14d08000def551f70ccc1da9ab8b37f57561d24cf7fdf6cd3547610ee","md5":"52a2212b79870ee48f0dbdad852dbb90","sha256":"ed050e51137baa4f46769c77595e1cbe212bb86243f27a29b50218782a0d8242"},"downloads":-1,"filename":"agentops-0.1.0b5-py3-none-any.whl","has_sig":false,"md5_digest":"52a2212b79870ee48f0dbdad852dbb90","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":24597,"upload_time":"2024-04-02T00:56:17","upload_time_iso_8601":"2024-04-02T00:56:17.570921Z","url":"https://files.pythonhosted.org/packages/3a/c5/91c14d08000def551f70ccc1da9ab8b37f57561d24cf7fdf6cd3547610ee/agentops-0.1.0b5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"84d6f0bbe5883b86e749f2f02896d94054ebd84b4d66524e4b7004263ae21a6f","md5":"89c6aa7864f45c17f42a38bb6fae904b","sha256":"6ebe6a94f
0898fd47521755b6c8083c5f6c0c8bb30d43441200b9ef67998ed01"},"downloads":-1,"filename":"agentops-0.1.0b5.tar.gz","has_sig":false,"md5_digest":"89c6aa7864f45c17f42a38bb6fae904b","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24624,"upload_time":"2024-04-02T00:56:18","upload_time_iso_8601":"2024-04-02T00:56:18.703411Z","url":"https://files.pythonhosted.org/packages/84/d6/f0bbe5883b86e749f2f02896d94054ebd84b4d66524e4b7004263ae21a6f/agentops-0.1.0b5.tar.gz","yanked":false,"yanked_reason":null}],"0.1.0b7":[{"comment_text":"","digests":{"blake2b_256":"3cc4ebdb56f0ff88ad20ddba765093aa6c1fc655a8f2bbafbcb2057f998d814f","md5":"d117591df22735d1dedbdc034c93bff6","sha256":"0d4fdb036836dddcce770cffcb2d564b0011a3307224d9a4675fc9bf80ffa5d2"},"downloads":-1,"filename":"agentops-0.1.0b7-py3-none-any.whl","has_sig":false,"md5_digest":"d117591df22735d1dedbdc034c93bff6","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":24592,"upload_time":"2024-04-02T03:20:11","upload_time_iso_8601":"2024-04-02T03:20:11.132539Z","url":"https://files.pythonhosted.org/packages/3c/c4/ebdb56f0ff88ad20ddba765093aa6c1fc655a8f2bbafbcb2057f998d814f/agentops-0.1.0b7-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"cbf0c32014a8ee12df4596ec4d90428e73e0cc5277d1b9bd2b53f815a7f0ea1f","md5":"20364eb7d493e6f9b46666f36be8fb2f","sha256":"938b29cd894ff38c7b1dee02f6422458702ccf8f3b69b69bc0e4220e42a33629"},"downloads":-1,"filename":"agentops-0.1.0b7.tar.gz","has_sig":false,"md5_digest":"20364eb7d493e6f9b46666f36be8fb2f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24611,"upload_time":"2024-04-02T03:20:12","upload_time_iso_8601":"2024-04-02T03:20:12.490524Z","url":"https://files.pythonhosted.org/packages/cb/f0/c32014a8ee12df4596ec4d90428e73e0cc5277d1b9bd2b53f815a7f0ea1f/agentops-0.1.0b7.tar.gz","yanked":false,"yanked_reason":null}],"0.1.1":[{"comment_text":"","digests":{"blak
e2b_256":"ba13ff18b4ff72805bcbe7437aa445cde854a44b4b358564ed2b044678e270b9","md5":"d4f77de8dd58468c6c307e735c1cfaa9","sha256":"8afc0b7871d17f8cbe9996cab5ca10a8a3ed33a3406e1ddc257fadc214daa79a"},"downloads":-1,"filename":"agentops-0.1.1-py3-none-any.whl","has_sig":false,"md5_digest":"d4f77de8dd58468c6c307e735c1cfaa9","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25189,"upload_time":"2024-04-05T22:41:01","upload_time_iso_8601":"2024-04-05T22:41:01.867983Z","url":"https://files.pythonhosted.org/packages/ba/13/ff18b4ff72805bcbe7437aa445cde854a44b4b358564ed2b044678e270b9/agentops-0.1.1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"1dec1d2af6e33dd097feaf1e41a4d34c66d4e4e59ce35c5efac85c18614b9d4b","md5":"f072d8700d4e22fc25eae8bb29a54d1f","sha256":"001582703d5e6ffe67a51f9d67a303b5344e4ef8ca315f24aa43e0dd3d19f53b"},"downloads":-1,"filename":"agentops-0.1.1.tar.gz","has_sig":false,"md5_digest":"f072d8700d4e22fc25eae8bb29a54d1f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24831,"upload_time":"2024-04-05T22:41:03","upload_time_iso_8601":"2024-04-05T22:41:03.677234Z","url":"https://files.pythonhosted.org/packages/1d/ec/1d2af6e33dd097feaf1e41a4d34c66d4e4e59ce35c5efac85c18614b9d4b/agentops-0.1.1.tar.gz","yanked":false,"yanked_reason":null}],"0.1.10":[{"comment_text":"","digests":{"blake2b_256":"cdf9a295ed62701dd4e56d5b57e45e0425db2bcea992c687534c9a2dd1e001f1","md5":"8d82b9cb794b4b4a1e91ddece5447bcf","sha256":"8b80800d4fa5a7a6c85c79f2bf39a50fb446ab8b209519bd51f44dee3b38517e"},"downloads":-1,"filename":"agentops-0.1.10-py3-none-any.whl","has_sig":false,"md5_digest":"8d82b9cb794b4b4a1e91ddece5447bcf","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":29769,"upload_time":"2024-05-10T20:13:39","upload_time_iso_8601":"2024-05-10T20:13:39.477237Z","url":"https://files.pythonhosted.org/packages/cd/f9/a295ed62701dd4e56d5b57e45e042
5db2bcea992c687534c9a2dd1e001f1/agentops-0.1.10-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"f3788e027be4aa50f677a46bba1e0132f021e90d299c6eae093181a91679e378","md5":"4dd3d1fd8c08efb1a08ae212ed9211d7","sha256":"73fbd36cd5f3052d22e64dbea1fa9d70fb02658a901a600101801daa73f359f9"},"downloads":-1,"filename":"agentops-0.1.10.tar.gz","has_sig":false,"md5_digest":"4dd3d1fd8c08efb1a08ae212ed9211d7","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":30268,"upload_time":"2024-05-10T20:14:25","upload_time_iso_8601":"2024-05-10T20:14:25.258530Z","url":"https://files.pythonhosted.org/packages/f3/78/8e027be4aa50f677a46bba1e0132f021e90d299c6eae093181a91679e378/agentops-0.1.10.tar.gz","yanked":false,"yanked_reason":null}],"0.1.11":[{"comment_text":"","digests":{"blake2b_256":"1ebfaaa31babe3bf687312592f99fe900e3808058658577bd1367b7df0332a08","md5":"73c0b028248665a7927688fb8baa7680","sha256":"e9411981a5d0b1190b93e3e1124db3ac6f17015c65a84b92a793f34d79b694c9"},"downloads":-1,"filename":"agentops-0.1.11-py3-none-any.whl","has_sig":false,"md5_digest":"73c0b028248665a7927688fb8baa7680","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":30952,"upload_time":"2024-05-17T00:32:49","upload_time_iso_8601":"2024-05-17T00:32:49.202597Z","url":"https://files.pythonhosted.org/packages/1e/bf/aaa31babe3bf687312592f99fe900e3808058658577bd1367b7df0332a08/agentops-0.1.11-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"6ee43f71a7d1d63595058cd6945e7b9e2de1b06ace04176a6723b7bfb37bf880","md5":"36092e907e4f15a6bafd6788383df112","sha256":"4a365ee56303b5b80d9de21fc13ccb7a3fe44544a6c165327bbfd9213bfe0191"},"downloads":-1,"filename":"agentops-0.1.11.tar.gz","has_sig":false,"md5_digest":"36092e907e4f15a6bafd6788383df112","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":31256,"upload_time":"2024-05-17T00:32:50","uploa
d_time_iso_8601":"2024-05-17T00:32:50.919974Z","url":"https://files.pythonhosted.org/packages/6e/e4/3f71a7d1d63595058cd6945e7b9e2de1b06ace04176a6723b7bfb37bf880/agentops-0.1.11.tar.gz","yanked":false,"yanked_reason":null}],"0.1.12":[{"comment_text":"","digests":{"blake2b_256":"67f5227dffbebeffd3b404db0dd71805f00814e458c0d081faf7a4e70c7e984f","md5":"2591924de6f2e5580e4733b0e8336e2c","sha256":"b4b47c990638b74810cc1c38624ada162094b46e3fdd63883642a16bc5258386"},"downloads":-1,"filename":"agentops-0.1.12-py3-none-any.whl","has_sig":false,"md5_digest":"2591924de6f2e5580e4733b0e8336e2c","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":35605,"upload_time":"2024-05-24T20:11:52","upload_time_iso_8601":"2024-05-24T20:11:52.863109Z","url":"https://files.pythonhosted.org/packages/67/f5/227dffbebeffd3b404db0dd71805f00814e458c0d081faf7a4e70c7e984f/agentops-0.1.12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"9f9ae6dc42ad8d40ad47c6116629b2cbda443d314327ab4d33e1044cb75ba88b","md5":"4c2e76e7b6d4799ef4b464dee29e7255","sha256":"c4f762482fb240fc3503907f52498f2d8d9e4f80236ee4a12bf039317a85fcd7"},"downloads":-1,"filename":"agentops-0.1.12.tar.gz","has_sig":false,"md5_digest":"4c2e76e7b6d4799ef4b464dee29e7255","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":35103,"upload_time":"2024-05-24T20:11:54","upload_time_iso_8601":"2024-05-24T20:11:54.846567Z","url":"https://files.pythonhosted.org/packages/9f/9a/e6dc42ad8d40ad47c6116629b2cbda443d314327ab4d33e1044cb75ba88b/agentops-0.1.12.tar.gz","yanked":false,"yanked_reason":null}],"0.1.2":[{"comment_text":"","digests":{"blake2b_256":"e709193dfe68c2d23de2c60dd0af2af336cbf81d3a3f0c175705783b4c1da580","md5":"588d9877b9767546606d3d6d76d247fc","sha256":"ec79e56889eadd2bab04dfe2f6a899a1b90dc347a66cc80488297368386105b4"},"downloads":-1,"filename":"agentops-0.1.2-py3-none-any.whl","has_sig":false,"md5_digest":"588d9877b9767546606d3d6d7
6d247fc","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25359,"upload_time":"2024-04-09T23:00:51","upload_time_iso_8601":"2024-04-09T23:00:51.897995Z","url":"https://files.pythonhosted.org/packages/e7/09/193dfe68c2d23de2c60dd0af2af336cbf81d3a3f0c175705783b4c1da580/agentops-0.1.2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"8acc872aba374093481bb40ed6b7531b1500b00138baf6bfb9ca7c20fb889d58","md5":"80f8f7c56b1e1a6ff4c48877fe12dd12","sha256":"d213e1037d2d319743889c2bdbc10dc068b0591e2c6c156f69019302490336d5"},"downloads":-1,"filename":"agentops-0.1.2.tar.gz","has_sig":false,"md5_digest":"80f8f7c56b1e1a6ff4c48877fe12dd12","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24968,"upload_time":"2024-04-09T23:00:53","upload_time_iso_8601":"2024-04-09T23:00:53.227389Z","url":"https://files.pythonhosted.org/packages/8a/cc/872aba374093481bb40ed6b7531b1500b00138baf6bfb9ca7c20fb889d58/agentops-0.1.2.tar.gz","yanked":false,"yanked_reason":null}],"0.1.3":[{"comment_text":"","digests":{"blake2b_256":"9701aad65170506dcf29606e9e619d2c0caaee565e5e8b14a791c3e0e86c6356","md5":"4dc967275c884e2a5a1de8df448ae1c6","sha256":"f1ca0f2c5156d826381e9ebd634555215c67e1cb344683abddb382e594f483e4"},"downloads":-1,"filename":"agentops-0.1.3-py3-none-any.whl","has_sig":false,"md5_digest":"4dc967275c884e2a5a1de8df448ae1c6","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25393,"upload_time":"2024-04-09T23:24:20","upload_time_iso_8601":"2024-04-09T23:24:20.821465Z","url":"https://files.pythonhosted.org/packages/97/01/aad65170506dcf29606e9e619d2c0caaee565e5e8b14a791c3e0e86c6356/agentops-0.1.3-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"5e22afde273bcf52cfc6581fba804b44eeebea6ff2ae774f0e5917fa1dd3ee09","md5":"624c9b63dbe56c8b1dd535e1b20ada81","sha256":"dd65e80ec70accfac0692171199b6ecfa37a7d109a3c25f
2191c0934b5004114"},"downloads":-1,"filename":"agentops-0.1.3.tar.gz","has_sig":false,"md5_digest":"624c9b63dbe56c8b1dd535e1b20ada81","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":24994,"upload_time":"2024-04-09T23:24:22","upload_time_iso_8601":"2024-04-09T23:24:22.610198Z","url":"https://files.pythonhosted.org/packages/5e/22/afde273bcf52cfc6581fba804b44eeebea6ff2ae774f0e5917fa1dd3ee09/agentops-0.1.3.tar.gz","yanked":false,"yanked_reason":null}],"0.1.4":[{"comment_text":"","digests":{"blake2b_256":"50313e20afb169e707941cc3342cecb88060aa8746e95d72a202fd90ac4096b6","md5":"3f64b736522ea40c35db6d2a609fc54f","sha256":"476a5e795a6cc87858a0885be61b1e05eed21e4c6ab47f20348c48717c2ac454"},"downloads":-1,"filename":"agentops-0.1.4-py3-none-any.whl","has_sig":false,"md5_digest":"3f64b736522ea40c35db6d2a609fc54f","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25558,"upload_time":"2024-04-11T19:26:01","upload_time_iso_8601":"2024-04-11T19:26:01.162829Z","url":"https://files.pythonhosted.org/packages/50/31/3e20afb169e707941cc3342cecb88060aa8746e95d72a202fd90ac4096b6/agentops-0.1.4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"e0688b1a21f72b85c9bdd56da4223c991bdfb5d0c2accd9ddd326616bf952795","md5":"6f4601047f3e2080b4f7363ff84f15f3","sha256":"d55e64953f84654d44557b496a3b3744a20449b854af84fa83a15be75b362b3d"},"downloads":-1,"filename":"agentops-0.1.4.tar.gz","has_sig":false,"md5_digest":"6f4601047f3e2080b4f7363ff84f15f3","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":25390,"upload_time":"2024-04-11T19:26:02","upload_time_iso_8601":"2024-04-11T19:26:02.991657Z","url":"https://files.pythonhosted.org/packages/e0/68/8b1a21f72b85c9bdd56da4223c991bdfb5d0c2accd9ddd326616bf952795/agentops-0.1.4.tar.gz","yanked":false,"yanked_reason":null}],"0.1.5":[{"comment_text":"","digests":{"blake2b_256":"641c742793fa77c803e5667830ccd34b8d313d11f3
61a105fe92ce68d871cc5f","md5":"964421a604c67c07b5c72b70ceee6ce8","sha256":"bc65dd4cd85d1ffcba195f2490b5a4380d0b565dd0f4a71ecc64ed96a7fe1eee"},"downloads":-1,"filename":"agentops-0.1.5-py3-none-any.whl","has_sig":false,"md5_digest":"964421a604c67c07b5c72b70ceee6ce8","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":25793,"upload_time":"2024-04-20T01:56:23","upload_time_iso_8601":"2024-04-20T01:56:23.089343Z","url":"https://files.pythonhosted.org/packages/64/1c/742793fa77c803e5667830ccd34b8d313d11f361a105fe92ce68d871cc5f/agentops-0.1.5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"62beabcb235daf34d4740961c4ad295b8dfb8a053ac6a1e341394e36f722ea89","md5":"3ff7fa3135bc5c4254aaa99e3cc00dc8","sha256":"17f0a573362d9c4770846874a4091662304d6889e21ca6a7dd747be48b9c8597"},"downloads":-1,"filename":"agentops-0.1.5.tar.gz","has_sig":false,"md5_digest":"3ff7fa3135bc5c4254aaa99e3cc00dc8","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":25664,"upload_time":"2024-04-20T01:56:24","upload_time_iso_8601":"2024-04-20T01:56:24.303013Z","url":"https://files.pythonhosted.org/packages/62/be/abcb235daf34d4740961c4ad295b8dfb8a053ac6a1e341394e36f722ea89/agentops-0.1.5.tar.gz","yanked":false,"yanked_reason":null}],"0.1.6":[{"comment_text":"","digests":{"blake2b_256":"430b9f3fcfc2f9778dbbfc1fd68b223e9a91938505ef987e17b93a631bb6b2e4","md5":"28ce2e6aa7a4598fa1e764d9762fd030","sha256":"9dff841ef71f5fad2d897012a00f50011a706970e0e5eaae9d7b0540a637b128"},"downloads":-1,"filename":"agentops-0.1.6-py3-none-any.whl","has_sig":false,"md5_digest":"28ce2e6aa7a4598fa1e764d9762fd030","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":26154,"upload_time":"2024-04-20T03:48:58","upload_time_iso_8601":"2024-04-20T03:48:58.494391Z","url":"https://files.pythonhosted.org/packages/43/0b/9f3fcfc2f9778dbbfc1fd68b223e9a91938505ef987e17b93a631bb6b2e4/agentops-0.1.6-py3-non
e-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"a6c2b437246ce28bad9c2bbad9a9371f7008f76a979fb19699588212f653daf9","md5":"fc81fd641ad630a17191d4a9cf77193b","sha256":"48ddb49fc01eb83ce151d3f08ae670b3d603c454aa35b4ea145f2dc15e081b36"},"downloads":-1,"filename":"agentops-0.1.6.tar.gz","has_sig":false,"md5_digest":"fc81fd641ad630a17191d4a9cf77193b","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":25792,"upload_time":"2024-04-20T03:48:59","upload_time_iso_8601":"2024-04-20T03:48:59.957150Z","url":"https://files.pythonhosted.org/packages/a6/c2/b437246ce28bad9c2bbad9a9371f7008f76a979fb19699588212f653daf9/agentops-0.1.6.tar.gz","yanked":false,"yanked_reason":null}],"0.1.7":[{"comment_text":"","digests":{"blake2b_256":"1ca529570477f62973c6b835e09dc5bbda7498c1a26ba7a428cdb08a71ae86ca","md5":"a1962d1bb72c6fd00e67e83fe56a3692","sha256":"ce7a9e89dcf17507ee6db85017bef8f87fc4e8a23745f3f73e1fbda5489fb6f9"},"downloads":-1,"filename":"agentops-0.1.7-py3-none-any.whl","has_sig":false,"md5_digest":"a1962d1bb72c6fd00e67e83fe56a3692","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.10","size":27891,"upload_time":"2024-05-03T19:21:38","upload_time_iso_8601":"2024-05-03T19:21:38.018602Z","url":"https://files.pythonhosted.org/packages/1c/a5/29570477f62973c6b835e09dc5bbda7498c1a26ba7a428cdb08a71ae86ca/agentops-0.1.7-py3-none-any.whl","yanked":true,"yanked_reason":"Introduced - breaking 
bug"},{"comment_text":"","digests":{"blake2b_256":"b2447ce75e71fcc9605a609b41adc52d517eba4356d15f7ca77d46f683ca07f1","md5":"9a9bb22af4b30c454d46b9a01e8701a0","sha256":"70d22e9a71ea13af6e6ad9c1cffe63c98f9dbccf91bda199825609379b2babaf"},"downloads":-1,"filename":"agentops-0.1.7.tar.gz","has_sig":false,"md5_digest":"9a9bb22af4b30c454d46b9a01e8701a0","packagetype":"sdist","python_version":"source","requires_python":">=3.10","size":28122,"upload_time":"2024-05-03T19:21:39","upload_time_iso_8601":"2024-05-03T19:21:39.415523Z","url":"https://files.pythonhosted.org/packages/b2/44/7ce75e71fcc9605a609b41adc52d517eba4356d15f7ca77d46f683ca07f1/agentops-0.1.7.tar.gz","yanked":true,"yanked_reason":"Introduced breaking bug"}],"0.1.8":[{"comment_text":"","digests":{"blake2b_256":"38c63d0d19eeae4c3c9e3ff5957b10c3c16a4a9fd2be6673fbfc965f8bb4fd08","md5":"e12d3d92f51f5b2fed11a01742e5b5b5","sha256":"d49d113028a891d50900bb4fae253218cc49519f7fe39f9ea15f8f2b29d6d7ef"},"downloads":-1,"filename":"agentops-0.1.8-py3-none-any.whl","has_sig":false,"md5_digest":"e12d3d92f51f5b2fed11a01742e5b5b5","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.10","size":27977,"upload_time":"2024-05-04T03:01:53","upload_time_iso_8601":"2024-05-04T03:01:53.905081Z","url":"https://files.pythonhosted.org/packages/38/c6/3d0d19eeae4c3c9e3ff5957b10c3c16a4a9fd2be6673fbfc965f8bb4fd08/agentops-0.1.8-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"9269e51fa1714f169f692e4fad0a42ebeb77c7a27c48f62b751c869ad6441c69","md5":"07dbdb45f9ec086b1bc314d6a8264423","sha256":"5762137a84e2309e1b6ca9a0fd72c8b72c90f6f73ba49549980722221960cac8"},"downloads":-1,"filename":"agentops-0.1.8.tar.gz","has_sig":false,"md5_digest":"07dbdb45f9ec086b1bc314d6a8264423","packagetype":"sdist","python_version":"source","requires_python":">=3.10","size":28189,"upload_time":"2024-05-04T03:01:55","upload_time_iso_8601":"2024-05-04T03:01:55.328668Z","url":"https://files.pythonhosted
.org/packages/92/69/e51fa1714f169f692e4fad0a42ebeb77c7a27c48f62b751c869ad6441c69/agentops-0.1.8.tar.gz","yanked":false,"yanked_reason":null}],"0.1.9":[{"comment_text":"","digests":{"blake2b_256":"eb5a920e71729bd1f06b002ee146b38b0d1862357a1f484628e6b20a7d3dcca1","md5":"6ae4929d91c4bb8025edc86b5322630c","sha256":"af7983ba4929b04a34714dd97d7e82c11384ebbe9d7d8bc7b673e1263c4c79a1"},"downloads":-1,"filename":"agentops-0.1.9-py3-none-any.whl","has_sig":false,"md5_digest":"6ae4929d91c4bb8025edc86b5322630c","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":28458,"upload_time":"2024-05-07T07:07:30","upload_time_iso_8601":"2024-05-07T07:07:30.798380Z","url":"https://files.pythonhosted.org/packages/eb/5a/920e71729bd1f06b002ee146b38b0d1862357a1f484628e6b20a7d3dcca1/agentops-0.1.9-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"df2b8fc76d629d8a83b0796612a27b966426550114c930eee5d730654fcd9fe9","md5":"43090632f87cd398ed77b57daa8c28d6","sha256":"7f428bfda2db57a994029b1c9f72b63ca7660616635c9c671b2b729d112a833e"},"downloads":-1,"filename":"agentops-0.1.9.tar.gz","has_sig":false,"md5_digest":"43090632f87cd398ed77b57daa8c28d6","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":28596,"upload_time":"2024-05-07T07:07:35","upload_time_iso_8601":"2024-05-07T07:07:35.242350Z","url":"https://files.pythonhosted.org/packages/df/2b/8fc76d629d8a83b0796612a27b966426550114c930eee5d730654fcd9fe9/agentops-0.1.9.tar.gz","yanked":false,"yanked_reason":null}],"0.2.0":[{"comment_text":"","digests":{"blake2b_256":"483560ec38a81a7e9588d32730ed4f581621169216f968771d5f611388f68a9b","md5":"bdda5480977cccd55628e117e8c8da04","sha256":"bee84bf046c9b4346c5f0f50e2087a992e8d2eae80b3fe9f01c456b49c299bcc"},"downloads":-1,"filename":"agentops-0.2.0-py3-none-any.whl","has_sig":false,"md5_digest":"bdda5480977cccd55628e117e8c8da04","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7",
"size":35921,"upload_time":"2024-05-28T22:04:14","upload_time_iso_8601":"2024-05-28T22:04:14.813154Z","url":"https://files.pythonhosted.org/packages/48/35/60ec38a81a7e9588d32730ed4f581621169216f968771d5f611388f68a9b/agentops-0.2.0-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"8d7591c79141d31da4e56d6c6a00737b50dcc2f1ce8a711c1293d2a1d70478fc","md5":"71e3c3b9fe0286c9b58d81ba1c12a42d","sha256":"ca340136abff6a3727729c3eda87f0768e5ba2b672ce03320cb52ad138b05598"},"downloads":-1,"filename":"agentops-0.2.0.tar.gz","has_sig":false,"md5_digest":"71e3c3b9fe0286c9b58d81ba1c12a42d","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":35498,"upload_time":"2024-05-28T22:04:16","upload_time_iso_8601":"2024-05-28T22:04:16.598374Z","url":"https://files.pythonhosted.org/packages/8d/75/91c79141d31da4e56d6c6a00737b50dcc2f1ce8a711c1293d2a1d70478fc/agentops-0.2.0.tar.gz","yanked":false,"yanked_reason":null}],"0.2.1":[{"comment_text":"","digests":{"blake2b_256":"fa3b84032b7dca3d7315b329db6681bbfe0872c2a46d62ca992a05f2d6a078e1","md5":"ce3fc46711fa8225a3d6a9566f95f875","sha256":"7dde95db92c8306c0a17e193bfb5ee20e71e16630ccc629db685e148b3aca3f6"},"downloads":-1,"filename":"agentops-0.2.1-py3-none-any.whl","has_sig":false,"md5_digest":"ce3fc46711fa8225a3d6a9566f95f875","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":36375,"upload_time":"2024-06-03T18:40:02","upload_time_iso_8601":"2024-06-03T18:40:02.820700Z","url":"https://files.pythonhosted.org/packages/fa/3b/84032b7dca3d7315b329db6681bbfe0872c2a46d62ca992a05f2d6a078e1/agentops-0.2.1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"d6286ad330da5736588a54575fde95502006da58c3e9f4f15933f5876c1e1482","md5":"faa972c26a3e59fb6ca04f253165da22","sha256":"9f18a36a79c04e9c06f6e96aefe75f0fb1d08e562873315d6cb945488306e515"},"downloads":-1,"filename":"agentops-0.2.1.tar.gz","has_sig":false,
"md5_digest":"faa972c26a3e59fb6ca04f253165da22","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":35784,"upload_time":"2024-06-03T18:40:05","upload_time_iso_8601":"2024-06-03T18:40:05.431174Z","url":"https://files.pythonhosted.org/packages/d6/28/6ad330da5736588a54575fde95502006da58c3e9f4f15933f5876c1e1482/agentops-0.2.1.tar.gz","yanked":false,"yanked_reason":null}],"0.2.2":[{"comment_text":"","digests":{"blake2b_256":"fbe73a57dd30e354b7bcc5a86908fc92aa16378035c69eb225ce254387940b5d","md5":"c24e4656bb6de14ffb9d810fe7872829","sha256":"57aab8a5d76a0dd7b1f0b14e90e778c42444eeaf5c48f2f387719735d7d840ee"},"downloads":-1,"filename":"agentops-0.2.2-py3-none-any.whl","has_sig":false,"md5_digest":"c24e4656bb6de14ffb9d810fe7872829","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":36588,"upload_time":"2024-06-05T19:30:29","upload_time_iso_8601":"2024-06-05T19:30:29.208415Z","url":"https://files.pythonhosted.org/packages/fb/e7/3a57dd30e354b7bcc5a86908fc92aa16378035c69eb225ce254387940b5d/agentops-0.2.2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"89c51cbd038b9d2898b7f1b05943c338aa4aa9654d7e7763d8fa8d73a25fbfb6","md5":"401bfce001638cc26d7975f6534b5bab","sha256":"d4135c96ad7ec39c81015b3e33dfa977d2d846a685aba0d1922d2d6e3dca7fff"},"downloads":-1,"filename":"agentops-0.2.2.tar.gz","has_sig":false,"md5_digest":"401bfce001638cc26d7975f6534b5bab","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":36012,"upload_time":"2024-06-05T19:30:31","upload_time_iso_8601":"2024-06-05T19:30:31.173781Z","url":"https://files.pythonhosted.org/packages/89/c5/1cbd038b9d2898b7f1b05943c338aa4aa9654d7e7763d8fa8d73a25fbfb6/agentops-0.2.2.tar.gz","yanked":false,"yanked_reason":null}],"0.2.3":[{"comment_text":"","digests":{"blake2b_256":"b66fb36e2bb7158f45b6c496ce3cec50ef861e130cfa3ec8c62e709d63fa9e94","md5":"b3f6a8d97cc0129a9e4730b7810509c6","sha256":"a1829a21301
223c26464cbc9da5bfba2f3750e21238912ee1d2f3097c358859a"},"downloads":-1,"filename":"agentops-0.2.3-py3-none-any.whl","has_sig":false,"md5_digest":"b3f6a8d97cc0129a9e4730b7810509c6","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":36986,"upload_time":"2024-06-13T19:56:33","upload_time_iso_8601":"2024-06-13T19:56:33.675807Z","url":"https://files.pythonhosted.org/packages/b6/6f/b36e2bb7158f45b6c496ce3cec50ef861e130cfa3ec8c62e709d63fa9e94/agentops-0.2.3-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"f4d34aed81a4ec4251131b94fb8ed4edf0823922bfda66ba0e4c43d9452111d2","md5":"466abe04d466a950d4bcebbe9c3ccc27","sha256":"b502b83bb4954386a28c4304028ba8cd2b45303f7e1f84720477b521267a3b4e"},"downloads":-1,"filename":"agentops-0.2.3.tar.gz","has_sig":false,"md5_digest":"466abe04d466a950d4bcebbe9c3ccc27","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":37024,"upload_time":"2024-06-13T19:56:35","upload_time_iso_8601":"2024-06-13T19:56:35.481794Z","url":"https://files.pythonhosted.org/packages/f4/d3/4aed81a4ec4251131b94fb8ed4edf0823922bfda66ba0e4c43d9452111d2/agentops-0.2.3.tar.gz","yanked":false,"yanked_reason":null}],"0.2.4":[{"comment_text":"","digests":{"blake2b_256":"a4d4e91fb66bc2eb7effb53f7d9481da04e60809d10240306452a8307aca7985","md5":"f1ba1befb6bd854d5fd6f670937dcb55","sha256":"96162c28cc0391011c04e654273e5a96ec4dcf015e27a7ac12a1ea4077d38950"},"downloads":-1,"filename":"agentops-0.2.4-py3-none-any.whl","has_sig":false,"md5_digest":"f1ba1befb6bd854d5fd6f670937dcb55","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":37518,"upload_time":"2024-06-24T19:31:58","upload_time_iso_8601":"2024-06-24T19:31:58.838680Z","url":"https://files.pythonhosted.org/packages/a4/d4/e91fb66bc2eb7effb53f7d9481da04e60809d10240306452a8307aca7985/agentops-0.2.4-py3-none-any.whl","yanked":true,"yanked_reason":"Potential - breaking 
change"},{"comment_text":"","digests":{"blake2b_256":"8e4b920629e08c956cdc74a31ab466d005eb13d86c2d58fa2d2bd261cf36c37b","md5":"527c82f21f01f13b879a1fca90ddb209","sha256":"d263de21eb40e15eb17adc31821fc0dee4ff4ca4501a9feb7ed376d473063208"},"downloads":-1,"filename":"agentops-0.2.4.tar.gz","has_sig":false,"md5_digest":"527c82f21f01f13b879a1fca90ddb209","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":37656,"upload_time":"2024-06-24T19:32:01","upload_time_iso_8601":"2024-06-24T19:32:01.155014Z","url":"https://files.pythonhosted.org/packages/8e/4b/920629e08c956cdc74a31ab466d005eb13d86c2d58fa2d2bd261cf36c37b/agentops-0.2.4.tar.gz","yanked":true,"yanked_reason":"Potential breaking change"}],"0.2.5":[{"comment_text":"","digests":{"blake2b_256":"47c73ab9d7d971b664a9bdff6e6464afb6c1de8eb0f845d8de93eb036d5dcc60","md5":"bed576cc1591da4783777920fb223761","sha256":"ff87b82d1efaf50b10624e00c6e9334f4c16ffe08ec7f9889b4417c231c31471"},"downloads":-1,"filename":"agentops-0.2.5-py3-none-any.whl","has_sig":false,"md5_digest":"bed576cc1591da4783777920fb223761","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":37529,"upload_time":"2024-06-26T22:57:15","upload_time_iso_8601":"2024-06-26T22:57:15.646328Z","url":"https://files.pythonhosted.org/packages/47/c7/3ab9d7d971b664a9bdff6e6464afb6c1de8eb0f845d8de93eb036d5dcc60/agentops-0.2.5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"31c48f2af30ae75dbdb4697506f80f76ce786f79014deb8c6679fa62962fdd6f","md5":"42def99798edfaf201fa6f62846e77c5","sha256":"6bad7aca37af6174307769550a53ec00824049a57e97b8868a9a213b2272adb4"},"downloads":-1,"filename":"agentops-0.2.5.tar.gz","has_sig":false,"md5_digest":"42def99798edfaf201fa6f62846e77c5","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":37703,"upload_time":"2024-06-26T22:57:17","upload_time_iso_8601":"2024-06-26T22:57:17.337904Z","url":"https://files.pythonhost
ed.org/packages/31/c4/8f2af30ae75dbdb4697506f80f76ce786f79014deb8c6679fa62962fdd6f/agentops-0.2.5.tar.gz","yanked":false,"yanked_reason":null}],"0.2.6":[{"comment_text":"","digests":{"blake2b_256":"5af2f90538b00d887c04a5570e8a3af4aef27a600a67c058a0ee6befafd60748","md5":"8ef3ed13ed582346b71648ca9df30f7c","sha256":"59e88000a9f108931fd68056f22def7a7f4b3015906de5791e777c23ba7dee52"},"downloads":-1,"filename":"agentops-0.2.6-py3-none-any.whl","has_sig":false,"md5_digest":"8ef3ed13ed582346b71648ca9df30f7c","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":37534,"upload_time":"2024-06-28T21:41:56","upload_time_iso_8601":"2024-06-28T21:41:56.933334Z","url":"https://files.pythonhosted.org/packages/5a/f2/f90538b00d887c04a5570e8a3af4aef27a600a67c058a0ee6befafd60748/agentops-0.2.6-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"bcf412c388dccc301ad54a501843ba5b5dd359575dcef9ac24c18a619a32214d","md5":"89a6b04f12801682b53ee0133593ce74","sha256":"7906a08c9154355484deb173b82631f9acddec3775b2d5e8ca946abdee27183b"},"downloads":-1,"filename":"agentops-0.2.6.tar.gz","has_sig":false,"md5_digest":"89a6b04f12801682b53ee0133593ce74","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":37874,"upload_time":"2024-06-28T21:41:59","upload_time_iso_8601":"2024-06-28T21:41:59.143953Z","url":"https://files.pythonhosted.org/packages/bc/f4/12c388dccc301ad54a501843ba5b5dd359575dcef9ac24c18a619a32214d/agentops-0.2.6.tar.gz","yanked":false,"yanked_reason":null}],"0.3.0":[{"comment_text":"","digests":{"blake2b_256":"b8e996f12ac457f46c370c6f70f344e975d534f2c92853703ee29802f0127024","md5":"d9c6995a843b49ac7eb6f500fa1f3c2a","sha256":"22aeb3355e66b32a2b2a9f676048b81979b2488feddb088f9266034b3ed50539"},"downloads":-1,"filename":"agentops-0.3.0-py3-none-any.whl","has_sig":false,"md5_digest":"d9c6995a843b49ac7eb6f500fa1f3c2a","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7
","size":39430,"upload_time":"2024-07-17T18:38:24","upload_time_iso_8601":"2024-07-17T18:38:24.763919Z","url":"https://files.pythonhosted.org/packages/b8/e9/96f12ac457f46c370c6f70f344e975d534f2c92853703ee29802f0127024/agentops-0.3.0-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"7e2d6fda9613562c0394d7ef3dd8f0cb9fc4ebaa8d413862fce33940c73564d6","md5":"8fa67ca01ca726e3bfcd66898313f33f","sha256":"6c0c08a57410fa5e826a7bafa1deeba9f7b3524709427d9e1abbd0964caaf76b"},"downloads":-1,"filename":"agentops-0.3.0.tar.gz","has_sig":false,"md5_digest":"8fa67ca01ca726e3bfcd66898313f33f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":41734,"upload_time":"2024-07-17T18:38:26","upload_time_iso_8601":"2024-07-17T18:38:26.447237Z","url":"https://files.pythonhosted.org/packages/7e/2d/6fda9613562c0394d7ef3dd8f0cb9fc4ebaa8d413862fce33940c73564d6/agentops-0.3.0.tar.gz","yanked":false,"yanked_reason":null}],"0.3.10":[{"comment_text":"","digests":{"blake2b_256":"eb5e3ac36b33d3e95747d64effd509f66a9b3b76b47216b16f492e27d8d90b0c","md5":"6fade0b81fc65b2c79a869b5f240590b","sha256":"b304d366691281e08c1f02307aabdd551ae4f68b0de82bbbb4cf6f651af2dd16"},"downloads":-1,"filename":"agentops-0.3.10-py3-none-any.whl","has_sig":false,"md5_digest":"6fade0b81fc65b2c79a869b5f240590b","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":41201,"upload_time":"2024-08-19T20:51:49","upload_time_iso_8601":"2024-08-19T20:51:49.487947Z","url":"https://files.pythonhosted.org/packages/eb/5e/3ac36b33d3e95747d64effd509f66a9b3b76b47216b16f492e27d8d90b0c/agentops-0.3.10-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"8367ca0cb01df6b529f0127d23ec661e92c95ff68faf544439d86ec2331f3a52","md5":"639da9c2a3381cb3f62812bfe48a5e57","sha256":"40f895019f29bc5a6c023110cbec32870e5edb3e3926f8100974db8d3e299e2a"},"downloads":-1,"filename":"agentops-0.3.10.tar.gz","has_sig":
false,"md5_digest":"639da9c2a3381cb3f62812bfe48a5e57","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":45332,"upload_time":"2024-08-19T20:51:50","upload_time_iso_8601":"2024-08-19T20:51:50.714217Z","url":"https://files.pythonhosted.org/packages/83/67/ca0cb01df6b529f0127d23ec661e92c95ff68faf544439d86ec2331f3a52/agentops-0.3.10.tar.gz","yanked":false,"yanked_reason":null}],"0.3.11":[{"comment_text":"","digests":{"blake2b_256":"0b078e6a74f084463def9d79d2c84d79475adc0229bbfb2e57401b0616ba6d6a","md5":"e760d867d9431d1bc13798024237ab99","sha256":"75fe10b8fc86c7f5c2633139ac1c06959611f22434fc1aaa8688c3c223fde8b5"},"downloads":-1,"filename":"agentops-0.3.11-py3-none-any.whl","has_sig":false,"md5_digest":"e760d867d9431d1bc13798024237ab99","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":50252,"upload_time":"2024-09-17T21:57:23","upload_time_iso_8601":"2024-09-17T21:57:23.085964Z","url":"https://files.pythonhosted.org/packages/0b/07/8e6a74f084463def9d79d2c84d79475adc0229bbfb2e57401b0616ba6d6a/agentops-0.3.11-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"3746057c552ea7ded5c954bdcbaf8a7dca07b6109633e040bf33de5f97a1289b","md5":"3b661fb76d343ec3bdef5b70fc9e5cc3","sha256":"38a2ffeeac1d722cb72c32d70e1c840424902b57934c647ef10de15478fe8f27"},"downloads":-1,"filename":"agentops-0.3.11.tar.gz","has_sig":false,"md5_digest":"3b661fb76d343ec3bdef5b70fc9e5cc3","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48018,"upload_time":"2024-09-17T21:57:24","upload_time_iso_8601":"2024-09-17T21:57:24.699442Z","url":"https://files.pythonhosted.org/packages/37/46/057c552ea7ded5c954bdcbaf8a7dca07b6109633e040bf33de5f97a1289b/agentops-0.3.11.tar.gz","yanked":false,"yanked_reason":null}],"0.3.12":[{"comment_text":"","digests":{"blake2b_256":"ac0a9004d7a8c2865ed804ddd6968095ef100ac554bc51ada7a2f3c0b4e9142b","md5":"be18cdad4333c6013d9584b84b4c7875","sha256"
:"4767def30de5dd97397728efcb50398a4f6d6823c1b534846f0a9b0cb85a6d45"},"downloads":-1,"filename":"agentops-0.3.12-py3-none-any.whl","has_sig":false,"md5_digest":"be18cdad4333c6013d9584b84b4c7875","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":50794,"upload_time":"2024-09-23T19:30:49","upload_time_iso_8601":"2024-09-23T19:30:49.050650Z","url":"https://files.pythonhosted.org/packages/ac/0a/9004d7a8c2865ed804ddd6968095ef100ac554bc51ada7a2f3c0b4e9142b/agentops-0.3.12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"2c6d4f640d9fadd22f8cd7cb9857eed1f56d422f11b130ba226b947454eb0f0b","md5":"91aa981d4199ac73b4d7407547667e2f","sha256":"11ce3048656b5d146d02a4890dd50c8d2801ca5ad5caccab17d573cd8eea6e83"},"downloads":-1,"filename":"agentops-0.3.12.tar.gz","has_sig":false,"md5_digest":"91aa981d4199ac73b4d7407547667e2f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48525,"upload_time":"2024-09-23T19:30:50","upload_time_iso_8601":"2024-09-23T19:30:50.568151Z","url":"https://files.pythonhosted.org/packages/2c/6d/4f640d9fadd22f8cd7cb9857eed1f56d422f11b130ba226b947454eb0f0b/agentops-0.3.12.tar.gz","yanked":false,"yanked_reason":null}],"0.3.13":[{"comment_text":"","digests":{"blake2b_256":"68efa3b8adc0de2e7daa1e6e2734af9a0e37c90e3346b8a804e3fdc322c82b6c","md5":"948e9278dfc02e1a6ba2ec563296779a","sha256":"81bfdfedd990fbc3064ee42a67422ddbee07b6cd96c5fca7e124eb8c1e0cebdc"},"downloads":-1,"filename":"agentops-0.3.13-py3-none-any.whl","has_sig":false,"md5_digest":"948e9278dfc02e1a6ba2ec563296779a","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":50813,"upload_time":"2024-10-02T18:32:59","upload_time_iso_8601":"2024-10-02T18:32:59.208892Z","url":"https://files.pythonhosted.org/packages/68/ef/a3b8adc0de2e7daa1e6e2734af9a0e37c90e3346b8a804e3fdc322c82b6c/agentops-0.3.13-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":""
,"digests":{"blake2b_256":"3511fb06b4cee96285a5f745809d0f4efddef70d2a82112a633ed53834d6fc64","md5":"27a923eaceb4ae35abe2cf1aed1b8241","sha256":"319b7325fb79004ce996191aa21f0982489be22cc1acc2f3f6d02cdff1db2429"},"downloads":-1,"filename":"agentops-0.3.13.tar.gz","has_sig":false,"md5_digest":"27a923eaceb4ae35abe2cf1aed1b8241","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48559,"upload_time":"2024-10-02T18:33:00","upload_time_iso_8601":"2024-10-02T18:33:00.614409Z","url":"https://files.pythonhosted.org/packages/35/11/fb06b4cee96285a5f745809d0f4efddef70d2a82112a633ed53834d6fc64/agentops-0.3.13.tar.gz","yanked":false,"yanked_reason":null}],"0.3.14":[{"comment_text":"","digests":{"blake2b_256":"1c2775ab5bf99341a6a02775e3858f54a18cbcda0f35b5c6c0f114a829d62b8e","md5":"ad2d676d293c4baa1f9afecc61654e50","sha256":"f4a2fcf1a7caf1d5383bfb66d8a9d567f3cb88fc7495cfd81ade167b0c06a4ea"},"downloads":-1,"filename":"agentops-0.3.14-py3-none-any.whl","has_sig":false,"md5_digest":"ad2d676d293c4baa1f9afecc61654e50","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":50825,"upload_time":"2024-10-14T23:53:48","upload_time_iso_8601":"2024-10-14T23:53:48.464714Z","url":"https://files.pythonhosted.org/packages/1c/27/75ab5bf99341a6a02775e3858f54a18cbcda0f35b5c6c0f114a829d62b8e/agentops-0.3.14-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"46cb183fdaf40ae97ac1806ba91f6f23d55dc0a1a5cdf0881a5c834c8ca7175a","md5":"b90053253770c8e1c385b18e7172d58f","sha256":"fcb515e5743d73efee851b687692bed74797dc88e29a8327b2bbfb21d73a7447"},"downloads":-1,"filename":"agentops-0.3.14.tar.gz","has_sig":false,"md5_digest":"b90053253770c8e1c385b18e7172d58f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48548,"upload_time":"2024-10-14T23:53:50","upload_time_iso_8601":"2024-10-14T23:53:50.306080Z","url":"https://files.pythonhosted.org/packages/46/cb/183fdaf40ae97ac1806ba
91f6f23d55dc0a1a5cdf0881a5c834c8ca7175a/agentops-0.3.14.tar.gz","yanked":false,"yanked_reason":null}],"0.3.15":[{"comment_text":"","digests":{"blake2b_256":"eadebed95f173bd304abe219b2b0a6f4e1f8e38b6733b19f2444a30fe2e731e1","md5":"7a46ccd127ffcd52eff26edaf5721bd9","sha256":"d5617108bbd9871a4250415f4e536ba33c2a6a2d2bec9342046303fb9e839f9d"},"downloads":-1,"filename":"agentops-0.3.15-py3-none-any.whl","has_sig":false,"md5_digest":"7a46ccd127ffcd52eff26edaf5721bd9","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":55349,"upload_time":"2024-11-09T01:18:40","upload_time_iso_8601":"2024-11-09T01:18:40.622134Z","url":"https://files.pythonhosted.org/packages/ea/de/bed95f173bd304abe219b2b0a6f4e1f8e38b6733b19f2444a30fe2e731e1/agentops-0.3.15-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"33a40ef511dc3f23bba2d345b464223b1e7acc3c2a29230a93abb8fbcb6faebf","md5":"7af7abcf01e8d3ef64ac287e9300528f","sha256":"4358f85929d55929002cae589323d36b68fc4d12d0ea5010a80bfc4c7addc0ec"},"downloads":-1,"filename":"agentops-0.3.15.tar.gz","has_sig":false,"md5_digest":"7af7abcf01e8d3ef64ac287e9300528f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":51296,"upload_time":"2024-11-09T01:18:42","upload_time_iso_8601":"2024-11-09T01:18:42.358185Z","url":"https://files.pythonhosted.org/packages/33/a4/0ef511dc3f23bba2d345b464223b1e7acc3c2a29230a93abb8fbcb6faebf/agentops-0.3.15.tar.gz","yanked":false,"yanked_reason":null}],"0.3.15rc1":[{"comment_text":"","digests":{"blake2b_256":"0978ac2f89ccb7b3a31742f5b70434953faff168da6cab67c0836f432919c762","md5":"7f805adf76594ac4bc169b1a111817f4","sha256":"86069387a265bc6c5fa00ffbb3f8a131254a51ee3a9b8b35af4aca823dee76f1"},"downloads":-1,"filename":"agentops-0.3.15rc1-py3-none-any.whl","has_sig":false,"md5_digest":"7f805adf76594ac4bc169b1a111817f4","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":50798,"upload_time":
"2024-10-31T04:36:11","upload_time_iso_8601":"2024-10-31T04:36:11.059082Z","url":"https://files.pythonhosted.org/packages/09/78/ac2f89ccb7b3a31742f5b70434953faff168da6cab67c0836f432919c762/agentops-0.3.15rc1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"4317d6950ad32c33317509ea05a64d01ab661515165ffbd4e120148826b69ffb","md5":"5f131294c10c9b60b33ec93edc106f4f","sha256":"897ab94ae4fca8f1711216f9317dbf6f14e5d018c866086ef0b8831dc125e4ad"},"downloads":-1,"filename":"agentops-0.3.15rc1.tar.gz","has_sig":false,"md5_digest":"5f131294c10c9b60b33ec93edc106f4f","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48739,"upload_time":"2024-10-31T04:36:12","upload_time_iso_8601":"2024-10-31T04:36:12.630857Z","url":"https://files.pythonhosted.org/packages/43/17/d6950ad32c33317509ea05a64d01ab661515165ffbd4e120148826b69ffb/agentops-0.3.15rc1.tar.gz","yanked":false,"yanked_reason":null}],"0.3.16":[{"comment_text":"","digests":{"blake2b_256":"b876e1c933480ec9ad093a841321e5c9f7f16a0af59f339ba2c840851b1af01d","md5":"d57593bb32704fae1163656f03355a71","sha256":"7763e65efe053fa81cea2a2e16f015c7603365280972e0c0709eec32c3c8569e"},"downloads":-1,"filename":"agentops-0.3.16-py3-none-any.whl","has_sig":false,"md5_digest":"d57593bb32704fae1163656f03355a71","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":55351,"upload_time":"2024-11-09T18:44:21","upload_time_iso_8601":"2024-11-09T18:44:21.626158Z","url":"https://files.pythonhosted.org/packages/b8/76/e1c933480ec9ad093a841321e5c9f7f16a0af59f339ba2c840851b1af01d/agentops-0.3.16-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"aa748e77e654b37a5e0c977eca4f7e92740c1e24be39c827815e7bd8da429003","md5":"23078e1dc78ef459a667feeb904345c1","sha256":"564163eb048939d64e848c7e6caf25d6c0aee31200623ef97efe492f090f8939"},"downloads":-1,"filename":"agentops-0.3.16.tar.gz","has_sig":false,"md5_digest
":"23078e1dc78ef459a667feeb904345c1","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":51308,"upload_time":"2024-11-09T18:44:23","upload_time_iso_8601":"2024-11-09T18:44:23.037514Z","url":"https://files.pythonhosted.org/packages/aa/74/8e77e654b37a5e0c977eca4f7e92740c1e24be39c827815e7bd8da429003/agentops-0.3.16.tar.gz","yanked":false,"yanked_reason":null}],"0.3.17":[{"comment_text":"","digests":{"blake2b_256":"6c3038a659671eec20fcae759bd69655ec45b08c4e875627b33e3b05bd46f299","md5":"93bbe3bd4ee492e7e73780c07897b017","sha256":"0d24dd082270a76c98ad0391101d5b5c3d01e389c5032389ecd551285e4b0662"},"downloads":-1,"filename":"agentops-0.3.17-py3-none-any.whl","has_sig":false,"md5_digest":"93bbe3bd4ee492e7e73780c07897b017","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":55503,"upload_time":"2024-11-10T02:39:28","upload_time_iso_8601":"2024-11-10T02:39:28.884052Z","url":"https://files.pythonhosted.org/packages/6c/30/38a659671eec20fcae759bd69655ec45b08c4e875627b33e3b05bd46f299/agentops-0.3.17-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"2131d9a3747df04b7915ee1cffaa4a5636f8ed0e1385e5236b0da085ccce936a","md5":"49e8cf186203cadaa39301c4ce5fda42","sha256":"a893cc7c37eda720ab59e8facaa2774cc23d125648aa00539ae485ff592e8b77"},"downloads":-1,"filename":"agentops-0.3.17.tar.gz","has_sig":false,"md5_digest":"49e8cf186203cadaa39301c4ce5fda42","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":51469,"upload_time":"2024-11-10T02:39:30","upload_time_iso_8601":"2024-11-10T02:39:30.636907Z","url":"https://files.pythonhosted.org/packages/21/31/d9a3747df04b7915ee1cffaa4a5636f8ed0e1385e5236b0da085ccce936a/agentops-0.3.17.tar.gz","yanked":false,"yanked_reason":null}],"0.3.18":[{"comment_text":"","digests":{"blake2b_256":"978dbd4cad95dad722dc2d3e4179feab1058ef846828c0e15e51e8bfaea373ee","md5":"d9afc3636cb969c286738ce02ed12196","sha256":"8b48d8a1662f276
653430fd541c77fa4f9a15a43e881b518ff88ea56925afcf7"},"downloads":-1,"filename":"agentops-0.3.18-py3-none-any.whl","has_sig":false,"md5_digest":"d9afc3636cb969c286738ce02ed12196","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":58032,"upload_time":"2024-11-19T19:06:19","upload_time_iso_8601":"2024-11-19T19:06:19.068511Z","url":"https://files.pythonhosted.org/packages/97/8d/bd4cad95dad722dc2d3e4179feab1058ef846828c0e15e51e8bfaea373ee/agentops-0.3.18-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"c55246bb2f29b9e5f2e1d8b124296b7794934a9048de635d9e7d6a95e791ad7b","md5":"02a4fc081499360aac58485a94a6ca33","sha256":"4d509754df7be52579597cc9f53939c5218131a0379463e0ff6f6f40cde9fcc4"},"downloads":-1,"filename":"agentops-0.3.18.tar.gz","has_sig":false,"md5_digest":"02a4fc081499360aac58485a94a6ca33","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":55394,"upload_time":"2024-11-19T19:06:21","upload_time_iso_8601":"2024-11-19T19:06:21.306448Z","url":"https://files.pythonhosted.org/packages/c5/52/46bb2f29b9e5f2e1d8b124296b7794934a9048de635d9e7d6a95e791ad7b/agentops-0.3.18.tar.gz","yanked":false,"yanked_reason":null}],"0.3.19":[{"comment_text":"","digests":{"blake2b_256":"fc1e48616d2db40717d560a561e13521009655d447388f944f12f2b3811e6d7d","md5":"a9e23f1d31821585017e97633b058233","sha256":"1888a47dd3d9b92c5f246cdeeab333def5acbd26833d3148c63e8793457405b3"},"downloads":-1,"filename":"agentops-0.3.19-py3-none-any.whl","has_sig":false,"md5_digest":"a9e23f1d31821585017e97633b058233","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":38648,"upload_time":"2024-12-04T00:54:00","upload_time_iso_8601":"2024-12-04T00:54:00.173948Z","url":"https://files.pythonhosted.org/packages/fc/1e/48616d2db40717d560a561e13521009655d447388f944f12f2b3811e6d7d/agentops-0.3.19-py3-none-any.whl","yanked":true,"yanked_reason":"Broken - dependency, please install 
0.3.18"},{"comment_text":"","digests":{"blake2b_256":"b319bb0e9895cb6da29f764f8d7b95b10ac8fde400bc17028f9bd486e9574dbe","md5":"f6424c41464d438007e9628748a0bea6","sha256":"ca0d4ba35ae699169ae20f74f72ca6a5780a8768ba2a2c32589fc5292ed81674"},"downloads":-1,"filename":"agentops-0.3.19.tar.gz","has_sig":false,"md5_digest":"f6424c41464d438007e9628748a0bea6","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48360,"upload_time":"2024-12-04T00:54:01","upload_time_iso_8601":"2024-12-04T00:54:01.418776Z","url":"https://files.pythonhosted.org/packages/b3/19/bb0e9895cb6da29f764f8d7b95b10ac8fde400bc17028f9bd486e9574dbe/agentops-0.3.19.tar.gz","yanked":true,"yanked_reason":"Broken dependency, please install 0.3.18"}],"0.3.2":[{"comment_text":"","digests":{"blake2b_256":"9d2c23b745a61d48df788b8020e5ea37e94f9da59b322a17accafe18d8cb4006","md5":"62d576d9518a627fe4232709c0721eff","sha256":"b35988e04378624204572bb3d7a454094f879ea573f05b57d4e75ab0bfbb82af"},"downloads":-1,"filename":"agentops-0.3.2-py3-none-any.whl","has_sig":false,"md5_digest":"62d576d9518a627fe4232709c0721eff","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":39527,"upload_time":"2024-07-21T03:09:56","upload_time_iso_8601":"2024-07-21T03:09:56.844372Z","url":"https://files.pythonhosted.org/packages/9d/2c/23b745a61d48df788b8020e5ea37e94f9da59b322a17accafe18d8cb4006/agentops-0.3.2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"d2a1cc21406646c065e83435fe30fa205b99b2204d8074eca31926a5f8ef4381","md5":"30b247bcae25b181485a89213518241c","sha256":"55559ac4a43634831dfa8937c2597c28e332809dc7c6bb3bc3c8b233442e224c"},"downloads":-1,"filename":"agentops-0.3.2.tar.gz","has_sig":false,"md5_digest":"30b247bcae25b181485a89213518241c","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":41894,"upload_time":"2024-07-21T03:09:58","upload_time_iso_8601":"2024-07-21T03:09:58.409826Z","url":"https:/
/files.pythonhosted.org/packages/d2/a1/cc21406646c065e83435fe30fa205b99b2204d8074eca31926a5f8ef4381/agentops-0.3.2.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20":[{"comment_text":"","digests":{"blake2b_256":"a854ae9147a490dd9bd03ab7bfc5af47f40ff675840a9aa143896b385a8f8d3a","md5":"a13af8737ddff8a0c7c0f05cee70085f","sha256":"b5396e11b0bfef46b85604e8e36ab17668057711edd56f1edb0a067b8676fdcc"},"downloads":-1,"filename":"agentops-0.3.20-py3-none-any.whl","has_sig":false,"md5_digest":"a13af8737ddff8a0c7c0f05cee70085f","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":38674,"upload_time":"2024-12-07T00:06:31","upload_time_iso_8601":"2024-12-07T00:06:31.901162Z","url":"https://files.pythonhosted.org/packages/a8/54/ae9147a490dd9bd03ab7bfc5af47f40ff675840a9aa143896b385a8f8d3a/agentops-0.3.20-py3-none-any.whl","yanked":true,"yanked_reason":"Wrong - release"},{"comment_text":"","digests":{"blake2b_256":"c1eb19d04c801854ba75e235eb87c51a6a9c5b1a89e8579cb745c83f8bf84e08","md5":"11754497191d8340eda7a831720d9b74","sha256":"c71406294804a82795310a4afc492064a8884b1ba47e12607230975bc1291ce3"},"downloads":-1,"filename":"agentops-0.3.20.tar.gz","has_sig":false,"md5_digest":"11754497191d8340eda7a831720d9b74","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48332,"upload_time":"2024-12-07T00:06:33","upload_time_iso_8601":"2024-12-07T00:06:33.568362Z","url":"https://files.pythonhosted.org/packages/c1/eb/19d04c801854ba75e235eb87c51a6a9c5b1a89e8579cb745c83f8bf84e08/agentops-0.3.20.tar.gz","yanked":true,"yanked_reason":"Wrong 
release"}],"0.3.20rc1":[{"comment_text":"","digests":{"blake2b_256":"073de7eba58e2a60c0136eee2760b20f99607001d372de26505feee891e0976b","md5":"73c6ac515ee9d555e27a7ba7e26e3a46","sha256":"079ea8138938e27a3e1319a235a6f4cf98c0d6846731d854aa83b8422d570bda"},"downloads":-1,"filename":"agentops-0.3.20rc1-py3-none-any.whl","has_sig":false,"md5_digest":"73c6ac515ee9d555e27a7ba7e26e3a46","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":38718,"upload_time":"2024-12-07T00:10:18","upload_time_iso_8601":"2024-12-07T00:10:18.796963Z","url":"https://files.pythonhosted.org/packages/07/3d/e7eba58e2a60c0136eee2760b20f99607001d372de26505feee891e0976b/agentops-0.3.20rc1-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"02ff111d618c21aad946caedb666030f1f374a0d558228b9061ea2b46acb6bcd","md5":"17062e985b931dc85b4855922d7842ce","sha256":"ef48447e07a3eded246b2f7e10bba74422a34563ffdc667ac16b2d3383475a3f"},"downloads":-1,"filename":"agentops-0.3.20rc1.tar.gz","has_sig":false,"md5_digest":"17062e985b931dc85b4855922d7842ce","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48329,"upload_time":"2024-12-07T00:10:20","upload_time_iso_8601":"2024-12-07T00:10:20.510407Z","url":"https://files.pythonhosted.org/packages/02/ff/111d618c21aad946caedb666030f1f374a0d558228b9061ea2b46acb6bcd/agentops-0.3.20rc1.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc10":[{"comment_text":"","digests":{"blake2b_256":"a7274706d8d9c8f4abecc1dda2b9b02cd02ffe895220bd39f58322a46ccc7254","md5":"2c66a93c691c6b8cac2f2dc8fab9efae","sha256":"3c10d77f2fe88b61d97ad007820c1ba968c62f692986ea2b2cbfd8b22ec9e5bc"},"downloads":-1,"filename":"agentops-0.3.20rc10-py3-none-any.whl","has_sig":false,"md5_digest":"2c66a93c691c6b8cac2f2dc8fab9efae","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":57423,"upload_time":"2024-12-10T03:41:04","upload_time_iso_8601":"2024-12-10T03:41:04.579814Z"
,"url":"https://files.pythonhosted.org/packages/a7/27/4706d8d9c8f4abecc1dda2b9b02cd02ffe895220bd39f58322a46ccc7254/agentops-0.3.20rc10-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"efe9e304f465945f57e4c6d35cd35fff53dc2a2e36b9b32793fa57017467b0c2","md5":"9882d32866b94d925ba36ac376c30bea","sha256":"f0c72c20e7fe41054c22c6257420314863549dd91428a892ac9b47b81cdfcc8c"},"downloads":-1,"filename":"agentops-0.3.20rc10.tar.gz","has_sig":false,"md5_digest":"9882d32866b94d925ba36ac376c30bea","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":57564,"upload_time":"2024-12-10T03:41:06","upload_time_iso_8601":"2024-12-10T03:41:06.899043Z","url":"https://files.pythonhosted.org/packages/ef/e9/e304f465945f57e4c6d35cd35fff53dc2a2e36b9b32793fa57017467b0c2/agentops-0.3.20rc10.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc11":[{"comment_text":"","digests":{"blake2b_256":"8dbf598ec2532b713a228f4041c9b2c10358cd43e6aecf6128d0988a0b5f103e","md5":"d9ab67a850aefcb5bf9467b48f74675d","sha256":"3e5d4c19de6c58ae684693f47a2f03db35eaf4cd6d8aafc1e804a134462c2b55"},"downloads":-1,"filename":"agentops-0.3.20rc11-py3-none-any.whl","has_sig":false,"md5_digest":"d9ab67a850aefcb5bf9467b48f74675d","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":60280,"upload_time":"2024-12-10T22:45:05","upload_time_iso_8601":"2024-12-10T22:45:05.280119Z","url":"https://files.pythonhosted.org/packages/8d/bf/598ec2532b713a228f4041c9b2c10358cd43e6aecf6128d0988a0b5f103e/agentops-0.3.20rc11-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"210642e51fff6a4537fb811a15bc22d00343145285c6246dc069433d61436e1b","md5":"ca5279f4cb6ad82e06ef542a2d08d06e","sha256":"9211489c6a01bc9cda4061826f8b80d0989cfcd7fbabe1dd2ed5a5cb76b3d6f0"},"downloads":-1,"filename":"agentops-0.3.20rc11.tar.gz","has_sig":false,"md5_digest":"ca5279f4cb6ad82e06ef542a2d08d06e","packagetype":"sdi
st","python_version":"source","requires_python":">=3.7","size":59718,"upload_time":"2024-12-10T22:45:09","upload_time_iso_8601":"2024-12-10T22:45:09.616947Z","url":"https://files.pythonhosted.org/packages/21/06/42e51fff6a4537fb811a15bc22d00343145285c6246dc069433d61436e1b/agentops-0.3.20rc11.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc12":[{"comment_text":"","digests":{"blake2b_256":"dc281db6f49f10ac849683de1d7f5b5ef492be2a996325302167b8388f375d51","md5":"8b2611d2510f0d4fac7ab824d7658ff7","sha256":"9237652d28db89315c49c0705829b291c17280e07d41272f909e2609acec650b"},"downloads":-1,"filename":"agentops-0.3.20rc12-py3-none-any.whl","has_sig":false,"md5_digest":"8b2611d2510f0d4fac7ab824d7658ff7","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":60282,"upload_time":"2024-12-10T23:10:54","upload_time_iso_8601":"2024-12-10T23:10:54.516317Z","url":"https://files.pythonhosted.org/packages/dc/28/1db6f49f10ac849683de1d7f5b5ef492be2a996325302167b8388f375d51/agentops-0.3.20rc12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"10c073cb9a55592f55bb44c9206f50f41d7b7a8a8d6fd67d42f40c8f9f184b0e","md5":"02b3a68f3491564af2e29f0f216eea1e","sha256":"d4d3a73ac34b2a00edb6e6b5b220cbb031bb76ff58d85e2096b536be24aee4fe"},"downloads":-1,"filename":"agentops-0.3.20rc12.tar.gz","has_sig":false,"md5_digest":"02b3a68f3491564af2e29f0f216eea1e","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":59731,"upload_time":"2024-12-10T23:10:56","upload_time_iso_8601":"2024-12-10T23:10:56.822803Z","url":"https://files.pythonhosted.org/packages/10/c0/73cb9a55592f55bb44c9206f50f41d7b7a8a8d6fd67d42f40c8f9f184b0e/agentops-0.3.20rc12.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc13":[{"comment_text":"","digests":{"blake2b_256":"4ed48a97563074235f266281167c70ab90833c195e2b704087e414509ae3ec32","md5":"c86fe22044483f94bc044a3bf7b054b7","sha256":"2fbb3b55701d9aea64f622e7e29aa417772e897e24
14f74ed3954d99009d224f"},"downloads":-1,"filename":"agentops-0.3.20rc13-py3-none-any.whl","has_sig":false,"md5_digest":"c86fe22044483f94bc044a3bf7b054b7","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":64724,"upload_time":"2024-12-10T23:27:50","upload_time_iso_8601":"2024-12-10T23:27:50.895316Z","url":"https://files.pythonhosted.org/packages/4e/d4/8a97563074235f266281167c70ab90833c195e2b704087e414509ae3ec32/agentops-0.3.20rc13-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"767e59c6f34e9a067d9152021de7e3146e5c0f69f36434dcb3026ff03f382489","md5":"152a70647d5ff28fe851e4cc406d8fb4","sha256":"b7a6d1d7f603bbb2605cc747762ae866bdee53941c4c76087c9f0f0a5efad03b"},"downloads":-1,"filename":"agentops-0.3.20rc13.tar.gz","has_sig":false,"md5_digest":"152a70647d5ff28fe851e4cc406d8fb4","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":63242,"upload_time":"2024-12-10T23:27:53","upload_time_iso_8601":"2024-12-10T23:27:53.657606Z","url":"https://files.pythonhosted.org/packages/76/7e/59c6f34e9a067d9152021de7e3146e5c0f69f36434dcb3026ff03f382489/agentops-0.3.20rc13.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc2":[{"comment_text":"","digests":{"blake2b_256":"cebbbca58531e21f4c1c92cbe6ba15d0f308ff8f3b27083cd0ce6358c7d1d117","md5":"5a9fcd99e0b6e3b24e721b22c3ee5907","sha256":"ada95d42e82abef16c1e83443dc42d02bb470ee48b1fa8f2d58a20703511a7be"},"downloads":-1,"filename":"agentops-0.3.20rc2-py3-none-any.whl","has_sig":false,"md5_digest":"5a9fcd99e0b6e3b24e721b22c3ee5907","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":38716,"upload_time":"2024-12-07T00:20:01","upload_time_iso_8601":"2024-12-07T00:20:01.561074Z","url":"https://files.pythonhosted.org/packages/ce/bb/bca58531e21f4c1c92cbe6ba15d0f308ff8f3b27083cd0ce6358c7d1d117/agentops-0.3.20rc2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2
b_256":"124aec14492566949b7383ae321cb40c1edc18940712b277c08d32392566f7a8","md5":"ff8db0075584474e35784b080fb9b6b1","sha256":"60462b82390e78fd21312c5db45f0f48dfcc9c9ab354e6bf232db557ccf57c13"},"downloads":-1,"filename":"agentops-0.3.20rc2.tar.gz","has_sig":false,"md5_digest":"ff8db0075584474e35784b080fb9b6b1","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48341,"upload_time":"2024-12-07T00:20:02","upload_time_iso_8601":"2024-12-07T00:20:02.519240Z","url":"https://files.pythonhosted.org/packages/12/4a/ec14492566949b7383ae321cb40c1edc18940712b277c08d32392566f7a8/agentops-0.3.20rc2.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc4":[{"comment_text":"","digests":{"blake2b_256":"a1551125b2b3823fcb3f3afa3c6b9621541799ac329622ee21038babbfbedf39","md5":"a82f1b73347d3a2fe33f31cec01ca376","sha256":"72253950b46a11b5b1163b13bbb9d5b769e6cdb7b102acf46efac8cf02f7eaac"},"downloads":-1,"filename":"agentops-0.3.20rc4-py3-none-any.whl","has_sig":false,"md5_digest":"a82f1b73347d3a2fe33f31cec01ca376","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":38719,"upload_time":"2024-12-07T00:53:45","upload_time_iso_8601":"2024-12-07T00:53:45.212239Z","url":"https://files.pythonhosted.org/packages/a1/55/1125b2b3823fcb3f3afa3c6b9621541799ac329622ee21038babbfbedf39/agentops-0.3.20rc4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"a180420ef26926052b12d1c2010360b4037f6765321055ce7e09c6bfaeac3480","md5":"1a314ff81d87a774e5e1cf338151a353","sha256":"4218fcfa42644dd86ee50ac7806d08783e4629db30b127bc8011c9c3523eeb5c"},"downloads":-1,"filename":"agentops-0.3.20rc4.tar.gz","has_sig":false,"md5_digest":"1a314ff81d87a774e5e1cf338151a353","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":48332,"upload_time":"2024-12-07T00:53:47","upload_time_iso_8601":"2024-12-07T00:53:47.581677Z","url":"https://files.pythonhosted.org/packages/a1/80/420ef26926052b12d1c201
0360b4037f6765321055ce7e09c6bfaeac3480/agentops-0.3.20rc4.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc5":[{"comment_text":"","digests":{"blake2b_256":"7747e61c5387124f53a3095261427888ab88e192828e3bb8be92660bf4e008d0","md5":"fd7343ddf99f077d1a159b87d84ed79c","sha256":"97df38116ec7fe337fc04b800e423aa8b5e69681565c02dc4af3e9c60764827e"},"downloads":-1,"filename":"agentops-0.3.20rc5-py3-none-any.whl","has_sig":false,"md5_digest":"fd7343ddf99f077d1a159b87d84ed79c","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":44545,"upload_time":"2024-12-07T01:38:17","upload_time_iso_8601":"2024-12-07T01:38:17.177125Z","url":"https://files.pythonhosted.org/packages/77/47/e61c5387124f53a3095261427888ab88e192828e3bb8be92660bf4e008d0/agentops-0.3.20rc5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"145fa0bf5ee5b56dacf63b9712ac62169c585c6222efe043cc77f3148f709965","md5":"20a32d514b5d51851dbcbdfb2c189491","sha256":"48111083dab1fc30f0545e0812c4aab00fc9e9d48de42de95d254699396992a8"},"downloads":-1,"filename":"agentops-0.3.20rc5.tar.gz","has_sig":false,"md5_digest":"20a32d514b5d51851dbcbdfb2c189491","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":53243,"upload_time":"2024-12-07T01:38:18","upload_time_iso_8601":"2024-12-07T01:38:18.772880Z","url":"https://files.pythonhosted.org/packages/14/5f/a0bf5ee5b56dacf63b9712ac62169c585c6222efe043cc77f3148f709965/agentops-0.3.20rc5.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc6":[{"comment_text":"","digests":{"blake2b_256":"85f3a5ae3d8d47aa5160a5c805551d75077cad61bff9626abe44079d29d1c299","md5":"30f87c628c530e82e27b8bc2d2a46d8a","sha256":"d03f16832b3a5670d9c3273b95c9d9def772c203b2cd4ac52ae0e7f6d3b1b9e4"},"downloads":-1,"filename":"agentops-0.3.20rc6-py3-none-any.whl","has_sig":false,"md5_digest":"30f87c628c530e82e27b8bc2d2a46d8a","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":618
44,"upload_time":"2024-12-07T01:49:11","upload_time_iso_8601":"2024-12-07T01:49:11.801219Z","url":"https://files.pythonhosted.org/packages/85/f3/a5ae3d8d47aa5160a5c805551d75077cad61bff9626abe44079d29d1c299/agentops-0.3.20rc6-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"060e24f42ed1de3d892355f3ba90f0b7f659855fafd18851e59aa7174fa30615","md5":"384c60ee11b827b8bad31cef20a35a17","sha256":"45aa4797269214d41858537d95050964f330651da5c7412b2895e714a81f30f5"},"downloads":-1,"filename":"agentops-0.3.20rc6.tar.gz","has_sig":false,"md5_digest":"384c60ee11b827b8bad31cef20a35a17","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":61004,"upload_time":"2024-12-07T01:49:13","upload_time_iso_8601":"2024-12-07T01:49:13.917920Z","url":"https://files.pythonhosted.org/packages/06/0e/24f42ed1de3d892355f3ba90f0b7f659855fafd18851e59aa7174fa30615/agentops-0.3.20rc6.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc7":[{"comment_text":"","digests":{"blake2b_256":"d502edf7ba8aff1a994176da4c95688c9ba0428ac3bd9a0db2392fe5009162a9","md5":"9b43c5e2df12abac01ffc5262e991825","sha256":"95972115c5c753ceee477834de902afaf0664107048e44eee2c65e74e05656a2"},"downloads":-1,"filename":"agentops-0.3.20rc7-py3-none-any.whl","has_sig":false,"md5_digest":"9b43c5e2df12abac01ffc5262e991825","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":40117,"upload_time":"2024-12-07T02:12:48","upload_time_iso_8601":"2024-12-07T02:12:48.512036Z","url":"https://files.pythonhosted.org/packages/d5/02/edf7ba8aff1a994176da4c95688c9ba0428ac3bd9a0db2392fe5009162a9/agentops-0.3.20rc7-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"5d7029d8d02fcf6db627c6b20ceab974c455e23a25fc0e991c0a8d0eaebda523","md5":"9de760856bed3f7adbd1d0ab7ba0a63a","sha256":"7c793b7b199a61ca61366ddb8fd94986fac262ef6514918c3baaa08184b86669"},"downloads":-1,"filename":"agentops-0.3.20rc7.tar.gz
","has_sig":false,"md5_digest":"9de760856bed3f7adbd1d0ab7ba0a63a","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":49661,"upload_time":"2024-12-07T02:12:50","upload_time_iso_8601":"2024-12-07T02:12:50.120388Z","url":"https://files.pythonhosted.org/packages/5d/70/29d8d02fcf6db627c6b20ceab974c455e23a25fc0e991c0a8d0eaebda523/agentops-0.3.20rc7.tar.gz","yanked":false,"yanked_reason":null}],"0.3.20rc8":[{"comment_text":"","digests":{"blake2b_256":"6d0f66418c0b20f40fe11de50f29481abdb266ff641ac6166eab9eac3d7364d2","md5":"52a2cea48e48d1818169c07507a6c7a9","sha256":"8cf2e9fe6400a4fb4367a039cacc5d76339a8fd2749a44243389547e928e545c"},"downloads":-1,"filename":"agentops-0.3.20rc8-py3-none-any.whl","has_sig":false,"md5_digest":"52a2cea48e48d1818169c07507a6c7a9","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":57414,"upload_time":"2024-12-07T02:17:51","upload_time_iso_8601":"2024-12-07T02:17:51.404804Z","url":"https://files.pythonhosted.org/packages/6d/0f/66418c0b20f40fe11de50f29481abdb266ff641ac6166eab9eac3d7364d2/agentops-0.3.20rc8-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"4d18250b066f23ccbb22f2bba8df101361abd5724ddcef59a4d63d4539c7cd82","md5":"f7887176e88d4434e38e237850363b80","sha256":"a06e7939dd4d59c9880ded1b129fd4548b34be5530a46cf043326740bdfeca56"},"downloads":-1,"filename":"agentops-0.3.20rc8.tar.gz","has_sig":false,"md5_digest":"f7887176e88d4434e38e237850363b80","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":57521,"upload_time":"2024-12-07T02:17:53","upload_time_iso_8601":"2024-12-07T02:17:53.055737Z","url":"https://files.pythonhosted.org/packages/4d/18/250b066f23ccbb22f2bba8df101361abd5724ddcef59a4d63d4539c7cd82/agentops-0.3.20rc8.tar.gz","yanked":false,"yanked_reason":null}],"0.3.21":[{"comment_text":"","digests":{"blake2b_256":"c4cb3b6cc5a08d11d9e56501f980222da0fa41814b7d6948a7f6354f31739af6","md5":"c7592f9e7993
dbe307fbffd7e4da1e51","sha256":"4f98beecdce4c7cbee80ec26658a9657ba307a1fb2910b589f85325d3259b75b"},"downloads":-1,"filename":"agentops-0.3.21-py3-none-any.whl","has_sig":false,"md5_digest":"c7592f9e7993dbe307fbffd7e4da1e51","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":64701,"upload_time":"2024-12-11T12:24:00","upload_time_iso_8601":"2024-12-11T12:24:00.934724Z","url":"https://files.pythonhosted.org/packages/c4/cb/3b6cc5a08d11d9e56501f980222da0fa41814b7d6948a7f6354f31739af6/agentops-0.3.21-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"83f6bfd27fa4b948c353eaff579dafdf4eb54833f5c526e00c6f2faee4b467a8","md5":"83d7666511cccf3b0d4354cebd99b110","sha256":"d8e8d1f6d154554dba64ec5b139905bf76c68f21575af9fa2ca1697277fe36f2"},"downloads":-1,"filename":"agentops-0.3.21.tar.gz","has_sig":false,"md5_digest":"83d7666511cccf3b0d4354cebd99b110","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":63185,"upload_time":"2024-12-11T12:24:02","upload_time_iso_8601":"2024-12-11T12:24:02.068404Z","url":"https://files.pythonhosted.org/packages/83/f6/bfd27fa4b948c353eaff579dafdf4eb54833f5c526e00c6f2faee4b467a8/agentops-0.3.21.tar.gz","yanked":false,"yanked_reason":null}],"0.3.22":[{"comment_text":"","digests":{"blake2b_256":"11e721b42168ecfd0a9fff9dea51201646b6e62c4f52c8cd9c2a6400125d7234","md5":"26061ab467e358b63251f9547275bbbd","sha256":"992f4f31d80e8b0b2098abf58ae2707c60538e4b66e5aec8cf49fb269d5a2adc"},"downloads":-1,"filename":"agentops-0.3.22-py3-none-any.whl","has_sig":false,"md5_digest":"26061ab467e358b63251f9547275bbbd","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":39539,"upload_time":"2025-01-11T03:21:39","upload_time_iso_8601":"2025-01-11T03:21:39.093169Z","url":"https://files.pythonhosted.org/packages/11/e7/21b42168ecfd0a9fff9dea51201646b6e62c4f52c8cd9c2a6400125d7234/agentops-0.3.22-py3-none-any.whl","yanked":true,"yank
ed_reason":"Broken - dependency"},{"comment_text":"","digests":{"blake2b_256":"e067e61aa4c2e329da10b5e95d325091e599d8a00a28843a54bdcefa7a2eef8d","md5":"bcf45b6c4c56884ed2409f835571af62","sha256":"705d772b6994f8bab0cd163b24602009353f7906c72d9db008af11683f6e9341"},"downloads":-1,"filename":"agentops-0.3.22.tar.gz","has_sig":false,"md5_digest":"bcf45b6c4c56884ed2409f835571af62","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":52845,"upload_time":"2025-01-11T03:21:41","upload_time_iso_8601":"2025-01-11T03:21:41.762282Z","url":"https://files.pythonhosted.org/packages/e0/67/e61aa4c2e329da10b5e95d325091e599d8a00a28843a54bdcefa7a2eef8d/agentops-0.3.22.tar.gz","yanked":true,"yanked_reason":"Broken dependency"}],"0.3.23":[{"comment_text":null,"digests":{"blake2b_256":"e67de1434765cf0a3d62372b74f47919aa17c0b01909823f7d3ee705edf821a9","md5":"1f0f02509b8ba713db72e57a072f01a6","sha256":"ecfff77d8f9006361ef2a2e8593271e97eb54b7b504abfb8abd6504006baca56"},"downloads":-1,"filename":"agentops-0.3.23-py3-none-any.whl","has_sig":false,"md5_digest":"1f0f02509b8ba713db72e57a072f01a6","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":70098,"upload_time":"2025-01-12T02:11:56","upload_time_iso_8601":"2025-01-12T02:11:56.319763Z","url":"https://files.pythonhosted.org/packages/e6/7d/e1434765cf0a3d62372b74f47919aa17c0b01909823f7d3ee705edf821a9/agentops-0.3.23-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"5c7fa4fd91f8fd819e1ecfdc608d1c7ade83de0f9dddd868e2c2c139a2fdae25","md5":"b7922399f81fb26517eb69fc7fef97c9","sha256":"4e4de49caeaf567b8746082f84a8cdd65afe2c698720f6f40251bbc4fdffe4c9"},"downloads":-1,"filename":"agentops-0.3.23.tar.gz","has_sig":false,"md5_digest":"b7922399f81fb26517eb69fc7fef97c9","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":64225,"upload_time":"2025-01-12T02:11:59","upload_time_iso_8601":"2025-01-12T02:1
1:59.360077Z","url":"https://files.pythonhosted.org/packages/5c/7f/a4fd91f8fd819e1ecfdc608d1c7ade83de0f9dddd868e2c2c139a2fdae25/agentops-0.3.23.tar.gz","yanked":false,"yanked_reason":null}],"0.3.24":[{"comment_text":null,"digests":{"blake2b_256":"254ea7d131802bac2ece5302ebf78dcef1ba1ba2f8b3a51fbe44c7f52bae6a53","md5":"39c39d8a7f1285add0fec21830a89a4a","sha256":"c5dfc8098b0dd49ddd819aa55280d07f8bfbf2f8fa088fc51ff5849b65062b10"},"downloads":-1,"filename":"agentops-0.3.24-py3-none-any.whl","has_sig":false,"md5_digest":"39c39d8a7f1285add0fec21830a89a4a","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":71957,"upload_time":"2025-01-18T19:08:02","upload_time_iso_8601":"2025-01-18T19:08:02.053316Z","url":"https://files.pythonhosted.org/packages/25/4e/a7d131802bac2ece5302ebf78dcef1ba1ba2f8b3a51fbe44c7f52bae6a53/agentops-0.3.24-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"71fee96e22c4bf762f34cd5ba435880470dad4576ab357ee61742fe053752322","md5":"3e1b7e0a31197936e099a7509128f794","sha256":"c97a3af959b728bcfbfb1ac2494cef82d8804defc9dac858648b39a9ecdcd2e4"},"downloads":-1,"filename":"agentops-0.3.24.tar.gz","has_sig":false,"md5_digest":"3e1b7e0a31197936e099a7509128f794","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":233974,"upload_time":"2025-01-18T19:08:04","upload_time_iso_8601":"2025-01-18T19:08:04.121618Z","url":"https://files.pythonhosted.org/packages/71/fe/e96e22c4bf762f34cd5ba435880470dad4576ab357ee61742fe053752322/agentops-0.3.24.tar.gz","yanked":false,"yanked_reason":null}],"0.3.25":[{"comment_text":null,"digests":{"blake2b_256":"e6e39cff4ed65c5deac34f427ed60cd7af3604ec7ed8a999c351f6411e190d3b","md5":"328dedc417be02fc28f8a4c7ed7b52e9","sha256":"4faebf73a62aa0bcac8578428277ca5b9af5e828f49f2cb03a9695b8426e6b9d"},"downloads":-1,"filename":"agentops-0.3.25-py3-none-any.whl","has_sig":false,"md5_digest":"328dedc417be02fc28f8a4c7ed7b52e9","pa
ckagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":71971,"upload_time":"2025-01-22T10:43:16","upload_time_iso_8601":"2025-01-22T10:43:16.070593Z","url":"https://files.pythonhosted.org/packages/e6/e3/9cff4ed65c5deac34f427ed60cd7af3604ec7ed8a999c351f6411e190d3b/agentops-0.3.25-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"2fdfeb00eaabebb51feae0724a5928f25df4d71d1c8392204f4f849351fd748c","md5":"a40bc7037baf6dbba92d63331f561a28","sha256":"868d855b6531d1fa2d1047db2cb03ddb1121062fd51c44b564dc626f15cc1e40"},"downloads":-1,"filename":"agentops-0.3.25.tar.gz","has_sig":false,"md5_digest":"a40bc7037baf6dbba92d63331f561a28","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":234024,"upload_time":"2025-01-22T10:43:17","upload_time_iso_8601":"2025-01-22T10:43:17.986230Z","url":"https://files.pythonhosted.org/packages/2f/df/eb00eaabebb51feae0724a5928f25df4d71d1c8392204f4f849351fd748c/agentops-0.3.25.tar.gz","yanked":false,"yanked_reason":null}],"0.3.26":[{"comment_text":null,"digests":{"blake2b_256":"f521671c458951850bd3a445aa09eafd2793aae1104fa68351a5c3976cdf762b","md5":"c3f8fa92ff5a94a37516e774c7f58b9a","sha256":"20948f52e3ffb4ba1d52301c3a82e59490182c4dad22774ad831dce0181eb5c2"},"downloads":-1,"filename":"agentops-0.3.26-py3-none-any.whl","has_sig":false,"md5_digest":"c3f8fa92ff5a94a37516e774c7f58b9a","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":72090,"upload_time":"2025-01-24T23:44:06","upload_time_iso_8601":"2025-01-24T23:44:06.828461Z","url":"https://files.pythonhosted.org/packages/f5/21/671c458951850bd3a445aa09eafd2793aae1104fa68351a5c3976cdf762b/agentops-0.3.26-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"76a1b03c6348a77798e750bde4eec03b4af620d71b9e4b64ff7dcf0860025a2d","md5":"ba4d0f2411ec72828677b38a395465cc","sha256":"bc824bf8727332f59bf803cf8444
0d13e9e398406222ab29f45909ac1e39f815"},"downloads":-1,"filename":"agentops-0.3.26.tar.gz","has_sig":false,"md5_digest":"ba4d0f2411ec72828677b38a395465cc","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":234235,"upload_time":"2025-01-24T23:44:08","upload_time_iso_8601":"2025-01-24T23:44:08.541961Z","url":"https://files.pythonhosted.org/packages/76/a1/b03c6348a77798e750bde4eec03b4af620d71b9e4b64ff7dcf0860025a2d/agentops-0.3.26.tar.gz","yanked":false,"yanked_reason":null}],"0.3.4":[{"comment_text":"","digests":{"blake2b_256":"52f32bd714234ec345153c0fcbc9e4896c306c347f3fb66a3aa6d6fc109a7243","md5":"c7a975a86900f7dbe6861a21fdd3c2d8","sha256":"126f7aed4ba43c1399b5488d67a03d10cb4c531e619c650776f826ca00c1aa24"},"downloads":-1,"filename":"agentops-0.3.4-py3-none-any.whl","has_sig":false,"md5_digest":"c7a975a86900f7dbe6861a21fdd3c2d8","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":39915,"upload_time":"2024-07-24T23:15:03","upload_time_iso_8601":"2024-07-24T23:15:03.892439Z","url":"https://files.pythonhosted.org/packages/52/f3/2bd714234ec345153c0fcbc9e4896c306c347f3fb66a3aa6d6fc109a7243/agentops-0.3.4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"d28b88a2c9c2df655de806adbb5deebb12c64d19d6aa3cfa759da642953525e0","md5":"f48a2ab7fcaf9cf11a25805ac5300e26","sha256":"a92c9cb7c511197f0ecb8cb5aca15d35022c15a3d2fd2aaaa34cd7e5dc59393f"},"downloads":-1,"filename":"agentops-0.3.4.tar.gz","has_sig":false,"md5_digest":"f48a2ab7fcaf9cf11a25805ac5300e26","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":42063,"upload_time":"2024-07-24T23:15:05","upload_time_iso_8601":"2024-07-24T23:15:05.586475Z","url":"https://files.pythonhosted.org/packages/d2/8b/88a2c9c2df655de806adbb5deebb12c64d19d6aa3cfa759da642953525e0/agentops-0.3.4.tar.gz","yanked":false,"yanked_reason":null}],"0.3.5":[{"comment_text":"","digests":{"blake2b_256":"f253f9672c6aa3
c79b6a5b64321e93d2316f126add867ceb2e3e95ea8b4bf1b0","md5":"bd45dc8100fd3974dff11014d12424ff","sha256":"687cb938ecf9d1bf7650afc910e2b2e1b8b6d9e969215aeb49e57f1555a2a756"},"downloads":-1,"filename":"agentops-0.3.5-py3-none-any.whl","has_sig":false,"md5_digest":"bd45dc8100fd3974dff11014d12424ff","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":39177,"upload_time":"2024-08-01T19:32:19","upload_time_iso_8601":"2024-08-01T19:32:19.765946Z","url":"https://files.pythonhosted.org/packages/f2/53/f9672c6aa3c79b6a5b64321e93d2316f126add867ceb2e3e95ea8b4bf1b0/agentops-0.3.5-py3-none-any.whl","yanked":true,"yanked_reason":"Introduces - FileNotFoundError impacting OpenAI and LiteLLM integrations"},{"comment_text":"","digests":{"blake2b_256":"235508ce5915f1ceb86ea6f7a6e8c8dc025b34981408a1b638316b5140fad525","md5":"53ef2f5230de09260f4ead09633dde62","sha256":"ae98540355ce9b892a630e61a7224a9175657cad1b7e799269238748ca7bc0ea"},"downloads":-1,"filename":"agentops-0.3.5.tar.gz","has_sig":false,"md5_digest":"53ef2f5230de09260f4ead09633dde62","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":42699,"upload_time":"2024-08-01T19:32:21","upload_time_iso_8601":"2024-08-01T19:32:21.259555Z","url":"https://files.pythonhosted.org/packages/23/55/08ce5915f1ceb86ea6f7a6e8c8dc025b34981408a1b638316b5140fad525/agentops-0.3.5.tar.gz","yanked":true,"yanked_reason":"Introduces FileNotFoundError impacting OpenAI and LiteLLM 
integrations"}],"0.3.6":[{"comment_text":"","digests":{"blake2b_256":"be89412afc864df3715d377cff9fe15deadaccdc0902b0a242f742f286e6d84b","md5":"149922f5cd986a8641b6e88c991af0cc","sha256":"413f812eb015fb31175a507784afe08123adfa9e227870e315899b059f42b443"},"downloads":-1,"filename":"agentops-0.3.6-py3-none-any.whl","has_sig":false,"md5_digest":"149922f5cd986a8641b6e88c991af0cc","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":39431,"upload_time":"2024-08-02T06:48:19","upload_time_iso_8601":"2024-08-02T06:48:19.594149Z","url":"https://files.pythonhosted.org/packages/be/89/412afc864df3715d377cff9fe15deadaccdc0902b0a242f742f286e6d84b/agentops-0.3.6-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"c3bf85f1439c3951ef69c81dbd7ef6df8a11df957e8d1180d835d71c11fa5131","md5":"b68d3124e365867f891bec4fb211a398","sha256":"0941f2486f3a561712ba6f77d560b49e2df55be141f243da0f9dc97ed43e6968"},"downloads":-1,"filename":"agentops-0.3.6.tar.gz","has_sig":false,"md5_digest":"b68d3124e365867f891bec4fb211a398","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":42933,"upload_time":"2024-08-02T06:48:21","upload_time_iso_8601":"2024-08-02T06:48:21.508300Z","url":"https://files.pythonhosted.org/packages/c3/bf/85f1439c3951ef69c81dbd7ef6df8a11df957e8d1180d835d71c11fa5131/agentops-0.3.6.tar.gz","yanked":false,"yanked_reason":null}],"0.3.7":[{"comment_text":"","digests":{"blake2b_256":"a34d05ba61e4fbd976dabe736d74fb2bb14d064ca758f05f084c0dadb6ac5cb1","md5":"551df1e89278270e0f5522d41f5c28ae","sha256":"7eeec5bef41e9ba397b3d880bcec8cd0818209ab31665c85e8b97615011a23d9"},"downloads":-1,"filename":"agentops-0.3.7-py3-none-any.whl","has_sig":false,"md5_digest":"551df1e89278270e0f5522d41f5c28ae","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":39816,"upload_time":"2024-08-08T23:21:45","upload_time_iso_8601":"2024-08-08T23:21:45.035395Z","url":"https://files.pyt
honhosted.org/packages/a3/4d/05ba61e4fbd976dabe736d74fb2bb14d064ca758f05f084c0dadb6ac5cb1/agentops-0.3.7-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"9f31034c3e062287f4fe9f57f2448e9508617a26bbb8a16b11c77cda9b28e1c0","md5":"1c48a797903a25988bae9b72559307ec","sha256":"048ee3caa5edf01b98c994e4e3ff90c09d83f820a43a70f07db96032c3386750"},"downloads":-1,"filename":"agentops-0.3.7.tar.gz","has_sig":false,"md5_digest":"1c48a797903a25988bae9b72559307ec","packagetype":"sdist","python_version":"source","requires_python":">=3.7","size":43495,"upload_time":"2024-08-08T23:21:46","upload_time_iso_8601":"2024-08-08T23:21:46.798531Z","url":"https://files.pythonhosted.org/packages/9f/31/034c3e062287f4fe9f57f2448e9508617a26bbb8a16b11c77cda9b28e1c0/agentops-0.3.7.tar.gz","yanked":false,"yanked_reason":null}],"0.3.9":[{"comment_text":"","digests":{"blake2b_256":"660ce931f892e0cedd40d861c3deff4134e1af1d226d6dc9762b32514d6dbc9f","md5":"82792de7bccabed058a24d3bd47443db","sha256":"582c9ddb30a9bb951b4d3ee2fd0428ba77d4a4367950b9cc6043f45b10bf12d8"},"downloads":-1,"filename":"agentops-0.3.9-py3-none-any.whl","has_sig":false,"md5_digest":"82792de7bccabed058a24d3bd47443db","packagetype":"bdist_wheel","python_version":"py3","requires_python":">=3.7","size":40235,"upload_time":"2024-08-15T21:21:33","upload_time_iso_8601":"2024-08-15T21:21:33.468748Z","url":"https://files.pythonhosted.org/packages/66/0c/e931f892e0cedd40d861c3deff4134e1af1d226d6dc9762b32514d6dbc9f/agentops-0.3.9-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":"","digests":{"blake2b_256":"e17b68cef3aaf44d423046b7779e9325e4feef5257e6d784a55c9dadf84bd61a","md5":"470f3b2663b71eb2f1597903bf8922e7","sha256":"7c999edbc64196924acdb06da09ec664a09d9fec8e73ba4e0f89e5f3dafc79e5"},"downloads":-1,"filename":"agentops-0.3.9.tar.gz","has_sig":false,"md5_digest":"470f3b2663b71eb2f1597903bf8922e7","packagetype":"sdist","python_version":"source","requires_python":">=3.7","siz
e":43796,"upload_time":"2024-08-15T21:21:34","upload_time_iso_8601":"2024-08-15T21:21:34.591272Z","url":"https://files.pythonhosted.org/packages/e1/7b/68cef3aaf44d423046b7779e9325e4feef5257e6d784a55c9dadf84bd61a/agentops-0.3.9.tar.gz","yanked":false,"yanked_reason":null}],"0.4.0":[{"comment_text":null,"digests":{"blake2b_256":"060e66184fab1fc3bdd955ac20ea7bdef78f5b9aecc4080ea3e054c2a2436991","md5":"250de44e3599992c75625cef67682ecd","sha256":"b4821b8ec69c05a4d13b34eaad4762bb06a4f14e1241d57c16fdd28de5c8c929"},"downloads":-1,"filename":"agentops-0.4.0-py3-none-any.whl","has_sig":false,"md5_digest":"250de44e3599992c75625cef67682ecd","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":171419,"upload_time":"2025-03-13T11:24:15","upload_time_iso_8601":"2025-03-13T11:24:15.042606Z","url":"https://files.pythonhosted.org/packages/06/0e/66184fab1fc3bdd955ac20ea7bdef78f5b9aecc4080ea3e054c2a2436991/agentops-0.4.0-py3-none-any.whl","yanked":true,"yanked_reason":"broken - dependencies"},{"comment_text":null,"digests":{"blake2b_256":"ff7f8a57d060489c780db3e15c4d9ff8c670e5db583549c74dd2d32ae6ec10c0","md5":"ea0932849a7311750c6ac0e567c90182","sha256":"45f5367cecd8a0b648055b6ec76e8a6a2801425e80dede8f86b39e9c6cfe1d98"},"downloads":-1,"filename":"agentops-0.4.0.tar.gz","has_sig":false,"md5_digest":"ea0932849a7311750c6ac0e567c90182","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":248757,"upload_time":"2025-03-13T11:24:16","upload_time_iso_8601":"2025-03-13T11:24:16.866033Z","url":"https://files.pythonhosted.org/packages/ff/7f/8a57d060489c780db3e15c4d9ff8c670e5db583549c74dd2d32ae6ec10c0/agentops-0.4.0.tar.gz","yanked":true,"yanked_reason":"broken 
dependencies"}],"0.4.1":[{"comment_text":null,"digests":{"blake2b_256":"736e7ab03c56260ec59bfaeeb08efb76f55ec6153861ad2a9cf20b38b222e4e7","md5":"3fcebe0141ca19b2fbcb53e918003ce9","sha256":"69c944e22628bc0f52c534007d2453da2a1988a7fd1f993720c4a15b0f70465a"},"downloads":-1,"filename":"agentops-0.4.1-py3-none-any.whl","has_sig":false,"md5_digest":"3fcebe0141ca19b2fbcb53e918003ce9","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":171402,"upload_time":"2025-03-13T16:29:26","upload_time_iso_8601":"2025-03-13T16:29:26.477091Z","url":"https://files.pythonhosted.org/packages/73/6e/7ab03c56260ec59bfaeeb08efb76f55ec6153861ad2a9cf20b38b222e4e7/agentops-0.4.1-py3-none-any.whl","yanked":true,"yanked_reason":"Broken - dependencies"},{"comment_text":null,"digests":{"blake2b_256":"ca303217cd3480ad099ffa92848ccbc8672e5232c22918c95a4b99e49c0ef31e","md5":"ec421fa88b575b827fc0d3fd02f45515","sha256":"fec044f0346dca6aba17e458e669ac1f52f1b618a4a15b43342615096c5e7d56"},"downloads":-1,"filename":"agentops-0.4.1.tar.gz","has_sig":false,"md5_digest":"ec421fa88b575b827fc0d3fd02f45515","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":248747,"upload_time":"2025-03-13T16:29:27","upload_time_iso_8601":"2025-03-13T16:29:27.905694Z","url":"https://files.pythonhosted.org/packages/ca/30/3217cd3480ad099ffa92848ccbc8672e5232c22918c95a4b99e49c0ef31e/agentops-0.4.1.tar.gz","yanked":true,"yanked_reason":"Broken 
dependencies"}],"0.4.10":[{"comment_text":null,"digests":{"blake2b_256":"301e0fe4fb617a5a69a8692b571d726f03e713a37d94d6a43c595a08fc33cff3","md5":"5ac7ec12e80bae6946dc10e46ef768f7","sha256":"917ad7ad51af0ca00cace2a3ae1d1d36e0d65dc813e030fcd377ff98535002bd"},"downloads":-1,"filename":"agentops-0.4.10-py3-none-any.whl","has_sig":false,"md5_digest":"5ac7ec12e80bae6946dc10e46ef768f7","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":198777,"upload_time":"2025-05-08T20:37:29","upload_time_iso_8601":"2025-05-08T20:37:29.322288Z","url":"https://files.pythonhosted.org/packages/30/1e/0fe4fb617a5a69a8692b571d726f03e713a37d94d6a43c595a08fc33cff3/agentops-0.4.10-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"a0ef0a56be3981bd464ad5a22fa3a859421f4b5560cbbb082f3ef9aca9cdb1a7","md5":"1954d07bfa38ba5c5ce0e516b7dbfdc9","sha256":"b66a48b4ec50c9cb34abc6ff1df873f0dcddbbb528d8a8c0527cb97b24c91b36"},"downloads":-1,"filename":"agentops-0.4.10.tar.gz","has_sig":false,"md5_digest":"1954d07bfa38ba5c5ce0e516b7dbfdc9","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":284727,"upload_time":"2025-05-08T20:37:30","upload_time_iso_8601":"2025-05-08T20:37:30.744217Z","url":"https://files.pythonhosted.org/packages/a0/ef/0a56be3981bd464ad5a22fa3a859421f4b5560cbbb082f3ef9aca9cdb1a7/agentops-0.4.10.tar.gz","yanked":false,"yanked_reason":null}],"0.4.11":[{"comment_text":null,"digests":{"blake2b_256":"35cde66dea05d2d8070f886e8f4ce86905cf1cce2f89622e041f26e39f717c9e","md5":"20424d54ba76517d586d4bcc92dda3bf","sha256":"b08c84fd69f36fcd5d6f2b14d16ff88b977a9a417d92448c9709f3c7990d6438"},"downloads":-1,"filename":"agentops-0.4.11-py3-none-any.whl","has_sig":false,"md5_digest":"20424d54ba76517d586d4bcc92dda3bf","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":198789,"upload_time":"2025-05-12T20:38:29","upload_time_iso_8601":"2025-05-12T20:38:29
.202046Z","url":"https://files.pythonhosted.org/packages/35/cd/e66dea05d2d8070f886e8f4ce86905cf1cce2f89622e041f26e39f717c9e/agentops-0.4.11-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"349df76fc1760cb21788967db3dd22ff2e6521c42b8ecee152e6ac4278e7cade","md5":"b7affd8b15834e4f9cb63066d7d160d1","sha256":"6eb80ee4a0653f9bdc9fc7641bf60cb7546cd34ff1c04dfbc4fca77dbb07edda"},"downloads":-1,"filename":"agentops-0.4.11.tar.gz","has_sig":false,"md5_digest":"b7affd8b15834e4f9cb63066d7d160d1","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":284735,"upload_time":"2025-05-12T20:38:30","upload_time_iso_8601":"2025-05-12T20:38:30.393540Z","url":"https://files.pythonhosted.org/packages/34/9d/f76fc1760cb21788967db3dd22ff2e6521c42b8ecee152e6ac4278e7cade/agentops-0.4.11.tar.gz","yanked":false,"yanked_reason":null}],"0.4.12":[{"comment_text":null,"digests":{"blake2b_256":"eb86772ed94e4e55433e8014933dab08aa6dfbcd8072f7fd74ffcad335ba0e73","md5":"831a3d54bccce09cc6c2a352776d02e6","sha256":"7c2685ae9c9de1a1071f6a29d395444191744d5ee58e33c020a69e2388dc2f7c"},"downloads":-1,"filename":"agentops-0.4.12-py3-none-any.whl","has_sig":false,"md5_digest":"831a3d54bccce09cc6c2a352776d02e6","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":198319,"upload_time":"2025-05-15T19:59:27","upload_time_iso_8601":"2025-05-15T19:59:27.609093Z","url":"https://files.pythonhosted.org/packages/eb/86/772ed94e4e55433e8014933dab08aa6dfbcd8072f7fd74ffcad335ba0e73/agentops-0.4.12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"0cf664cea8e916a305d2dc2f3f3840a1d4cae40e1927892e1fcc11f83ec7ebee","md5":"7e97e0612a6e8544b37a2fa2e1633166","sha256":"530f15d428a4c78db918fa766366c8f11105c4d1d3b1a56de027747d805a573f"},"downloads":-1,"filename":"agentops-0.4.12.tar.gz","has_sig":false,"md5_digest":"7e97e0612a6e8544b37a2fa2e1633166","packagetype":"sd
ist","python_version":"source","requires_python":"<3.14,>=3.9","size":284309,"upload_time":"2025-05-15T19:59:28","upload_time_iso_8601":"2025-05-15T19:59:28.955745Z","url":"https://files.pythonhosted.org/packages/0c/f6/64cea8e916a305d2dc2f3f3840a1d4cae40e1927892e1fcc11f83ec7ebee/agentops-0.4.12.tar.gz","yanked":false,"yanked_reason":null}],"0.4.2":[{"comment_text":null,"digests":{"blake2b_256":"b13fcb38831e86502e3a30460a27e72a254df39cc2f223d1952e063e2d0b1f70","md5":"c958500ff1e2b600064e980d526f3ad8","sha256":"4c376e3a95d1c65a864e8a5ab6f4bdb62f76abf2271b3c9a1cda2a0ad33b2b1a"},"downloads":-1,"filename":"agentops-0.4.2-py3-none-any.whl","has_sig":false,"md5_digest":"c958500ff1e2b600064e980d526f3ad8","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":171420,"upload_time":"2025-03-13T16:56:31","upload_time_iso_8601":"2025-03-13T16:56:31.589623Z","url":"https://files.pythonhosted.org/packages/b1/3f/cb38831e86502e3a30460a27e72a254df39cc2f223d1952e063e2d0b1f70/agentops-0.4.2-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"4bd0f2c1951661617febfd14c3e98a58fbd805e48f453356e912dc8efc950490","md5":"7a125604d2bb3494714462442f0ac47c","sha256":"42cbc30a0eecee5db468d01dcbe398d57f080cbf8bb09aecc2ce40c5a21509a5"},"downloads":-1,"filename":"agentops-0.4.2.tar.gz","has_sig":false,"md5_digest":"7a125604d2bb3494714462442f0ac47c","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":248754,"upload_time":"2025-03-13T16:56:33","upload_time_iso_8601":"2025-03-13T16:56:33.062966Z","url":"https://files.pythonhosted.org/packages/4b/d0/f2c1951661617febfd14c3e98a58fbd805e48f453356e912dc8efc950490/agentops-0.4.2.tar.gz","yanked":false,"yanked_reason":null}],"0.4.3":[{"comment_text":null,"digests":{"blake2b_256":"398892f5a663cf616607e92a0499f5b636fe4e5ae8a6b7febc436077cd02ecd5","md5":"e739880fc1b0cf1e15a816277ca1e8d9","sha256":"c69cf884fc20cd3b44dd07bc9bca9ecec72e44fd2b12c505
23670e3743fbbe6c"},"downloads":-1,"filename":"agentops-0.4.3-py3-none-any.whl","has_sig":false,"md5_digest":"e739880fc1b0cf1e15a816277ca1e8d9","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":111111,"upload_time":"2025-03-14T17:35:53","upload_time_iso_8601":"2025-03-14T17:35:53.978325Z","url":"https://files.pythonhosted.org/packages/39/88/92f5a663cf616607e92a0499f5b636fe4e5ae8a6b7febc436077cd02ecd5/agentops-0.4.3-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"c296f6f5268ffd68079185c6b21190a6ab5b35997678ce89af211d3c3683cc16","md5":"8df7f60a4346721caf9a4a74b0ba2e32","sha256":"48379801976e5e6c830ee40b247d7e7834fb79fb18d2cec926a8c06bdf767090"},"downloads":-1,"filename":"agentops-0.4.3.tar.gz","has_sig":false,"md5_digest":"8df7f60a4346721caf9a4a74b0ba2e32","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":209668,"upload_time":"2025-03-14T17:35:55","upload_time_iso_8601":"2025-03-14T17:35:55.387572Z","url":"https://files.pythonhosted.org/packages/c2/96/f6f5268ffd68079185c6b21190a6ab5b35997678ce89af211d3c3683cc16/agentops-0.4.3.tar.gz","yanked":false,"yanked_reason":null}],"0.4.4":[{"comment_text":null,"digests":{"blake2b_256":"e230799eb1a6b63e6f072611e4d6c5f7d70d969b1c2d14735100a5295eb794fd","md5":"76de08f25b0f1765ec9b3ce200f2273c","sha256":"a33f32e0d09e942b501a4066460b77bc1f6be960bdbd8dfed1cfc5950702f87c"},"downloads":-1,"filename":"agentops-0.4.4-py3-none-any.whl","has_sig":false,"md5_digest":"76de08f25b0f1765ec9b3ce200f2273c","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":115456,"upload_time":"2025-03-17T21:08:16","upload_time_iso_8601":"2025-03-17T21:08:16.149499Z","url":"https://files.pythonhosted.org/packages/e2/30/799eb1a6b63e6f072611e4d6c5f7d70d969b1c2d14735100a5295eb794fd/agentops-0.4.4-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"65e
969c80c4c8fbf27826644c2bbcaf657bf9882a7974b115bff5021c683560d","md5":"2c34c20f9b785c60ea1cc6011b50684b","sha256":"509daf197bb27f8e5b1ac87e516487883178335c70328fd74897b1a5fadbf0bd"},"downloads":-1,"filename":"agentops-0.4.4.tar.gz","has_sig":false,"md5_digest":"2c34c20f9b785c60ea1cc6011b50684b","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":209971,"upload_time":"2025-03-17T21:08:17","upload_time_iso_8601":"2025-03-17T21:08:17.396763Z","url":"https://files.pythonhosted.org/packages/65/e9/69c80c4c8fbf27826644c2bbcaf657bf9882a7974b115bff5021c683560d/agentops-0.4.4.tar.gz","yanked":false,"yanked_reason":null}],"0.4.5":[{"comment_text":null,"digests":{"blake2b_256":"5cf1848e02d7233e3bfe74119e28a4fb7cf9dd3363eb215cf8bb8ca835317cc7","md5":"e70f8b49cbbbf5b6a56bbfc51938581c","sha256":"ec45a775dd5f494fe137620ce3e43aa06a6858495bed31c4b9019b343a34d092"},"downloads":-1,"filename":"agentops-0.4.5-py3-none-any.whl","has_sig":false,"md5_digest":"e70f8b49cbbbf5b6a56bbfc51938581c","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":148034,"upload_time":"2025-03-25T00:05:57","upload_time_iso_8601":"2025-03-25T00:05:57.075368Z","url":"https://files.pythonhosted.org/packages/5c/f1/848e02d7233e3bfe74119e28a4fb7cf9dd3363eb215cf8bb8ca835317cc7/agentops-0.4.5-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"cc2c243f2e01dae6cc2583bca8009c735bb08267c9f51f0e916154b91329e08f","md5":"16781e2f18e40444f869c38b3b27c70c","sha256":"d82d908072c8ffea1b90d63d651ccb73dec8597ef830e60b4311efb4f5593e8e"},"downloads":-1,"filename":"agentops-0.4.5.tar.gz","has_sig":false,"md5_digest":"16781e2f18e40444f869c38b3b27c70c","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":232839,"upload_time":"2025-03-25T00:05:58","upload_time_iso_8601":"2025-03-25T00:05:58.270348Z","url":"https://files.pythonhosted.org/packages/cc/2c/243f2e01dae6cc2583bca8009c735bb0
8267c9f51f0e916154b91329e08f/agentops-0.4.5.tar.gz","yanked":false,"yanked_reason":null}],"0.4.6":[{"comment_text":null,"digests":{"blake2b_256":"316124fa78f759c68e1484ed04ed6d0d60ad4b6b58d02570a65dc670975fd954","md5":"36d7d7e64cde9ed73d4ced26e9ee4fb0","sha256":"283929b8f7a1bc79693a6c982e012ccceac4645c6a35709603e7ff83332ec00d"},"downloads":-1,"filename":"agentops-0.4.6-py3-none-any.whl","has_sig":false,"md5_digest":"36d7d7e64cde9ed73d4ced26e9ee4fb0","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":160863,"upload_time":"2025-04-07T22:18:58","upload_time_iso_8601":"2025-04-07T22:18:58.881418Z","url":"https://files.pythonhosted.org/packages/31/61/24fa78f759c68e1484ed04ed6d0d60ad4b6b58d02570a65dc670975fd954/agentops-0.4.6-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"d0073869f9b99dbc45ac55bc0dbfd8cf6b22de850a716004135ec96a29c3d81e","md5":"1390e3bc3185a4e97492958c1c4e549c","sha256":"78179a0d2c02217445fb7315bb963496bb338c96bcc126bebfb45a5733fea23e"},"downloads":-1,"filename":"agentops-0.4.6.tar.gz","has_sig":false,"md5_digest":"1390e3bc3185a4e97492958c1c4e549c","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":254164,"upload_time":"2025-04-07T22:19:00","upload_time_iso_8601":"2025-04-07T22:19:00.589814Z","url":"https://files.pythonhosted.org/packages/d0/07/3869f9b99dbc45ac55bc0dbfd8cf6b22de850a716004135ec96a29c3d81e/agentops-0.4.6.tar.gz","yanked":false,"yanked_reason":null}],"0.4.7":[{"comment_text":null,"digests":{"blake2b_256":"a4be6d708281bd3a282879859231fb7d2ab1d0fec6ee421ec6b02d08a3726670","md5":"3bb2171ad2809a49c43935f1d249aa02","sha256":"b1c4acda70ef45a3c7deac01a695b922a14bb762826ba68fb2b8c3859f4e87da"},"downloads":-1,"filename":"agentops-0.4.7-py3-none-any.whl","has_sig":false,"md5_digest":"3bb2171ad2809a49c43935f1d249aa02","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":182708,"upload_time
":"2025-04-24T00:39:39","upload_time_iso_8601":"2025-04-24T00:39:39.403616Z","url":"https://files.pythonhosted.org/packages/a4/be/6d708281bd3a282879859231fb7d2ab1d0fec6ee421ec6b02d08a3726670/agentops-0.4.7-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"20a5d142e98481d82912280e29b5b73dc5a5deea4d34c132045333b5201c1209","md5":"62c78776d059798f2e6a74bf1b03932f","sha256":"ad6dca62ff88d4c09eda34e3393c138880a5126682b53cf0c881a7dbb61dcc0d"},"downloads":-1,"filename":"agentops-0.4.7.tar.gz","has_sig":false,"md5_digest":"62c78776d059798f2e6a74bf1b03932f","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":272982,"upload_time":"2025-04-24T00:39:40","upload_time_iso_8601":"2025-04-24T00:39:40.931148Z","url":"https://files.pythonhosted.org/packages/20/a5/d142e98481d82912280e29b5b73dc5a5deea4d34c132045333b5201c1209/agentops-0.4.7.tar.gz","yanked":false,"yanked_reason":null}],"0.4.8":[{"comment_text":null,"digests":{"blake2b_256":"96d32cee2a94f2917be9c7575238dfff3088a51a6376168a2c7287da0e8b654c","md5":"a02a327b4620a909e831fbd6889bf25e","sha256":"86f439d47c0fdfcb3525859528300b19bb96c105875d0b5b3d205260aedc3f24"},"downloads":-1,"filename":"agentops-0.4.8-py3-none-any.whl","has_sig":false,"md5_digest":"a02a327b4620a909e831fbd6889bf25e","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":182678,"upload_time":"2025-04-27T09:10:39","upload_time_iso_8601":"2025-04-27T09:10:39.925403Z","url":"https://files.pythonhosted.org/packages/96/d3/2cee2a94f2917be9c7575238dfff3088a51a6376168a2c7287da0e8b654c/agentops-0.4.8-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"ba64732ebe57c77123058cbc03eec0795267fac65aa6032b8906b1dfe80ff837","md5":"f947ace32256ff3ee6b2a6c716ef3543","sha256":"c299ca067298f568ae2885e4d21951b0bdb7067692d930b57ff1f19bd447ae5a"},"downloads":-1,"filename":"agentops-0.4.8.tar.gz","has_sig":false,"md5_
digest":"f947ace32256ff3ee6b2a6c716ef3543","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":272951,"upload_time":"2025-04-27T09:10:41","upload_time_iso_8601":"2025-04-27T09:10:41.806172Z","url":"https://files.pythonhosted.org/packages/ba/64/732ebe57c77123058cbc03eec0795267fac65aa6032b8906b1dfe80ff837/agentops-0.4.8.tar.gz","yanked":false,"yanked_reason":null}],"0.4.9":[{"comment_text":null,"digests":{"blake2b_256":"5814e40def8897f404273f69d6841793b3dbdcbb8f2948fb6bd9c50087239b37","md5":"f49c139fbf17affaa3e8165743971a50","sha256":"622b9ecdc1b5e91c5ac3aa92d2f756d083c4e0ba830d8e94c3785f7290587a97"},"downloads":-1,"filename":"agentops-0.4.9-py3-none-any.whl","has_sig":false,"md5_digest":"f49c139fbf17affaa3e8165743971a50","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":198463,"upload_time":"2025-05-02T23:51:48","upload_time_iso_8601":"2025-05-02T23:51:48.502905Z","url":"https://files.pythonhosted.org/packages/58/14/e40def8897f404273f69d6841793b3dbdcbb8f2948fb6bd9c50087239b37/agentops-0.4.9-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"32efa2af9802799b3d26c570b8dd18669e3577fb58fa093a3c9cfafbf179376c","md5":"5eb22fdc989748711f0252c3679388e9","sha256":"c69a0c912a75367850036c20368d4722462b5769eb86bdebabb0695f8be4c8bd"},"downloads":-1,"filename":"agentops-0.4.9.tar.gz","has_sig":false,"md5_digest":"5eb22fdc989748711f0252c3679388e9","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":284471,"upload_time":"2025-05-02T23:51:49","upload_time_iso_8601":"2025-05-02T23:51:49.781274Z","url":"https://files.pythonhosted.org/packages/32/ef/a2af9802799b3d26c570b8dd18669e3577fb58fa093a3c9cfafbf179376c/agentops-0.4.9.tar.gz","yanked":false,"yanked_reason":null}]},"urls":[{"comment_text":null,"digests":{"blake2b_256":"eb86772ed94e4e55433e8014933dab08aa6dfbcd8072f7fd74ffcad335ba0e73","md5":"831a3d54bccce09cc6c2a352776d02e6"
,"sha256":"7c2685ae9c9de1a1071f6a29d395444191744d5ee58e33c020a69e2388dc2f7c"},"downloads":-1,"filename":"agentops-0.4.12-py3-none-any.whl","has_sig":false,"md5_digest":"831a3d54bccce09cc6c2a352776d02e6","packagetype":"bdist_wheel","python_version":"py3","requires_python":"<3.14,>=3.9","size":198319,"upload_time":"2025-05-15T19:59:27","upload_time_iso_8601":"2025-05-15T19:59:27.609093Z","url":"https://files.pythonhosted.org/packages/eb/86/772ed94e4e55433e8014933dab08aa6dfbcd8072f7fd74ffcad335ba0e73/agentops-0.4.12-py3-none-any.whl","yanked":false,"yanked_reason":null},{"comment_text":null,"digests":{"blake2b_256":"0cf664cea8e916a305d2dc2f3f3840a1d4cae40e1927892e1fcc11f83ec7ebee","md5":"7e97e0612a6e8544b37a2fa2e1633166","sha256":"530f15d428a4c78db918fa766366c8f11105c4d1d3b1a56de027747d805a573f"},"downloads":-1,"filename":"agentops-0.4.12.tar.gz","has_sig":false,"md5_digest":"7e97e0612a6e8544b37a2fa2e1633166","packagetype":"sdist","python_version":"source","requires_python":"<3.14,>=3.9","size":284309,"upload_time":"2025-05-15T19:59:28","upload_time_iso_8601":"2025-05-15T19:59:28.955745Z","url":"https://files.pythonhosted.org/packages/0c/f6/64cea8e916a305d2dc2f3f3840a1d4cae40e1927892e1fcc11f83ec7ebee/agentops-0.4.12.tar.gz","yanked":false,"yanked_reason":null}],"vulnerabilities":[]} - - ' + string: "{\n \"id\": \"chatcmpl-DDG3i8i7PX54gKKVTtyZoA9C4DBqM\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052366,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"hi\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 75,\n \"completion_tokens\": + 1,\n \"total_tokens\": 76,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n 
\"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_373a14eb6f\"\n}\n" headers: - Accept-Ranges: - - bytes + CF-RAY: + - CF-RAY-XXX Connection: - keep-alive - Content-Length: - - '141284' + Content-Type: + - application/json Date: - - Fri, 16 May 2025 22:11:18 GMT - Permissions-Policy: - - publickey-credentials-create=(self),publickey-credentials-get=(self),accelerometer=(),ambient-light-sensor=(),autoplay=(),battery=(),camera=(),display-capture=(),document-domain=(),encrypted-media=(),execution-while-not-rendered=(),execution-while-out-of-viewport=(),fullscreen=(),gamepad=(),geolocation=(),gyroscope=(),hid=(),identity-credentials-get=(),idle-detection=(),local-fonts=(),magnetometer=(),microphone=(),midi=(),otp-credentials=(),payment=(),picture-in-picture=(),screen-wake-lock=(),serial=(),speaker-selection=(),storage-access=(),usb=(),web-share=(),xr-spatial-tracking=() + - Wed, 25 Feb 2026 20:46:06 GMT + Server: + - cloudflare Strict-Transport-Security: - - max-age=31536000; includeSubDomains; preload - Vary: - - Accept-Encoding - X-Cache: - - MISS, HIT, HIT - X-Cache-Hits: - - 0, 14, 0 - X-Content-Type-Options: - - nosniff - X-Frame-Options: - - deny - X-Permitted-Cross-Domain-Policies: - - none - X-Served-By: - - cache-iad-kjyo7100048-IAD, cache-iad-kjyo7100044-IAD, cache-sjc10057-SJC - X-Timer: - - S1747433478.903585,VS0,VE171 - X-XSS-Protection: - - 1; mode=block - access-control-allow-headers: - - Content-Type, If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since - access-control-allow-methods: - - GET - access-control-allow-origin: - - '*' - access-control-expose-headers: - - X-PyPI-Last-Serial - access-control-max-age: - - '86400' - cache-control: - - max-age=900, public - content-security-policy: - - base-uri 'self'; connect-src 'self' https://api.github.com/repos/ https://api.github.com/search/issues https://gitlab.com/api/ https://analytics.python.org 
fastly-insights.com *.fastly-insights.com *.ethicalads.io https://api.pwnedpasswords.com https://cdn.jsdelivr.net/npm/mathjax@3.2.2/es5/sre/mathmaps/ https://2p66nmmycsj3.statuspage.io; default-src 'none'; font-src 'self' fonts.gstatic.com; form-action 'self' https://checkout.stripe.com; frame-ancestors 'none'; frame-src 'none'; img-src 'self' https://pypi-camo.freetls.fastly.net/ *.fastly-insights.com *.ethicalads.io ethicalads.blob.core.windows.net; script-src 'self' https://analytics.python.org *.fastly-insights.com *.ethicalads.io 'sha256-U3hKDidudIaxBDEzwGJApJgPEf2mWk6cfMWghrAa6i0=' https://cdn.jsdelivr.net/npm/mathjax@3.2.2/ 'sha256-1CldwzdEg2k1wTmf7s5RWVd7NMXI/7nxxjJM2C4DqII=' 'sha256-0POaN8stWYQxhzjKS+/eOfbbJ/u4YHO5ZagJvLpMypo='; style-src 'self' fonts.googleapis.com *.ethicalads.io 'sha256-2YHqZokjiizkHi1Zt+6ar0XJ0OeEy/egBnlm+MDMtrM=' - 'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=' 'sha256-JLEjeN9e5dGsz5475WyRaoA4eQOdNPxDIeUhclnJDCE=' 'sha256-mQyxHEuwZJqpxCw3SLmc4YOySNKXunyu2Oiz1r3/wAE=' 'sha256-OCf+kv5Asiwp++8PIevKBYSgnNLNUZvxAp4a7wMLuKA=' 'sha256-h5LOiLhk6wiJrGsG5ItM0KimwzWQH/yAcmoJDJL//bY='; worker-src *.fastly-insights.com - content-type: - - application/json - etag: - - '"f+xzB2HkOqSq5o8PEbR7zQ"' - referrer-policy: - - origin-when-cross-origin - x-pypi-last-serial: - - '29075100' - status: - code: 200 - message: OK -- request: - body: '{"messages": [{"role": "system", "content": "You are base_agent. 
You are a helpful assistant that just says hi\nYour personal goal is: Just say hi\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Just say hi\n\nThis is the expected criteria for your final answer: hi\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}' - headers: - accept: - - application/json - accept-encoding: - - gzip, deflate - connection: - - keep-alive - content-length: - - '838' - content-type: - - application/json - cookie: - - _cfuvid=pgWR9g.y6i.3_EHHkfdBfvv5isYFU_joRq3kXvX2IE4-1740180069173-0.0.1.1-604800000 - host: - - api.openai.com - user-agent: - - OpenAI/Python 1.68.2 - x-stainless-arch: - - arm64 - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - MacOS - x-stainless-package-version: - - 1.68.2 - x-stainless-raw-response: - - 'true' - x-stainless-read-timeout: - - '600.0' - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.12.9 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-BXxYsYY0bTPlXgle6qZOlhLQQqvVP\",\n \"object\": \"chat.completion\",\n \"created\": 1747433478,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: hi\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n 
\"prompt_tokens\": 161,\n \"completion_tokens\": 12,\n \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_dbaca60df0\"\n}\n" - headers: - CF-RAY: - - 940e35c4d9de174e-SJC - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Fri, 16 May 2025 22:11:18 GMT - Server: - - cloudflare - Set-Cookie: - - __cf_bm=7gYhEK1ZebbV2RqWIdRN.0Kv_XoKdpvnwX3SkGHCXnU-1747433478-1.0.1.1-2aU819p9q3cYgN_xx91359ew9UFwtVswCekjsQw7Qgz4X9r3RzR9e0CRqkfXgCACAMxJI7BJCmWvJ0bRuKaFrXbWRGphDbDW5xMKyMxQxbY; path=/; expires=Fri, 16-May-25 22:41:18 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=SuZPImI5tNZ3RsqGDhWpp3lM9bZ.ClZzaHNPgVIvvHA-1747433478823-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '405' + - '292' + openai-project: + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - x-envoy-upstream-service-time: - - '411' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 x-ratelimit-limit-requests: - - '30000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '150000000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '29999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '149999824' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 2ms + - 
X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 0s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_8274f4da736f4f31854b3c8ad67d02fb + - X-REQUEST-ID-XXX status: code: 200 message: OK - request: - body: '{"messages": [{"role": "system", "content": "You are Task Execution Evaluator. Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed\nYour personal goal is: Your goal is to evaluate the performance of the agents in the crew based on the tasks they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating - on completion, quality, and overall performance.task_description: Just say hi task_expected_output: hi agent: base_agent agent_goal: Just say hi Task Output: hi\n\nThis is the expected criteria for your final answer: Evaluation Score from 1 to 10 based on the performance of the agents on the tasks\nyou MUST return the actual complete content as the final answer, not a summary.\nEnsure your final answer contains only the content in the following format: {\n \"quality\": float\n}\n\nEnsure the final output does not include any code block markers like ```json or ```python.\n\nBegin! 
This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}' + body: '{"messages":[{"role":"system","content":"You are Task Execution Evaluator. + Evaluator agent for crew evaluation with precise capabilities to evaluate the + performance of the agents in the crew based on the tasks they have performed\nYour + personal goal is: Your goal is to evaluate the performance of the agents in + the crew based on the tasks they have performed using score from 1 to 10 evaluating + on completion, quality, and overall performance."},{"role":"user","content":"\nCurrent + Task: Based on the task description and the expected output, compare and evaluate + the performance of the agents in the crew based on the Task Output they have + performed using score from 1 to 10 evaluating on completion, quality, and overall + performance.task_description: Just say hi task_expected_output: hi agent: base_agent + agent_goal: Just say hi Task Output: hi\n\nThis is the expected criteria for + your final answer: Evaluation Score from 1 to 10 based on the performance of + the agents on the tasks\nyou MUST return the actual complete content as the + final answer, not a summary.\nFormat your final answer according to the following + OpenAPI schema: {\n \"properties\": {\n \"quality\": {\n \"description\": + \"A score from 1 to 10 evaluating on completion, quality, and overall performance + from the task_description and task_expected_output to the actual Task Output.\",\n \"title\": + \"Quality\",\n \"type\": \"number\"\n }\n },\n \"required\": [\n \"quality\"\n ],\n \"title\": + \"TaskEvaluationPydanticOutput\",\n \"type\": \"object\",\n \"additionalProperties\": + false\n}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, + paraphrase, or modify the meaning of the content. 
Only structure it to match + the schema format.\n\nDo not include the OpenAPI schema in the final output. + Ensure the final output does not include any code block markers like ```json + or ```python.\n\nProvide your complete response:"}],"model":"gpt-4o-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"quality":{"description":"A + score from 1 to 10 evaluating on completion, quality, and overall performance + from the task_description and task_expected_output to the actual Task Output.","title":"Quality","type":"number"}},"required":["quality"],"title":"TaskEvaluationPydanticOutput","type":"object","additionalProperties":false},"name":"TaskEvaluationPydanticOutput","strict":true}},"stream":false}' headers: + User-Agent: + - X-USER-AGENT-XXX accept: - application/json accept-encoding: - - gzip, deflate + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX connection: - keep-alive content-length: - - '1765' + - '2449' content-type: - application/json - cookie: - - _cfuvid=SuZPImI5tNZ3RsqGDhWpp3lM9bZ.ClZzaHNPgVIvvHA-1747433478823-0.0.1.1-604800000; __cf_bm=7gYhEK1ZebbV2RqWIdRN.0Kv_XoKdpvnwX3SkGHCXnU-1747433478-1.0.1.1-2aU819p9q3cYgN_xx91359ew9UFwtVswCekjsQw7Qgz4X9r3RzR9e0CRqkfXgCACAMxJI7BJCmWvJ0bRuKaFrXbWRGphDbDW5xMKyMxQxbY host: - api.openai.com - user-agent: - - OpenAI/Python 1.68.2 x-stainless-arch: - - arm64 + - X-STAINLESS-ARCH-XXX x-stainless-async: - 'false' + x-stainless-helper-method: + - beta.chat.completions.parse x-stainless-lang: - python x-stainless-os: - - MacOS + - X-STAINLESS-OS-XXX x-stainless-package-version: - - 1.68.2 - x-stainless-raw-response: - - 'true' + - 1.83.0 x-stainless-read-timeout: - - '600.0' + - X-STAINLESS-READ-TIMEOUT-XXX x-stainless-retry-count: - '0' x-stainless-runtime: - CPython x-stainless-runtime-version: - - 3.12.9 + - 3.13.12 method: POST uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-BXxYtpQ3GBiYNJUfkAYDTQSCgL56c\",\n 
\"object\": \"chat.completion\",\n \"created\": 1747433479,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: {\\n \\\"quality\\\": 10.0\\n}\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 338,\n \"completion_tokens\": 22,\n \"total_tokens\": 360,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_54eb4bd693\"\ - \n}\n" + string: "{\n \"id\": \"chatcmpl-DDG3kSHBFtT0Gw3lryKg9uTpbeNwD\",\n \"object\": + \"chat.completion\",\n \"created\": 1772052368,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"{\\\"quality\\\":10}\",\n \"refusal\": + null,\n \"annotations\": []\n },\n \"logprobs\": null,\n + \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 473,\n \"completion_tokens\": 5,\n \"total_tokens\": 478,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_bd4be55b21\"\n}\n" headers: CF-RAY: - - 940e35cada1f174e-SJC + - CF-RAY-XXX Connection: - keep-alive Content-Type: - application/json Date: - - Fri, 16 May 2025 22:11:19 GMT + - Wed, 25 Feb 2026 20:46:08 GMT Server: - cloudflare + Strict-Transport-Security: + - STS-XXX Transfer-Encoding: - chunked X-Content-Type-Options: - - nosniff + - 
X-CONTENT-TYPE-XXX access-control-expose-headers: - - X-Request-ID + - ACCESS-CONTROL-XXX alt-svc: - h3=":443"; ma=86400 cf-cache-status: - DYNAMIC openai-organization: - - crewai-iuxna1 + - OPENAI-ORG-XXX openai-processing-ms: - - '696' + - '620' + openai-project: + - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - x-envoy-upstream-service-time: - - '708' + set-cookie: + - SET-COOKIE-XXX + x-openai-proxy-wasm: + - v0.1 x-ratelimit-limit-requests: - - '30000' + - X-RATELIMIT-LIMIT-REQUESTS-XXX x-ratelimit-limit-tokens: - - '150000000' + - X-RATELIMIT-LIMIT-TOKENS-XXX x-ratelimit-remaining-requests: - - '29999' + - X-RATELIMIT-REMAINING-REQUESTS-XXX x-ratelimit-remaining-tokens: - - '149999594' + - X-RATELIMIT-REMAINING-TOKENS-XXX x-ratelimit-reset-requests: - - 2ms + - X-RATELIMIT-RESET-REQUESTS-XXX x-ratelimit-reset-tokens: - - 0s + - X-RATELIMIT-RESET-TOKENS-XXX x-request-id: - - req_1227f2635e62eb396693c8857c57b878 + - X-REQUEST-ID-XXX status: code: 200 message: OK diff --git a/lib/crewai/tests/cli/authentication/test_auth_main.py b/lib/crewai/tests/cli/authentication/test_auth_main.py index 5f7308e20..095fea3c4 100644 --- a/lib/crewai/tests/cli/authentication/test_auth_main.py +++ b/lib/crewai/tests/cli/authentication/test_auth_main.py @@ -2,7 +2,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock, call, patch import pytest -import requests +import httpx from crewai.cli.authentication.main import AuthenticationCommand from crewai.cli.constants import ( CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE, @@ -220,7 +220,7 @@ class TestAuthenticationCommand: ] mock_console_print.assert_has_calls(expected_calls) - @patch("requests.post") + @patch("crewai.cli.authentication.main.httpx.post") def test_get_device_code(self, mock_post): mock_response = MagicMock() mock_response.json.return_value = { @@ -256,7 +256,7 @@ class TestAuthenticationCommand: 
"verification_uri_complete": "https://example.com/auth", } - @patch("requests.post") + @patch("crewai.cli.authentication.main.httpx.post") @patch("crewai.cli.authentication.main.console.print") def test_poll_for_token_success(self, mock_console_print, mock_post): mock_response_success = MagicMock() @@ -305,7 +305,7 @@ class TestAuthenticationCommand: ] mock_console_print.assert_has_calls(expected_calls) - @patch("requests.post") + @patch("crewai.cli.authentication.main.httpx.post") @patch("crewai.cli.authentication.main.console.print") def test_poll_for_token_timeout(self, mock_console_print, mock_post): mock_response_pending = MagicMock() @@ -324,7 +324,7 @@ class TestAuthenticationCommand: "Timeout: Failed to get the token. Please try again.", style="bold red" ) - @patch("requests.post") + @patch("crewai.cli.authentication.main.httpx.post") def test_poll_for_token_error(self, mock_post): """Test the method to poll for token (error path).""" # Setup mock to return error @@ -338,5 +338,5 @@ class TestAuthenticationCommand: device_code_data = {"device_code": "test_device_code", "interval": 1} - with pytest.raises(requests.HTTPError): + with pytest.raises(httpx.HTTPError): self.auth_command._poll_for_token(device_code_data) diff --git a/lib/crewai/tests/cli/deploy/test_deploy_main.py b/lib/crewai/tests/cli/deploy/test_deploy_main.py index f33dfbbd5..4b818cc58 100644 --- a/lib/crewai/tests/cli/deploy/test_deploy_main.py +++ b/lib/crewai/tests/cli/deploy/test_deploy_main.py @@ -4,10 +4,11 @@ from io import StringIO from unittest.mock import MagicMock, Mock, patch import pytest -import requests +import json + +import httpx from crewai.cli.deploy.main import DeployCommand from crewai.cli.utils import parse_toml -from requests.exceptions import JSONDecodeError class TestDeployCommand(unittest.TestCase): @@ -37,18 +38,18 @@ class TestDeployCommand(unittest.TestCase): DeployCommand() def test_validate_response_successful_response(self): - mock_response = 
Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.json.return_value = {"message": "Success"} mock_response.status_code = 200 - mock_response.ok = True + mock_response.is_success = True with patch("sys.stdout", new=StringIO()) as fake_out: self.deploy_command._validate_response(mock_response) assert fake_out.getvalue() == "" def test_validate_response_json_decode_error(self): - mock_response = Mock(spec=requests.Response) - mock_response.json.side_effect = JSONDecodeError("Decode error", "", 0) + mock_response = Mock(spec=httpx.Response) + mock_response.json.side_effect = json.JSONDecodeError("Decode error", "", 0) mock_response.status_code = 500 mock_response.content = b"Invalid JSON" @@ -64,13 +65,13 @@ class TestDeployCommand(unittest.TestCase): assert "Response:\nInvalid JSON" in output def test_validate_response_422_error(self): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.json.return_value = { "field1": ["Error message 1"], "field2": ["Error message 2"], } mock_response.status_code = 422 - mock_response.ok = False + mock_response.is_success = False with patch("sys.stdout", new=StringIO()) as fake_out: with pytest.raises(SystemExit): @@ -84,10 +85,10 @@ class TestDeployCommand(unittest.TestCase): assert "Field2 Error message 2" in output def test_validate_response_other_error(self): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.json.return_value = {"error": "Something went wrong"} mock_response.status_code = 500 - mock_response.ok = False + mock_response.is_success = False with patch("sys.stdout", new=StringIO()) as fake_out: with pytest.raises(SystemExit): diff --git a/lib/crewai/tests/cli/enterprise/test_main.py b/lib/crewai/tests/cli/enterprise/test_main.py index e6be4e006..8a225dc41 100644 --- a/lib/crewai/tests/cli/enterprise/test_main.py +++ b/lib/crewai/tests/cli/enterprise/test_main.py @@ -3,8 +3,9 
@@ import unittest from pathlib import Path from unittest.mock import Mock, patch -import requests -from requests.exceptions import JSONDecodeError +import json + +import httpx from crewai.cli.enterprise.main import EnterpriseConfigureCommand from crewai.cli.settings.main import SettingsCommand @@ -25,7 +26,7 @@ class TestEnterpriseConfigureCommand(unittest.TestCase): def tearDown(self): shutil.rmtree(self.test_dir) - @patch('crewai.cli.enterprise.main.requests.get') + @patch('crewai.cli.enterprise.main.httpx.get') @patch('crewai.cli.enterprise.main.get_crewai_version') def test_successful_configuration(self, mock_get_version, mock_requests_get): mock_get_version.return_value = "1.0.0" @@ -73,19 +74,23 @@ class TestEnterpriseConfigureCommand(unittest.TestCase): self.assertEqual(call_args[0], key) self.assertEqual(call_args[1], value) - @patch('crewai.cli.enterprise.main.requests.get') + @patch('crewai.cli.enterprise.main.httpx.get') @patch('crewai.cli.enterprise.main.get_crewai_version') def test_http_error_handling(self, mock_get_version, mock_requests_get): mock_get_version.return_value = "1.0.0" mock_response = Mock() - mock_response.raise_for_status.side_effect = requests.HTTPError("404 Not Found") + mock_response.raise_for_status.side_effect = httpx.HTTPStatusError( + "404 Not Found", + request=httpx.Request("GET", "http://test"), + response=httpx.Response(404), + ) mock_requests_get.return_value = mock_response with self.assertRaises(SystemExit): self.enterprise_command.configure("https://enterprise.example.com") - @patch('crewai.cli.enterprise.main.requests.get') + @patch('crewai.cli.enterprise.main.httpx.get') @patch('crewai.cli.enterprise.main.get_crewai_version') def test_invalid_json_response(self, mock_get_version, mock_requests_get): mock_get_version.return_value = "1.0.0" @@ -93,13 +98,13 @@ class TestEnterpriseConfigureCommand(unittest.TestCase): mock_response = Mock() mock_response.status_code = 200 mock_response.raise_for_status.return_value = None 
- mock_response.json.side_effect = JSONDecodeError("Invalid JSON", "", 0) + mock_response.json.side_effect = json.JSONDecodeError("Invalid JSON", "", 0) mock_requests_get.return_value = mock_response with self.assertRaises(SystemExit): self.enterprise_command.configure("https://enterprise.example.com") - @patch('crewai.cli.enterprise.main.requests.get') + @patch('crewai.cli.enterprise.main.httpx.get') @patch('crewai.cli.enterprise.main.get_crewai_version') def test_missing_required_fields(self, mock_get_version, mock_requests_get): mock_get_version.return_value = "1.0.0" @@ -115,7 +120,7 @@ class TestEnterpriseConfigureCommand(unittest.TestCase): with self.assertRaises(SystemExit): self.enterprise_command.configure("https://enterprise.example.com") - @patch('crewai.cli.enterprise.main.requests.get') + @patch('crewai.cli.enterprise.main.httpx.get') @patch('crewai.cli.enterprise.main.get_crewai_version') def test_settings_update_error(self, mock_get_version, mock_requests_get): mock_get_version.return_value = "1.0.0" diff --git a/lib/crewai/tests/cli/organization/test_main.py b/lib/crewai/tests/cli/organization/test_main.py index c0620fe33..0db790cbb 100644 --- a/lib/crewai/tests/cli/organization/test_main.py +++ b/lib/crewai/tests/cli/organization/test_main.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch, call import pytest from click.testing import CliRunner -import requests +import httpx from crewai.cli.organization.main import OrganizationCommand from crewai.cli.cli import org_list, switch, current @@ -115,7 +115,7 @@ class TestOrganizationCommand(unittest.TestCase): def test_list_organizations_api_error(self, mock_console): self.org_command.plus_api_client = MagicMock() self.org_command.plus_api_client.get_organizations.side_effect = ( - requests.exceptions.RequestException("API Error") + httpx.HTTPError("API Error") ) with pytest.raises(SystemExit): @@ -201,8 +201,10 @@ class TestOrganizationCommand(unittest.TestCase): 
@patch("crewai.cli.organization.main.console") def test_list_organizations_unauthorized(self, mock_console): mock_response = MagicMock() - mock_http_error = requests.exceptions.HTTPError( - "401 Client Error: Unauthorized", response=MagicMock(status_code=401) + mock_http_error = httpx.HTTPStatusError( + "401 Client Error: Unauthorized", + request=httpx.Request("GET", "http://test"), + response=httpx.Response(401), ) mock_response.raise_for_status.side_effect = mock_http_error @@ -219,8 +221,10 @@ class TestOrganizationCommand(unittest.TestCase): @patch("crewai.cli.organization.main.console") def test_switch_organization_unauthorized(self, mock_console): mock_response = MagicMock() - mock_http_error = requests.exceptions.HTTPError( - "401 Client Error: Unauthorized", response=MagicMock(status_code=401) + mock_http_error = httpx.HTTPStatusError( + "401 Client Error: Unauthorized", + request=httpx.Request("GET", "http://test"), + response=httpx.Response(401), ) mock_response.raise_for_status.side_effect = mock_http_error diff --git a/lib/crewai/tests/cli/test_plus_api.py b/lib/crewai/tests/cli/test_plus_api.py index 70eff917e..95a322e21 100644 --- a/lib/crewai/tests/cli/test_plus_api.py +++ b/lib/crewai/tests/cli/test_plus_api.py @@ -28,14 +28,26 @@ class TestPlusAPI(unittest.TestCase): response = self.api.login_to_tool_repository() mock_make_request.assert_called_once_with( - "POST", "/crewai_plus/api/v1/tools/login" + "POST", "/crewai_plus/api/v1/tools/login", json={} + ) + self.assertEqual(response, mock_response) + + @patch("crewai.cli.plus_api.PlusAPI._make_request") + def test_login_to_tool_repository_with_user_identifier(self, mock_make_request): + mock_response = MagicMock() + mock_make_request.return_value = mock_response + + response = self.api.login_to_tool_repository(user_identifier="test-hash-123") + + mock_make_request.assert_called_once_with( + "POST", "/crewai_plus/api/v1/tools/login", json={"user_identifier": "test-hash-123"} ) 
self.assertEqual(response, mock_response) def assert_request_with_org_id( - self, mock_make_request, method: str, endpoint: str, **kwargs + self, mock_client_instance, method: str, endpoint: str, **kwargs ): - mock_make_request.assert_called_once_with( + mock_client_instance.request.assert_called_once_with( method, f"{os.getenv('CREWAI_PLUS_URL')}{endpoint}", headers={ @@ -49,24 +61,25 @@ class TestPlusAPI(unittest.TestCase): ) @patch("crewai.cli.plus_api.Settings") - @patch("requests.Session.request") + @patch("crewai.cli.plus_api.httpx.Client") def test_login_to_tool_repository_with_org_uuid( - self, mock_make_request, mock_settings_class + self, mock_client_class, mock_settings_class ): mock_settings = MagicMock() mock_settings.org_uuid = self.org_uuid mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL') mock_settings_class.return_value = mock_settings - # re-initialize Client self.api = PlusAPI(self.api_key) + mock_client_instance = MagicMock() mock_response = MagicMock() - mock_make_request.return_value = mock_response + mock_client_instance.request.return_value = mock_response + mock_client_class.return_value.__enter__.return_value = mock_client_instance response = self.api.login_to_tool_repository() self.assert_request_with_org_id( - mock_make_request, "POST", "/crewai_plus/api/v1/tools/login" + mock_client_instance, "POST", "/crewai_plus/api/v1/tools/login", json={} ) self.assertEqual(response, mock_response) @@ -82,23 +95,23 @@ class TestPlusAPI(unittest.TestCase): self.assertEqual(response, mock_response) @patch("crewai.cli.plus_api.Settings") - @patch("requests.Session.request") - def test_get_tool_with_org_uuid(self, mock_make_request, mock_settings_class): + @patch("crewai.cli.plus_api.httpx.Client") + def test_get_tool_with_org_uuid(self, mock_client_class, mock_settings_class): mock_settings = MagicMock() mock_settings.org_uuid = self.org_uuid mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL') 
mock_settings_class.return_value = mock_settings - # re-initialize Client self.api = PlusAPI(self.api_key) - # Set up mock response + mock_client_instance = MagicMock() mock_response = MagicMock() - mock_make_request.return_value = mock_response + mock_client_instance.request.return_value = mock_response + mock_client_class.return_value.__enter__.return_value = mock_client_instance response = self.api.get_tool("test_tool_handle") self.assert_request_with_org_id( - mock_make_request, "GET", "/crewai_plus/api/v1/tools/test_tool_handle" + mock_client_instance, "GET", "/crewai_plus/api/v1/tools/test_tool_handle" ) self.assertEqual(response, mock_response) @@ -130,18 +143,18 @@ class TestPlusAPI(unittest.TestCase): self.assertEqual(response, mock_response) @patch("crewai.cli.plus_api.Settings") - @patch("requests.Session.request") - def test_publish_tool_with_org_uuid(self, mock_make_request, mock_settings_class): + @patch("crewai.cli.plus_api.httpx.Client") + def test_publish_tool_with_org_uuid(self, mock_client_class, mock_settings_class): mock_settings = MagicMock() mock_settings.org_uuid = self.org_uuid mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL') mock_settings_class.return_value = mock_settings - # re-initialize Client self.api = PlusAPI(self.api_key) - # Set up mock response + mock_client_instance = MagicMock() mock_response = MagicMock() - mock_make_request.return_value = mock_response + mock_client_instance.request.return_value = mock_response + mock_client_class.return_value.__enter__.return_value = mock_client_instance handle = "test_tool_handle" public = True @@ -153,7 +166,6 @@ class TestPlusAPI(unittest.TestCase): handle, public, version, description, encoded_file ) - # Expected params including organization_uuid expected_params = { "handle": handle, "public": public, @@ -164,7 +176,7 @@ class TestPlusAPI(unittest.TestCase): } self.assert_request_with_org_id( - mock_make_request, "POST", "/crewai_plus/api/v1/tools", json=expected_params 
+ mock_client_instance, "POST", "/crewai_plus/api/v1/tools", json=expected_params ) self.assertEqual(response, mock_response) @@ -195,20 +207,19 @@ class TestPlusAPI(unittest.TestCase): ) self.assertEqual(response, mock_response) - @patch("crewai.cli.plus_api.requests.Session") - def test_make_request(self, mock_session): + @patch("crewai.cli.plus_api.httpx.Client") + def test_make_request(self, mock_client_class): + mock_client_instance = MagicMock() mock_response = MagicMock() - - mock_session_instance = mock_session.return_value - mock_session_instance.request.return_value = mock_response + mock_client_instance.request.return_value = mock_response + mock_client_class.return_value.__enter__.return_value = mock_client_instance response = self.api._make_request("GET", "test_endpoint") - mock_session.assert_called_once() - mock_session_instance.request.assert_called_once_with( + mock_client_class.assert_called_once_with(trust_env=False, verify=True) + mock_client_instance.request.assert_called_once_with( "GET", f"{self.api.base_url}/test_endpoint", headers=self.api.headers ) - mock_session_instance.trust_env = False self.assertEqual(response, mock_response) @patch("crewai.cli.plus_api.PlusAPI._make_request") diff --git a/lib/crewai/tests/cli/tools/test_main.py b/lib/crewai/tests/cli/tools/test_main.py index 71acea76d..6661011d3 100644 --- a/lib/crewai/tests/cli/tools/test_main.py +++ b/lib/crewai/tests/cli/tools/test_main.py @@ -351,7 +351,7 @@ def test_publish_api_error( mock_response = MagicMock() mock_response.status_code = 500 mock_response.json.return_value = {"error": "Internal Server Error"} - mock_response.ok = False + mock_response.is_success = False mock_publish.return_value = mock_response with raises(SystemExit): diff --git a/lib/crewai/tests/cli/triggers/test_main.py b/lib/crewai/tests/cli/triggers/test_main.py index 93d24568d..641abc7cf 100644 --- a/lib/crewai/tests/cli/triggers/test_main.py +++ b/lib/crewai/tests/cli/triggers/test_main.py @@ -3,7 +3,7 
@@ import subprocess import unittest from unittest.mock import Mock, patch -import requests +import httpx from crewai.cli.triggers.main import TriggersCommand @@ -21,7 +21,7 @@ class TestTriggersCommand(unittest.TestCase): @patch("crewai.cli.triggers.main.console.print") def test_list_triggers_success(self, mock_console_print): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.status_code = 200 mock_response.ok = True mock_response.json.return_value = { @@ -50,7 +50,7 @@ class TestTriggersCommand(unittest.TestCase): @patch("crewai.cli.triggers.main.console.print") def test_list_triggers_no_apps(self, mock_console_print): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.status_code = 200 mock_response.ok = True mock_response.json.return_value = {"apps": []} @@ -81,7 +81,7 @@ class TestTriggersCommand(unittest.TestCase): @patch("crewai.cli.triggers.main.console.print") @patch.object(TriggersCommand, "_run_crew_with_payload") def test_execute_with_trigger_success(self, mock_run_crew, mock_console_print): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.status_code = 200 mock_response.ok = True mock_response.json.return_value = { @@ -99,7 +99,7 @@ class TestTriggersCommand(unittest.TestCase): @patch("crewai.cli.triggers.main.console.print") def test_execute_with_trigger_not_found(self, mock_console_print): - mock_response = Mock(spec=requests.Response) + mock_response = Mock(spec=httpx.Response) mock_response.status_code = 404 mock_response.json.return_value = {"error": "Trigger not found"} self.mock_client.get_trigger_payload.return_value = mock_response @@ -159,7 +159,7 @@ class TestTriggersCommand(unittest.TestCase): @patch("crewai.cli.triggers.main.console.print") def test_execute_with_trigger_with_default_error_message(self, mock_console_print): - mock_response = Mock(spec=requests.Response) + 
mock_response = Mock(spec=httpx.Response) mock_response.status_code = 404 mock_response.json.return_value = {} self.mock_client.get_trigger_payload.return_value = mock_response diff --git a/lib/crewai/tests/llms/bedrock/test_bedrock.py b/lib/crewai/tests/llms/bedrock/test_bedrock.py index efe3191e7..531e4d967 100644 --- a/lib/crewai/tests/llms/bedrock/test_bedrock.py +++ b/lib/crewai/tests/llms/bedrock/test_bedrock.py @@ -437,17 +437,36 @@ def test_bedrock_aws_credentials_configuration(): """ Test that AWS credentials configuration works properly """ + aws_access_key_id = "test-access-key" + aws_secret_access_key = "test-secret-key" + aws_region_name = "us-east-1" + + # Test with environment variables with patch.dict(os.environ, { - "AWS_ACCESS_KEY_ID": "test-access-key", - "AWS_SECRET_ACCESS_KEY": "test-secret-key", - "AWS_DEFAULT_REGION": "us-east-1" + "AWS_ACCESS_KEY_ID": aws_access_key_id, + "AWS_SECRET_ACCESS_KEY": aws_secret_access_key, + "AWS_DEFAULT_REGION": aws_region_name }): llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0") from crewai.llms.providers.bedrock.completion import BedrockCompletion assert isinstance(llm, BedrockCompletion) - assert llm.region_name == "us-east-1" + assert llm.region_name == aws_region_name + assert llm.aws_access_key_id == aws_access_key_id + assert llm.aws_secret_access_key == aws_secret_access_key + + # Test with litellm environment variables + with patch.dict(os.environ, { + "AWS_ACCESS_KEY_ID": aws_access_key_id, + "AWS_SECRET_ACCESS_KEY": aws_secret_access_key, + "AWS_REGION_NAME": aws_region_name + }): + llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0") + + from crewai.llms.providers.bedrock.completion import BedrockCompletion + assert isinstance(llm, BedrockCompletion) + assert llm.region_name == aws_region_name # Test with explicit credentials llm_explicit = LLM( diff --git a/lib/crewai/tests/llms/google/test_google.py b/lib/crewai/tests/llms/google/test_google.py index 
3f86388d5..6f475ef49 100644 --- a/lib/crewai/tests/llms/google/test_google.py +++ b/lib/crewai/tests/llms/google/test_google.py @@ -957,6 +957,47 @@ def test_gemini_agent_kickoff_structured_output_with_tools(): +@pytest.mark.vcr() +def test_gemini_crew_structured_output_with_tools(): + """ + Test that a crew with Gemini can use both tools and output_pydantic on a task. + """ + from pydantic import BaseModel, Field + from crewai.tools import tool + + class CalculationResult(BaseModel): + operation: str = Field(description="The mathematical operation performed") + result: int = Field(description="The result of the calculation") + explanation: str = Field(description="Brief explanation of the calculation") + + @tool + def add_numbers(a: int, b: int) -> int: + """Add two numbers together and return the sum.""" + return a + b + + agent = Agent( + role="Calculator", + goal="Perform calculations using available tools", + backstory="You are a calculator assistant that uses tools to compute results.", + llm=LLM(model="google/gemini-2.0-flash-001"), + tools=[add_numbers], + ) + + task = Task( + description="Calculate 15 + 27 using your add_numbers tool. Report the result.", + expected_output="A structured calculation result", + output_pydantic=CalculationResult, + agent=agent, + ) + + crew = Crew(agents=[agent], tasks=[task]) + result = crew.kickoff() + + assert result.pydantic is not None, "Expected pydantic output but got None" + assert isinstance(result.pydantic, CalculationResult) + assert result.pydantic.result == 42, f"Expected 42 but got {result.pydantic.result}" + + def test_gemini_stop_words_not_applied_to_structured_output(): """ Test that stop words are NOT applied when response_model is provided. 
diff --git a/lib/crewai/tests/mcp/test_amp_mcp.py b/lib/crewai/tests/mcp/test_amp_mcp.py new file mode 100644 index 000000000..3c4001f3c --- /dev/null +++ b/lib/crewai/tests/mcp/test_amp_mcp.py @@ -0,0 +1,373 @@ +"""Tests for AMP MCP config fetching and tool resolution.""" + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from crewai.agent.core import Agent +from crewai.mcp.config import MCPServerHTTP, MCPServerSSE +from crewai.mcp.tool_resolver import MCPToolResolver +from crewai.tools.base_tool import BaseTool + + +@pytest.fixture +def agent(): + return Agent( + role="Test Agent", + goal="Test goal", + backstory="Test backstory", + ) + + +@pytest.fixture +def resolver(agent): + return MCPToolResolver(agent=agent, logger=agent._logger) + + +@pytest.fixture +def mock_tool_definitions(): + return [ + { + "name": "search", + "description": "Search tool", + "inputSchema": { + "type": "object", + "properties": { + "query": {"type": "string", "description": "Search query"} + }, + "required": ["query"], + }, + }, + { + "name": "create_page", + "description": "Create a page", + "inputSchema": {}, + }, + ] + + +class TestBuildMCPConfigFromDict: + def test_builds_http_config(self): + config_dict = { + "type": "http", + "url": "https://mcp.example.com/api", + "headers": {"Authorization": "Bearer token123"}, + "streamable": True, + "cache_tools_list": False, + } + + result = MCPToolResolver._build_mcp_config_from_dict(config_dict) + + assert isinstance(result, MCPServerHTTP) + assert result.url == "https://mcp.example.com/api" + assert result.headers == {"Authorization": "Bearer token123"} + assert result.streamable is True + assert result.cache_tools_list is False + + def test_builds_sse_config(self): + config_dict = { + "type": "sse", + "url": "https://mcp.example.com/sse", + "headers": {"Authorization": "Bearer token123"}, + "cache_tools_list": True, + } + + result = MCPToolResolver._build_mcp_config_from_dict(config_dict) + + assert 
isinstance(result, MCPServerSSE) + assert result.url == "https://mcp.example.com/sse" + assert result.headers == {"Authorization": "Bearer token123"} + assert result.cache_tools_list is True + + def test_defaults_to_http(self): + config_dict = { + "url": "https://mcp.example.com/api", + } + + result = MCPToolResolver._build_mcp_config_from_dict(config_dict) + + assert isinstance(result, MCPServerHTTP) + assert result.streamable is True + + def test_http_defaults(self): + config_dict = { + "type": "http", + "url": "https://mcp.example.com/api", + } + + result = MCPToolResolver._build_mcp_config_from_dict(config_dict) + + assert result.headers is None + assert result.streamable is True + assert result.cache_tools_list is False + + +class TestFetchAmpMCPConfigs: + @patch("crewai.cli.plus_api.PlusAPI") + @patch("crewai_tools.tools.crewai_platform_tools.misc.get_platform_integration_token", return_value="test-api-key") + def test_fetches_configs_successfully(self, mock_get_token, mock_plus_api_class, resolver): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "configs": { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + "headers": {"Authorization": "Bearer notion-token"}, + }, + "github": { + "type": "http", + "url": "https://mcp.github.com/api", + "headers": {"Authorization": "Bearer gh-token"}, + }, + }, + } + mock_plus_api = MagicMock() + mock_plus_api.get_mcp_configs.return_value = mock_response + mock_plus_api_class.return_value = mock_plus_api + + result = resolver._fetch_amp_mcp_configs(["notion", "github"]) + + assert "notion" in result + assert "github" in result + assert result["notion"]["url"] == "https://mcp.notion.so/sse" + mock_plus_api_class.assert_called_once_with(api_key="test-api-key") + mock_plus_api.get_mcp_configs.assert_called_once_with(["notion", "github"]) + + @patch("crewai.cli.plus_api.PlusAPI") + 
@patch("crewai_tools.tools.crewai_platform_tools.misc.get_platform_integration_token", return_value="test-api-key") + def test_omits_missing_slugs(self, mock_get_token, mock_plus_api_class, resolver): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + "configs": {"notion": {"type": "sse", "url": "https://mcp.notion.so/sse"}}, + } + mock_plus_api = MagicMock() + mock_plus_api.get_mcp_configs.return_value = mock_response + mock_plus_api_class.return_value = mock_plus_api + + result = resolver._fetch_amp_mcp_configs(["notion", "missing-server"]) + + assert "notion" in result + assert "missing-server" not in result + + @patch("crewai.cli.plus_api.PlusAPI") + @patch("crewai_tools.tools.crewai_platform_tools.misc.get_platform_integration_token", return_value="test-api-key") + def test_returns_empty_on_http_error(self, mock_get_token, mock_plus_api_class, resolver): + mock_response = MagicMock() + mock_response.status_code = 500 + mock_plus_api = MagicMock() + mock_plus_api.get_mcp_configs.return_value = mock_response + mock_plus_api_class.return_value = mock_plus_api + + result = resolver._fetch_amp_mcp_configs(["notion"]) + + assert result == {} + + @patch("crewai.cli.plus_api.PlusAPI") + @patch("crewai_tools.tools.crewai_platform_tools.misc.get_platform_integration_token", return_value="test-api-key") + def test_returns_empty_on_network_error(self, mock_get_token, mock_plus_api_class, resolver): + import httpx + + mock_plus_api = MagicMock() + mock_plus_api.get_mcp_configs.side_effect = httpx.ConnectError("Connection refused") + mock_plus_api_class.return_value = mock_plus_api + + result = resolver._fetch_amp_mcp_configs(["notion"]) + + assert result == {} + + @patch("crewai_tools.tools.crewai_platform_tools.misc.get_platform_integration_token", side_effect=Exception("No token")) + def test_returns_empty_when_no_token(self, mock_get_token, resolver): + result = resolver._fetch_amp_mcp_configs(["notion"]) + + assert 
result == {} + + +class TestParseAmpRef: + def test_bare_slug(self): + slug, tool = MCPToolResolver._parse_amp_ref("notion") + assert slug == "notion" + assert tool is None + + def test_bare_slug_with_tool(self): + slug, tool = MCPToolResolver._parse_amp_ref("notion#search") + assert slug == "notion" + assert tool == "search" + + def test_bare_slug_with_empty_tool(self): + slug, tool = MCPToolResolver._parse_amp_ref("notion#") + assert slug == "notion" + assert tool is None + + def test_legacy_prefix_slug(self): + slug, tool = MCPToolResolver._parse_amp_ref("crewai-amp:notion") + assert slug == "notion" + assert tool is None + + def test_legacy_prefix_with_tool(self): + slug, tool = MCPToolResolver._parse_amp_ref("crewai-amp:notion#search") + assert slug == "notion" + assert tool == "search" + + +class TestGetMCPToolsAmpIntegration: + @patch("crewai.mcp.tool_resolver.MCPClient") + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + def test_single_request_for_multiple_amp_refs( + self, mock_fetch, mock_client_class, agent, mock_tool_definitions + ): + mock_fetch.return_value = { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + "headers": {"Authorization": "Bearer token"}, + }, + "github": { + "type": "http", + "url": "https://mcp.github.com/api", + "headers": {"Authorization": "Bearer gh-token"}, + "streamable": True, + }, + } + + mock_client = AsyncMock() + mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) + mock_client.connected = False + mock_client.connect = AsyncMock() + mock_client.disconnect = AsyncMock() + mock_client_class.return_value = mock_client + + tools = agent.get_mcp_tools(["notion", "github"]) + + mock_fetch.assert_called_once_with(["notion", "github"]) + assert len(tools) == 4 # 2 tools per server + + @patch("crewai.mcp.tool_resolver.MCPClient") + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + def test_tool_filter_with_hash_syntax( + self, mock_fetch, mock_client_class, agent, 
mock_tool_definitions + ): + mock_fetch.return_value = { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + "headers": {"Authorization": "Bearer token"}, + }, + } + + mock_client = AsyncMock() + mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) + mock_client.connected = False + mock_client.connect = AsyncMock() + mock_client.disconnect = AsyncMock() + mock_client_class.return_value = mock_client + + tools = agent.get_mcp_tools(["notion#search"]) + + mock_fetch.assert_called_once_with(["notion"]) + assert len(tools) == 1 + assert tools[0].name == "mcp_notion_so_sse_search" + + @patch("crewai.mcp.tool_resolver.MCPClient") + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + def test_deduplicates_slugs( + self, mock_fetch, mock_client_class, agent, mock_tool_definitions + ): + mock_fetch.return_value = { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + "headers": {"Authorization": "Bearer token"}, + }, + } + + mock_client = AsyncMock() + mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) + mock_client.connected = False + mock_client.connect = AsyncMock() + mock_client.disconnect = AsyncMock() + mock_client_class.return_value = mock_client + + tools = agent.get_mcp_tools(["notion#search", "notion#create_page"]) + + mock_fetch.assert_called_once_with(["notion"]) + assert len(tools) == 2 + + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + def test_skips_missing_configs_gracefully(self, mock_fetch, agent): + mock_fetch.return_value = {} + + tools = agent.get_mcp_tools(["missing-server"]) + + assert tools == [] + + @patch("crewai.mcp.tool_resolver.MCPClient") + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + def test_legacy_crewai_amp_prefix_still_works( + self, mock_fetch, mock_client_class, agent, mock_tool_definitions + ): + mock_fetch.return_value = { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + "headers": {"Authorization": "Bearer 
token"}, + }, + } + + mock_client = AsyncMock() + mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) + mock_client.connected = False + mock_client.connect = AsyncMock() + mock_client.disconnect = AsyncMock() + mock_client_class.return_value = mock_client + + tools = agent.get_mcp_tools(["crewai-amp:notion"]) + + mock_fetch.assert_called_once_with(["notion"]) + assert len(tools) == 2 + + @patch("crewai.mcp.tool_resolver.MCPClient") + @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs") + @patch.object(MCPToolResolver, "_resolve_external") + def test_non_amp_items_unaffected( + self, + mock_external, + mock_fetch, + mock_client_class, + agent, + mock_tool_definitions, + ): + mock_fetch.return_value = { + "notion": { + "type": "sse", + "url": "https://mcp.notion.so/sse", + }, + } + + mock_client = AsyncMock() + mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) + mock_client.connected = False + mock_client.connect = AsyncMock() + mock_client.disconnect = AsyncMock() + mock_client_class.return_value = mock_client + + mock_external_tool = MagicMock(spec=BaseTool) + mock_external.return_value = [mock_external_tool] + + http_config = MCPServerHTTP( + url="https://other.mcp.com/api", + headers={"Authorization": "Bearer other"}, + ) + + tools = agent.get_mcp_tools( + [ + "notion", + "https://external.mcp.com/api", + http_config, + ] + ) + + mock_fetch.assert_called_once_with(["notion"]) + mock_external.assert_called_once_with("https://external.mcp.com/api") + # 2 from notion + 1 from external + 2 from http_config + assert len(tools) == 5 diff --git a/lib/crewai/tests/mcp/test_mcp_config.py b/lib/crewai/tests/mcp/test_mcp_config.py index e55a7d504..24fc59769 100644 --- a/lib/crewai/tests/mcp/test_mcp_config.py +++ b/lib/crewai/tests/mcp/test_mcp_config.py @@ -1,5 +1,5 @@ import asyncio -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, patch import pytest from crewai.agent.core import Agent 
@@ -46,7 +46,7 @@ def test_agent_with_stdio_mcp_config(mock_tool_definitions): ) - with patch("crewai.agent.core.MCPClient") as mock_client_class: + with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class: mock_client = AsyncMock() mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) mock_client.connected = False # Will trigger connect @@ -82,7 +82,7 @@ def test_agent_with_http_mcp_config(mock_tool_definitions): mcps=[http_config], ) - with patch("crewai.agent.core.MCPClient") as mock_client_class: + with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class: mock_client = AsyncMock() mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) mock_client.connected = False # Will trigger connect @@ -117,7 +117,7 @@ def test_agent_with_sse_mcp_config(mock_tool_definitions): mcps=[sse_config], ) - with patch("crewai.agent.core.MCPClient") as mock_client_class: + with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class: mock_client = AsyncMock() mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) mock_client.connected = False @@ -141,7 +141,7 @@ def test_mcp_tool_execution_in_sync_context(mock_tool_definitions): """Test MCPNativeTool execution in synchronous context (normal crew execution).""" http_config = MCPServerHTTP(url="https://api.example.com/mcp") - with patch("crewai.agent.core.MCPClient") as mock_client_class: + with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class: mock_client = AsyncMock() mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) mock_client.connected = False @@ -173,7 +173,7 @@ async def test_mcp_tool_execution_in_async_context(mock_tool_definitions): """Test MCPNativeTool execution in async context (e.g., from a Flow).""" http_config = MCPServerHTTP(url="https://api.example.com/mcp") - with patch("crewai.agent.core.MCPClient") as mock_client_class: + with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class: 
mock_client = AsyncMock() mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions) mock_client.connected = False diff --git a/lib/crewai/tests/memory/test_unified_memory.py b/lib/crewai/tests/memory/test_unified_memory.py index 5b25b8077..98a041086 100644 --- a/lib/crewai/tests/memory/test_unified_memory.py +++ b/lib/crewai/tests/memory/test_unified_memory.py @@ -172,8 +172,8 @@ def test_memory_scope_slice(tmp_path: Path, mock_embedder: MagicMock) -> None: sc = mem.scope("/agent/1") assert sc._root in ("/agent/1", "/agent/1/") sl = mem.slice(["/a", "/b"], read_only=True) - assert sl._read_only is True - assert "/a" in sl._scopes and "/b" in sl._scopes + assert sl.read_only is True + assert "/a" in sl.scopes and "/b" in sl.scopes def test_memory_list_scopes_info_tree(tmp_path: Path, mock_embedder: MagicMock) -> None: @@ -198,7 +198,7 @@ def test_memory_scope_remember_recall(tmp_path: Path, mock_embedder: MagicMock) from crewai.memory.memory_scope import MemoryScope mem = Memory(storage=str(tmp_path / "db5"), llm=MagicMock(), embedder=mock_embedder) - scope = MemoryScope(mem, "/crew/1") + scope = MemoryScope(memory=mem, root_path="/crew/1") scope.remember("Scoped note", scope="/", categories=[], importance=0.5, metadata={}) results = scope.recall("note", limit=5, depth="shallow") assert len(results) >= 1 @@ -213,19 +213,20 @@ def test_memory_slice_recall(tmp_path: Path, mock_embedder: MagicMock) -> None: mem = Memory(storage=str(tmp_path / "db6"), llm=MagicMock(), embedder=mock_embedder) mem.remember("In scope A", scope="/a", categories=[], importance=0.5, metadata={}) - sl = MemorySlice(mem, ["/a"], read_only=True) + sl = MemorySlice(memory=mem, scopes=["/a"], read_only=True) matches = sl.recall("scope", limit=5, depth="shallow") assert isinstance(matches, list) -def test_memory_slice_remember_raises_when_read_only(tmp_path: Path, mock_embedder: MagicMock) -> None: +def test_memory_slice_remember_is_noop_when_read_only(tmp_path: Path, mock_embedder: 
MagicMock) -> None: from crewai.memory.unified_memory import Memory from crewai.memory.memory_scope import MemorySlice mem = Memory(storage=str(tmp_path / "db7"), llm=MagicMock(), embedder=mock_embedder) - sl = MemorySlice(mem, ["/a"], read_only=True) - with pytest.raises(PermissionError): - sl.remember("x", scope="/a") + sl = MemorySlice(memory=mem, scopes=["/a"], read_only=True) + result = sl.remember("x", scope="/a") + assert result is None + assert mem.list_records() == [] # --- Flow memory --- @@ -318,6 +319,7 @@ def test_executor_save_to_memory_calls_extract_then_remember_per_item() -> None: from crewai.agents.parser import AgentFinish mock_memory = MagicMock() + mock_memory.read_only = False mock_memory.extract_memories.return_value = ["Fact A.", "Fact B."] mock_agent = MagicMock() @@ -358,6 +360,7 @@ def test_executor_save_to_memory_skips_delegation_output() -> None: from crewai.utilities.string_utils import sanitize_tool_name mock_memory = MagicMock() + mock_memory.read_only = False mock_agent = MagicMock() mock_agent.memory = mock_memory mock_agent._logger = MagicMock() @@ -390,7 +393,7 @@ def test_memory_scope_extract_memories_delegates() -> None: mock_memory = MagicMock() mock_memory.extract_memories.return_value = ["Scoped fact."] - scope = MemoryScope(mock_memory, "/agent/1") + scope = MemoryScope(memory=mock_memory, root_path="/agent/1") result = scope.extract_memories("Some content") mock_memory.extract_memories.assert_called_once_with("Some content") assert result == ["Scoped fact."] @@ -402,7 +405,7 @@ def test_memory_slice_extract_memories_delegates() -> None: mock_memory = MagicMock() mock_memory.extract_memories.return_value = ["Sliced fact."] - sl = MemorySlice(mock_memory, ["/a", "/b"], read_only=True) + sl = MemorySlice(memory=mock_memory, scopes=["/a", "/b"], read_only=True) result = sl.extract_memories("Some content") mock_memory.extract_memories.assert_called_once_with("Some content") assert result == ["Sliced fact."] @@ -667,10 +670,10 
@@ def test_agent_kickoff_memory_recall_and_save(tmp_path: Path, mock_embedder: Mag verbose=False, ) - # Mock recall to verify it's called, but return real results - with patch.object(mem, "recall", wraps=mem.recall) as recall_mock, \ - patch.object(mem, "extract_memories", return_value=["PostgreSQL is used."]) as extract_mock, \ - patch.object(mem, "remember_many", wraps=mem.remember_many) as remember_many_mock: + # Patch on the class to avoid Pydantic BaseModel __delattr__ restriction + with patch.object(Memory, "recall", wraps=mem.recall) as recall_mock, \ + patch.object(Memory, "extract_memories", return_value=["PostgreSQL is used."]) as extract_mock, \ + patch.object(Memory, "remember_many", wraps=mem.remember_many) as remember_many_mock: result = agent.kickoff("What database do we use?") assert result is not None diff --git a/lib/crewai/tests/telemetry/test_telemetry.py b/lib/crewai/tests/telemetry/test_telemetry.py index 8f7f5fc70..d0564982d 100644 --- a/lib/crewai/tests/telemetry/test_telemetry.py +++ b/lib/crewai/tests/telemetry/test_telemetry.py @@ -121,3 +121,41 @@ def test_telemetry_singleton_pattern(): thread.join() assert all(instance is telemetry1 for instance in instances) + + +def test_no_signal_handler_traceback_in_non_main_thread(): + """Signal handler registration should be silently skipped in non-main threads. 
+ + Regression test for https://github.com/crewAIInc/crewAI/issues/4289 + """ + errors: list[Exception] = [] + mock_holder: dict = {} + + def init_in_thread(): + try: + Telemetry._instance = None + with ( + patch.dict( + os.environ, + {"CREWAI_DISABLE_TELEMETRY": "false", "OTEL_SDK_DISABLED": "false"}, + ), + patch("crewai.telemetry.telemetry.TracerProvider"), + patch("signal.signal") as mock_signal, + patch("crewai.telemetry.telemetry.logger") as mock_logger, + ): + Telemetry() + mock_holder["signal"] = mock_signal + mock_holder["logger"] = mock_logger + except Exception as exc: + errors.append(exc) + + thread = threading.Thread(target=init_in_thread) + thread.start() + thread.join() + + assert not errors, f"Unexpected error: {errors}" + assert mock_holder, "Thread did not execute" + mock_holder["signal"].assert_not_called() + mock_holder["logger"].debug.assert_any_call( + "Skipping signal handler registration: not running in main thread" + ) diff --git a/lib/crewai/tests/test_async_human_feedback.py b/lib/crewai/tests/test_async_human_feedback.py index 9bb3d0045..035f29dcc 100644 --- a/lib/crewai/tests/test_async_human_feedback.py +++ b/lib/crewai/tests/test_async_human_feedback.py @@ -971,6 +971,128 @@ class TestCollapseToOutcomeJsonParsing: assert mock_llm.call.call_count == 2 +class TestLLMObjectPreservedInContext: + """Tests that BaseLLM objects have their model string preserved in PendingFeedbackContext.""" + + @patch("crewai.flow.flow.crewai_event_bus.emit") + def test_basellm_object_model_string_survives_roundtrip(self, mock_emit: MagicMock) -> None: + """Test that when llm is a BaseLLM object, its model string is stored in context + so that outcome collapsing works after async pause/resume. + + This is the exact bug: locally the sync path keeps the LLM object in memory, + but in production the async path serializes the context and the LLM object was + discarded (stored as None), causing resume to skip classification and always + fall back to emit[0]. 
+ """ + with tempfile.TemporaryDirectory() as tmpdir: + db_path = os.path.join(tmpdir, "test_flows.db") + persistence = SQLiteFlowPersistence(db_path) + + # Create a mock BaseLLM object (not a string) + mock_llm_obj = MagicMock() + mock_llm_obj.model = "gemini/gemini-2.0-flash" + + class PausingProvider: + def __init__(self, persistence: SQLiteFlowPersistence): + self.persistence = persistence + self.captured_context: PendingFeedbackContext | None = None + + def request_feedback( + self, context: PendingFeedbackContext, flow: Flow + ) -> str: + self.captured_context = context + self.persistence.save_pending_feedback( + flow_uuid=context.flow_id, + context=context, + state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(), + ) + raise HumanFeedbackPending(context=context) + + provider = PausingProvider(persistence) + + class TestFlow(Flow): + result_path: str = "" + + @start() + @human_feedback( + message="Approve?", + emit=["needs_changes", "approved"], + llm=mock_llm_obj, + default_outcome="approved", + provider=provider, + ) + def review(self): + return "content for review" + + @listen("approved") + def handle_approved(self): + self.result_path = "approved" + return "Approved!" 
+ + @listen("needs_changes") + def handle_changes(self): + self.result_path = "needs_changes" + return "Changes needed" + + # Phase 1: Start flow (should pause) + flow1 = TestFlow(persistence=persistence) + result = flow1.kickoff() + assert isinstance(result, HumanFeedbackPending) + + # Verify the context stored the model STRING, not None + assert provider.captured_context is not None + assert provider.captured_context.llm == "gemini/gemini-2.0-flash" + + # Verify it survives persistence roundtrip + flow_id = result.context.flow_id + loaded = persistence.load_pending_feedback(flow_id) + assert loaded is not None + _, loaded_context = loaded + assert loaded_context.llm == "gemini/gemini-2.0-flash" + + # Phase 2: Resume with positive feedback - should use LLM to classify + flow2 = TestFlow.from_pending(flow_id, persistence) + assert flow2._pending_feedback_context is not None + assert flow2._pending_feedback_context.llm == "gemini/gemini-2.0-flash" + + # Mock _collapse_to_outcome to verify it gets called (not skipped) + with patch.object(flow2, "_collapse_to_outcome", return_value="approved") as mock_collapse: + flow2.resume("this looks good, proceed!") + + # The key assertion: _collapse_to_outcome was called (not skipped due to llm=None) + mock_collapse.assert_called_once_with( + feedback="this looks good, proceed!", + outcomes=["needs_changes", "approved"], + llm="gemini/gemini-2.0-flash", + ) + assert flow2.last_human_feedback.outcome == "approved" + assert flow2.result_path == "approved" + + def test_string_llm_still_works(self) -> None: + """Test that passing llm as a string still works correctly.""" + context = PendingFeedbackContext( + flow_id="str-llm-test", + flow_class="test.Flow", + method_name="review", + method_output="output", + message="Review:", + emit=["approved", "rejected"], + llm="gpt-4o-mini", + ) + + serialized = context.to_dict() + restored = PendingFeedbackContext.from_dict(serialized) + assert restored.llm == "gpt-4o-mini" + + def 
test_none_llm_when_no_model_attr(self) -> None: + """Test that llm is None when object has no model attribute.""" + mock_obj = MagicMock(spec=[]) # No attributes + + # Simulate what the decorator does + llm_value = mock_obj if isinstance(mock_obj, str) else getattr(mock_obj, "model", None) + assert llm_value is None + + class TestAsyncHumanFeedbackEdgeCases: """Edge case tests for async human feedback.""" diff --git a/lib/crewai/tests/test_crew.py b/lib/crewai/tests/test_crew.py index 64d122a7c..adcdfda4c 100644 --- a/lib/crewai/tests/test_crew.py +++ b/lib/crewai/tests/test_crew.py @@ -36,7 +36,7 @@ from crewai.flow import Flow, start from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource from crewai.llm import LLM - +from crewai.memory.unified_memory import Memory from crewai.process import Process from crewai.project import CrewBase, agent, before_kickoff, crew, task from crewai.task import Task @@ -2618,9 +2618,9 @@ def test_memory_remember_called_after_task(): ) with patch.object( - crew._memory, "extract_memories", wraps=crew._memory.extract_memories + Memory, "extract_memories", wraps=crew._memory.extract_memories ) as extract_mock, patch.object( - crew._memory, "remember", wraps=crew._memory.remember + Memory, "remember", wraps=crew._memory.remember ) as remember_mock: crew.kickoff() @@ -4773,13 +4773,13 @@ def test_memory_remember_receives_task_content(): # Mock extract_memories to return fake memories and capture the raw input. # No wraps= needed -- the test only checks what args it receives, not the output. patch.object( - crew._memory, "extract_memories", return_value=["Fake memory."] + Memory, "extract_memories", return_value=["Fake memory."] ) as extract_mock, # Mock recall to avoid LLM calls for query analysis (not in cassette). 
- patch.object(crew._memory, "recall", return_value=[]), + patch.object(Memory, "recall", return_value=[]), # Mock remember_many to prevent the background save from triggering # LLM calls (field resolution) that aren't in the cassette. - patch.object(crew._memory, "remember_many", return_value=[]), + patch.object(Memory, "remember_many", return_value=[]), ): crew.kickoff() diff --git a/lib/crewai/tests/test_flow.py b/lib/crewai/tests/test_flow.py index 0ec4b3063..f214006aa 100644 --- a/lib/crewai/tests/test_flow.py +++ b/lib/crewai/tests/test_flow.py @@ -1772,3 +1772,284 @@ def test_cyclic_flow_multiple_or_listeners_fire_every_iteration(): f"'{method}' should fire every iteration, " f"got {len(events)} fires: {execution_order}" ) + + +def test_cyclic_flow_works_with_persist_and_id_input(): + """Cyclic router flows must complete all iterations when persistence is + enabled and 'id' is passed in inputs. + + Regression test: passing ``inputs={"id": ...}`` with a persistence backend + previously caused ``_is_execution_resuming`` to be set even though + ``_completed_methods`` was empty. The flag was never cleared during + execution, so on the second cycle iteration the resumption path in + ``_execute_single_listener`` short-circuited the router with ``(None, None)`` + and the flow silently terminated after a single iteration. 
+ """ + from uuid import uuid4 + + from crewai.flow.persistence import SQLiteFlowPersistence + + execution_order: list[str] = [] + + class PersistCyclicFlow(Flow): + iteration: int = 0 + max_iterations: int = 3 + + @start() + def begin(self): + execution_order.append("begin") + + @router(or_(begin, "capture")) + def classify(self): + self.iteration += 1 + execution_order.append(f"classify_{self.iteration}") + if self.iteration <= self.max_iterations: + return "type_a" + return "exit" + + @listen("type_a") + def handle(self): + execution_order.append(f"handle_{self.iteration}") + + @listen(or_(handle,)) + def send(self): + execution_order.append(f"send_{self.iteration}") + + @listen("send") + def capture(self): + execution_order.append(f"capture_{self.iteration}") + + @listen("exit") + def finish(self): + execution_order.append("finish") + + persistence = SQLiteFlowPersistence() + flow = PersistCyclicFlow(persistence=persistence) + flow.kickoff(inputs={"id": str(uuid4())}) + + assert "finish" in execution_order, ( + f"Flow should have reached 'finish', got: {execution_order}" + ) + # The router fires max_iterations+1 times (3 cycles + the final "exit") + classify_events = [e for e in execution_order if e.startswith("classify_")] + assert len(classify_events) == 4, ( + f"'classify' should fire 4 times (3 cycles + exit), " + f"got {len(classify_events)}: {execution_order}" + ) + # The other methods fire once per "type_a" cycle + for method in ["handle", "send", "capture"]: + events = [e for e in execution_order if e.startswith(f"{method}_")] + assert len(events) == 3, ( + f"'{method}' should fire 3 times, " + f"got {len(events)}: {execution_order}" + ) + + +@pytest.mark.timeout(5) +def test_self_listening_method_does_not_loop(): + """A method whose @listen label matches its own name must not loop forever. + + Without the guard, 'process' re-triggers itself on every completion, + running indefinitely (timeout → FAIL). 
The fix caps method calls + and raises RecursionError (PASS). + """ + + class SelfListenFlow(Flow): + @start() + def begin(self): + return "process" + + @router(begin) + def route(self): + return "process" + + @listen("process") + def process(self): + pass + + flow = SelfListenFlow() + with pytest.raises(RecursionError, match="infinite loop"): + flow.kickoff() + + +def test_or_condition_self_listen_fires_once(): + """or_() with a self-referencing label only fires once due to or_() guard.""" + call_count = 0 + + class OrSelfListenFlow(Flow): + @start() + def begin(self): + return "process" + + @router(begin) + def route(self): + return "process" + + @listen(or_("other_trigger", "process")) + def process(self): + nonlocal call_count + call_count += 1 + + flow = OrSelfListenFlow() + flow.kickoff() + assert call_count == 1 + +class ListState(BaseModel): + items: list = [] + + +class DictState(BaseModel): + data: dict = {} + + +class _ListFlow(Flow[ListState]): + @start() + def populate(self): + self.state.items = [3, 1, 4, 1, 5, 9, 2, 6] + + +class _DictFlow(Flow[DictState]): + @start() + def populate(self): + self.state.data = {"a": 1, "b": 2, "c": 3} + + +def _make_list_flow(): + flow = _ListFlow() + flow.kickoff() + return flow + + +def _make_dict_flow(): + flow = _DictFlow() + flow.kickoff() + return flow + + +def test_locked_list_proxy_index(): + flow = _make_list_flow() + assert flow.state.items.index(4) == 2 + assert flow.state.items.index(1, 2) == 3 + + +def test_locked_list_proxy_index_missing_raises(): + flow = _make_list_flow() + with pytest.raises(ValueError): + flow.state.items.index(999) + + +def test_locked_list_proxy_count(): + flow = _make_list_flow() + assert flow.state.items.count(1) == 2 + assert flow.state.items.count(999) == 0 + + +def test_locked_list_proxy_sort(): + flow = _make_list_flow() + flow.state.items.sort() + assert list(flow.state.items) == [1, 1, 2, 3, 4, 5, 6, 9] + + +def test_locked_list_proxy_sort_reverse(): + flow = 
_make_list_flow() + flow.state.items.sort(reverse=True) + assert list(flow.state.items) == [9, 6, 5, 4, 3, 2, 1, 1] + + +def test_locked_list_proxy_sort_key(): + flow = _make_list_flow() + flow.state.items.sort(key=lambda x: -x) + assert list(flow.state.items) == [9, 6, 5, 4, 3, 2, 1, 1] + + +def test_locked_list_proxy_reverse(): + flow = _make_list_flow() + original = list(flow.state.items) + flow.state.items.reverse() + assert list(flow.state.items) == list(reversed(original)) + + +def test_locked_list_proxy_copy(): + flow = _make_list_flow() + copied = flow.state.items.copy() + assert copied == [3, 1, 4, 1, 5, 9, 2, 6] + assert isinstance(copied, list) + copied.append(999) + assert 999 not in flow.state.items + + +def test_locked_list_proxy_add(): + flow = _make_list_flow() + result = flow.state.items + [10, 11] + assert result == [3, 1, 4, 1, 5, 9, 2, 6, 10, 11] + assert len(flow.state.items) == 8 + + +def test_locked_list_proxy_radd(): + flow = _make_list_flow() + result = [0] + flow.state.items + assert result[0] == 0 + assert len(result) == 9 + + +def test_locked_list_proxy_iadd(): + flow = _make_list_flow() + flow.state.items += [10] + assert 10 in flow.state.items + # Verify no deadlock: mutations must still work after += + flow.state.items.append(99) + assert 99 in flow.state.items + + +def test_locked_list_proxy_mul(): + flow = _make_list_flow() + result = flow.state.items * 2 + assert len(result) == 16 + + +def test_locked_list_proxy_rmul(): + flow = _make_list_flow() + result = 2 * flow.state.items + assert len(result) == 16 + + +def test_locked_list_proxy_reversed(): + flow = _make_list_flow() + original = list(flow.state.items) + assert list(reversed(flow.state.items)) == list(reversed(original)) + + +def test_locked_dict_proxy_copy(): + flow = _make_dict_flow() + copied = flow.state.data.copy() + assert copied == {"a": 1, "b": 2, "c": 3} + assert isinstance(copied, dict) + copied["z"] = 99 + assert "z" not in flow.state.data + + +def 
test_locked_dict_proxy_or(): + flow = _make_dict_flow() + result = flow.state.data | {"d": 4} + assert result == {"a": 1, "b": 2, "c": 3, "d": 4} + assert "d" not in flow.state.data + + +def test_locked_dict_proxy_ror(): + flow = _make_dict_flow() + result = {"z": 0} | flow.state.data + assert result == {"z": 0, "a": 1, "b": 2, "c": 3} + + +def test_locked_dict_proxy_ior(): + flow = _make_dict_flow() + flow.state.data |= {"d": 4} + assert flow.state.data["d"] == 4 + # Verify no deadlock: mutations must still work after |= + flow.state.data["e"] = 5 + assert flow.state.data["e"] == 5 + + +def test_locked_dict_proxy_reversed(): + flow = _make_dict_flow() + assert list(reversed(flow.state.data)) == ["c", "b", "a"] diff --git a/lib/crewai/tests/test_task.py b/lib/crewai/tests/test_task.py index 9a0010d89..21356c3b4 100644 --- a/lib/crewai/tests/test_task.py +++ b/lib/crewai/tests/test_task.py @@ -759,11 +759,11 @@ def test_custom_converter_cls(): crew = Crew(agents=[scorer], tasks=[task]) - with patch.object( - ScoreConverter, "to_pydantic", return_value=ScoreOutput(score=5) - ) as mock_to_pydantic: - crew.kickoff() - mock_to_pydantic.assert_called_once() + # With native structured output, the LLM returns a BaseModel directly, + # so the converter is bypassed. Verify the output is valid instead. 
+ result = crew.kickoff() + assert isinstance(result.pydantic, ScoreOutput) + assert isinstance(result.pydantic.score, int) @pytest.mark.vcr() diff --git a/lib/crewai/tests/tools/test_base_tool.py b/lib/crewai/tests/tools/test_base_tool.py index 4a6850ce1..8f7ae877b 100644 --- a/lib/crewai/tests/tools/test_base_tool.py +++ b/lib/crewai/tests/tools/test_base_tool.py @@ -3,6 +3,8 @@ from typing import Callable from unittest.mock import patch import pytest +from pydantic import BaseModel, Field + from crewai.agent import Agent from crewai.crew import Crew from crewai.task import Task @@ -230,3 +232,218 @@ def test_max_usage_count_is_respected(): crew.kickoff() assert tool.max_usage_count == 5 assert tool.current_usage_count == 5 + + +# ============================================================================= +# Schema Validation in run() Tests +# ============================================================================= + + +class CodeExecutorInput(BaseModel): + code: str = Field(description="The code to execute") + language: str = Field(default="python", description="Programming language") + + +class CodeExecutorTool(BaseTool): + name: str = "code_executor" + description: str = "Execute code snippets" + args_schema: type[BaseModel] = CodeExecutorInput + + def _run(self, code: str, language: str = "python") -> str: + return f"Executed {language}: {code}" + + +class TestBaseToolRunValidation: + """Tests for args_schema validation in BaseTool.run().""" + + def test_run_with_valid_kwargs_passes_validation(self) -> None: + """Valid keyword arguments should pass schema validation and execute.""" + t = CodeExecutorTool() + result = t.run(code="print('hello')") + assert result == "Executed python: print('hello')" + + def test_run_with_all_kwargs_passes_validation(self) -> None: + """All keyword arguments including optional ones should pass.""" + t = CodeExecutorTool() + result = t.run(code="console.log('hi')", language="javascript") + assert result == "Executed 
javascript: console.log('hi')" + + def test_run_with_no_args_raises_validation_error(self) -> None: + """Calling run() with no arguments should raise a clear ValueError, + not a cryptic TypeError about missing positional arguments (GH-4611).""" + t = CodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + t.run() + + def test_run_with_missing_required_kwarg_raises(self) -> None: + """Missing required kwargs should raise ValueError from schema validation.""" + t = CodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + t.run(language="python") + + def test_run_with_wrong_field_name_raises(self) -> None: + """Kwargs not matching any schema field should trigger validation error + for missing required fields.""" + t = CodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + t.run(wrong_arg="value") + + def test_run_with_positional_args_skips_validation(self) -> None: + """Positional-arg calls should bypass schema validation (backwards compat).""" + class SimpleTool(BaseTool): + name: str = "simple" + description: str = "A simple tool" + + def _run(self, question: str) -> str: + return question + + t = SimpleTool() + result = t.run("What is life?") + assert result == "What is life?" 
+ + def test_run_strips_extra_kwargs_from_llm(self) -> None: + """Extra kwargs not in the schema should be silently stripped, + preventing unexpected-keyword crashes in _run.""" + t = CodeExecutorTool() + result = t.run(code="1+1", extra_hallucinated_field="junk") + assert result == "Executed python: 1+1" + + def test_run_increments_usage_after_validation(self) -> None: + """Usage count should still increment after validated execution.""" + t = CodeExecutorTool() + assert t.current_usage_count == 0 + t.run(code="x = 1") + assert t.current_usage_count == 1 + + def test_run_does_not_increment_usage_on_validation_error(self) -> None: + """Usage count should NOT increment when validation fails.""" + t = CodeExecutorTool() + assert t.current_usage_count == 0 + with pytest.raises(ValueError): + t.run(wrong="bad") + assert t.current_usage_count == 0 + + +class TestToolDecoratorRunValidation: + """Tests for args_schema validation in Tool.run() (decorator-based tools).""" + + def test_decorator_tool_run_validates_kwargs(self) -> None: + """Decorator-created tools should also validate kwargs against schema.""" + @tool("execute_code") + def execute_code(code: str, language: str = "python") -> str: + """Execute a code snippet.""" + return f"Executed {language}: {code}" + + result = execute_code.run(code="x = 1") + assert result == "Executed python: x = 1" + + def test_decorator_tool_run_rejects_missing_required(self) -> None: + """Decorator tools should reject missing required args via validation.""" + @tool("execute_code") + def execute_code(code: str) -> str: + """Execute a code snippet.""" + return f"Executed: {code}" + + with pytest.raises(ValueError, match="validation failed"): + execute_code.run(wrong_arg="value") + + def test_decorator_tool_positional_args_still_work(self) -> None: + """Positional args to decorator tools should bypass validation.""" + @tool("greet") + def greet(name: str) -> str: + """Greet someone.""" + return f"Hello, {name}!" 
+ + result = greet.run("World") + assert result == "Hello, World!" + + +# ============================================================================= +# Async arun() Schema Validation Tests +# ============================================================================= + + +class AsyncCodeExecutorTool(BaseTool): + name: str = "async_code_executor" + description: str = "Execute code snippets asynchronously" + args_schema: type[BaseModel] = CodeExecutorInput + + async def _arun(self, code: str, language: str = "python") -> str: + return f"Async executed {language}: {code}" + + def _run(self, code: str, language: str = "python") -> str: + return f"Executed {language}: {code}" + + +class TestBaseToolArunValidation: + """Tests for args_schema validation in BaseTool.arun().""" + + @pytest.mark.asyncio + async def test_arun_with_valid_kwargs_passes_validation(self) -> None: + """Valid keyword arguments should pass schema validation in arun.""" + t = AsyncCodeExecutorTool() + result = await t.arun(code="print('hello')") + assert result == "Async executed python: print('hello')" + + @pytest.mark.asyncio + async def test_arun_with_no_args_raises_validation_error(self) -> None: + """Calling arun() with no arguments should raise a clear ValueError (GH-4611).""" + t = AsyncCodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + await t.arun() + + @pytest.mark.asyncio + async def test_arun_with_missing_required_kwarg_raises(self) -> None: + """Missing required kwargs should raise ValueError in arun.""" + t = AsyncCodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + await t.arun(language="python") + + @pytest.mark.asyncio + async def test_arun_with_wrong_field_name_raises(self) -> None: + """Kwargs not matching schema fields should trigger validation error in arun.""" + t = AsyncCodeExecutorTool() + with pytest.raises(ValueError, match="validation failed"): + await t.arun(wrong_arg="value") + + @pytest.mark.asyncio + async 
def test_arun_strips_extra_kwargs(self) -> None: + """Extra kwargs not in the schema should be stripped in arun.""" + t = AsyncCodeExecutorTool() + result = await t.arun(code="1+1", extra_field="junk") + assert result == "Async executed python: 1+1" + + @pytest.mark.asyncio + async def test_arun_does_not_increment_usage_on_validation_error(self) -> None: + """Usage count should NOT increment when arun validation fails.""" + t = AsyncCodeExecutorTool() + assert t.current_usage_count == 0 + with pytest.raises(ValueError): + await t.arun(wrong="bad") + assert t.current_usage_count == 0 + + +class TestToolDecoratorArunValidation: + """Tests for args_schema validation in Tool.arun() (decorator-based async tools).""" + + @pytest.mark.asyncio + async def test_async_decorator_tool_arun_validates_kwargs(self) -> None: + """Async decorator tools should validate kwargs in arun.""" + @tool("async_execute") + async def async_execute(code: str, language: str = "python") -> str: + """Execute code asynchronously.""" + return f"Async {language}: {code}" + + result = await async_execute.arun(code="x = 1") + assert result == "Async python: x = 1" + + @pytest.mark.asyncio + async def test_async_decorator_tool_arun_rejects_missing_required(self) -> None: + """Async decorator tools should reject missing required args in arun.""" + @tool("async_execute") + async def async_execute(code: str) -> str: + """Execute code asynchronously.""" + return f"Async: {code}" + + with pytest.raises(ValueError, match="validation failed"): + await async_execute.arun(wrong_arg="value") diff --git a/lib/crewai/tests/tracing/test_tracing.py b/lib/crewai/tests/tracing/test_tracing.py index 555446b26..ba49a37c8 100644 --- a/lib/crewai/tests/tracing/test_tracing.py +++ b/lib/crewai/tests/tracing/test_tracing.py @@ -840,3 +840,87 @@ class TestTraceListenerSetup: mock_mark_failed.assert_called_once_with( "test_batch_id_12345", "Internal Server Error" ) + + def test_ephemeral_batch_includes_anon_id(self): + 
"""Test that ephemeral batch initialization sends anon_id from get_user_id()""" + fake_user_id = "abc123def456" + + with ( + patch( + "crewai.events.listeners.tracing.trace_batch_manager.is_tracing_enabled_in_context", + return_value=True, + ), + patch( + "crewai.events.listeners.tracing.trace_batch_manager.get_user_id", + return_value=fake_user_id, + ), + patch( + "crewai.events.listeners.tracing.trace_batch_manager.should_auto_collect_first_time_traces", + return_value=False, + ), + ): + batch_manager = TraceBatchManager() + + mock_response = MagicMock( + status_code=201, + json=MagicMock(return_value={ + "ephemeral_trace_id": "test-trace-id", + "access_code": "TRACE-abc123", + }), + ) + + with patch.object( + batch_manager.plus_api, + "initialize_ephemeral_trace_batch", + return_value=mock_response, + ) as mock_init: + batch_manager.initialize_batch( + user_context={"privacy_level": "standard"}, + execution_metadata={ + "execution_type": "crew", + "crew_name": "test_crew", + }, + use_ephemeral=True, + ) + + mock_init.assert_called_once() + payload = mock_init.call_args[0][0] + assert payload["user_identifier"] == fake_user_id + assert "ephemeral_trace_id" in payload + + def test_non_ephemeral_batch_does_not_include_anon_id(self): + """Test that non-ephemeral batch initialization does not send anon_id""" + with ( + patch( + "crewai.events.listeners.tracing.trace_batch_manager.is_tracing_enabled_in_context", + return_value=True, + ), + patch( + "crewai.events.listeners.tracing.trace_batch_manager.should_auto_collect_first_time_traces", + return_value=False, + ), + ): + batch_manager = TraceBatchManager() + + mock_response = MagicMock( + status_code=201, + json=MagicMock(return_value={"trace_id": "test-trace-id"}), + ) + + with patch.object( + batch_manager.plus_api, + "initialize_trace_batch", + return_value=mock_response, + ) as mock_init: + batch_manager.initialize_batch( + user_context={"privacy_level": "standard"}, + execution_metadata={ + "execution_type": 
"crew", + "crew_name": "test_crew", + }, + use_ephemeral=False, + ) + + mock_init.assert_called_once() + payload = mock_init.call_args[0][0] + assert "user_identifier" not in payload diff --git a/lib/crewai/tests/utilities/test_agent_utils.py b/lib/crewai/tests/utilities/test_agent_utils.py index 31d7b9705..3d249906a 100644 --- a/lib/crewai/tests/utilities/test_agent_utils.py +++ b/lib/crewai/tests/utilities/test_agent_utils.py @@ -3,7 +3,7 @@ from __future__ import annotations import asyncio -from typing import Any +from typing import Any, Literal, Optional from unittest.mock import AsyncMock, MagicMock, patch import pytest @@ -17,6 +17,7 @@ from crewai.utilities.agent_utils import ( _format_messages_for_summary, _split_messages_into_chunks, convert_tools_to_openai_schema, + parse_tool_call_args, summarize_messages, ) @@ -79,7 +80,7 @@ class TestConvertToolsToOpenaiSchema: def test_converts_single_tool(self) -> None: """Test converting a single tool to OpenAI schema.""" tools = [CalculatorTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) assert len(schemas) == 1 assert len(functions) == 1 @@ -94,7 +95,7 @@ class TestConvertToolsToOpenaiSchema: def test_converts_multiple_tools(self) -> None: """Test converting multiple tools to OpenAI schema.""" tools = [CalculatorTool(), SearchTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) assert len(schemas) == 2 assert len(functions) == 2 @@ -112,7 +113,7 @@ class TestConvertToolsToOpenaiSchema: def test_functions_dict_contains_callables(self) -> None: """Test that the functions dict maps names to callable run methods.""" tools = [CalculatorTool(), SearchTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) assert "calculator" in functions assert "web_search" in functions @@ -122,14 
+123,14 @@ class TestConvertToolsToOpenaiSchema: def test_function_can_be_called(self) -> None: """Test that the returned function can be called.""" tools = [CalculatorTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) result = functions["calculator"](expression="2 + 2") assert result == "4" def test_empty_tools_list(self) -> None: """Test with an empty tools list.""" - schemas, functions = convert_tools_to_openai_schema([]) + schemas, functions, _ = convert_tools_to_openai_schema([]) assert schemas == [] assert functions == {} @@ -137,7 +138,7 @@ class TestConvertToolsToOpenaiSchema: def test_schema_has_required_fields(self) -> None: """Test that the schema includes required fields information.""" tools = [SearchTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) schema = schemas[0] params = schema["function"]["parameters"] @@ -157,7 +158,7 @@ class TestConvertToolsToOpenaiSchema: return "done" tools = [MinimalTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) assert len(schemas) == 1 schema = schemas[0] @@ -168,7 +169,7 @@ class TestConvertToolsToOpenaiSchema: def test_schema_structure_matches_openai_format(self) -> None: """Test that the schema structure matches OpenAI's expected format.""" tools = [CalculatorTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) schema = schemas[0] @@ -193,7 +194,7 @@ class TestConvertToolsToOpenaiSchema: def test_removes_redundant_schema_fields(self) -> None: """Test that redundant title and description are removed from parameters.""" tools = [CalculatorTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) params = 
schemas[0]["function"]["parameters"] # Title should be removed as it's redundant with function name @@ -202,7 +203,7 @@ class TestConvertToolsToOpenaiSchema: def test_preserves_field_descriptions(self) -> None: """Test that field descriptions are preserved in the schema.""" tools = [SearchTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) params = schemas[0]["function"]["parameters"] query_prop = params["properties"]["query"] @@ -214,7 +215,7 @@ class TestConvertToolsToOpenaiSchema: def test_preserves_default_values(self) -> None: """Test that default values are preserved in the schema.""" tools = [SearchTool()] - schemas, functions = convert_tools_to_openai_schema(tools) + schemas, functions, _ = convert_tools_to_openai_schema(tools) params = schemas[0]["function"]["parameters"] max_results_prop = params["properties"]["max_results"] @@ -234,6 +235,79 @@ def _make_mock_i18n() -> MagicMock: }.get(key, "") return mock_i18n +class MCPStyleInput(BaseModel): + """Input schema mimicking an MCP tool with optional fields.""" + + query: str = Field(description="Search query") + filter_type: Optional[Literal["internal", "user"]] = Field( + default=None, description="Filter type" + ) + page_id: Optional[str] = Field( + default=None, description="Page UUID" + ) + + +class MCPStyleTool(BaseTool): + """A tool mimicking MCP tool schemas with optional fields.""" + + name: str = "mcp_search" + description: str = "Search with optional filters" + args_schema: type[BaseModel] = MCPStyleInput + + def _run(self, **kwargs: Any) -> str: + return "result" + + +class TestOptionalFieldsPreserveNull: + """Tests that optional tool fields preserve null in the schema.""" + + def test_optional_string_allows_null(self) -> None: + """Optional[str] fields should include null in the schema so the LLM + can send null instead of being forced to guess a value.""" + tools = [MCPStyleTool()] + schemas, _, _ = 
convert_tools_to_openai_schema(tools) + + params = schemas[0]["function"]["parameters"] + page_id_prop = params["properties"]["page_id"] + + assert "anyOf" in page_id_prop + type_options = [opt.get("type") for opt in page_id_prop["anyOf"]] + assert "string" in type_options + assert "null" in type_options + + def test_optional_literal_allows_null(self) -> None: + """Optional[Literal[...]] fields should include null.""" + tools = [MCPStyleTool()] + schemas, _, _ = convert_tools_to_openai_schema(tools) + + params = schemas[0]["function"]["parameters"] + filter_prop = params["properties"]["filter_type"] + + assert "anyOf" in filter_prop + has_null = any(opt.get("type") == "null" for opt in filter_prop["anyOf"]) + assert has_null + + def test_required_field_stays_non_null(self) -> None: + """Required fields without Optional should NOT have null.""" + tools = [MCPStyleTool()] + schemas, _, _ = convert_tools_to_openai_schema(tools) + + params = schemas[0]["function"]["parameters"] + query_prop = params["properties"]["query"] + + assert query_prop.get("type") == "string" + assert "anyOf" not in query_prop + + def test_all_fields_in_required_for_strict_mode(self) -> None: + """All fields (including optional) must be in required for strict mode.""" + tools = [MCPStyleTool()] + schemas, _, _ = convert_tools_to_openai_schema(tools) + + params = schemas[0]["function"]["parameters"] + assert "query" in params["required"] + assert "filter_type" in params["required"] + assert "page_id" in params["required"] + class TestSummarizeMessages: """Tests for summarize_messages function.""" @@ -922,3 +996,56 @@ class TestParallelSummarizationVCR: assert summary_msg["role"] == "user" assert "files" in summary_msg assert "report.pdf" in summary_msg["files"] + + +class TestParseToolCallArgs: + """Unit tests for parse_tool_call_args.""" + + def test_valid_json_string_returns_dict(self) -> None: + args_dict, error = parse_tool_call_args('{"code": "print(1)"}', "run_code", "call_1") + assert 
error is None + assert args_dict == {"code": "print(1)"} + + def test_malformed_json_returns_error_dict(self) -> None: + args_dict, error = parse_tool_call_args('{"code": "print("hi")"}', "run_code", "call_1") + assert args_dict is None + assert error is not None + assert error["call_id"] == "call_1" + assert error["func_name"] == "run_code" + assert error["from_cache"] is False + assert "Failed to parse tool arguments as JSON" in error["result"] + assert "run_code" in error["result"] + + def test_malformed_json_preserves_original_tool(self) -> None: + mock_tool = object() + _, error = parse_tool_call_args("{bad}", "my_tool", "call_2", original_tool=mock_tool) + assert error is not None + assert error["original_tool"] is mock_tool + + def test_malformed_json_original_tool_defaults_to_none(self) -> None: + _, error = parse_tool_call_args("{bad}", "my_tool", "call_3") + assert error is not None + assert error["original_tool"] is None + + def test_dict_input_returned_directly(self) -> None: + func_args = {"code": "x = 42"} + args_dict, error = parse_tool_call_args(func_args, "run_code", "call_4") + assert error is None + assert args_dict == {"code": "x = 42"} + + def test_empty_dict_input_returned_directly(self) -> None: + args_dict, error = parse_tool_call_args({}, "run_code", "call_5") + assert error is None + assert args_dict == {} + + def test_valid_json_with_nested_values(self) -> None: + args_dict, error = parse_tool_call_args( + '{"query": "hello", "options": {"limit": 10}}', "search", "call_6" + ) + assert error is None + assert args_dict == {"query": "hello", "options": {"limit": 10}} + + def test_error_result_has_correct_keys(self) -> None: + _, error = parse_tool_call_args("{bad json}", "tool", "call_7") + assert error is not None + assert set(error.keys()) == {"call_id", "func_name", "result", "from_cache", "original_tool"} diff --git a/lib/crewai/tests/utilities/test_llm_utils.py b/lib/crewai/tests/utilities/test_llm_utils.py index e02173f8d..5d7d70b76 
100644 --- a/lib/crewai/tests/utilities/test_llm_utils.py +++ b/lib/crewai/tests/utilities/test_llm_utils.py @@ -81,7 +81,7 @@ def test_create_llm_from_env_with_unaccepted_attributes() -> None: "OPENAI_API_KEY": "fake-key", "AWS_ACCESS_KEY_ID": "fake-access-key", "AWS_SECRET_ACCESS_KEY": "fake-secret-key", - "AWS_REGION_NAME": "us-west-2", + "AWS_DEFAULT_REGION": "us-west-2", }, ): llm = create_llm(llm_value=None) @@ -89,7 +89,7 @@ def test_create_llm_from_env_with_unaccepted_attributes() -> None: assert llm.model == "gpt-3.5-turbo" assert not hasattr(llm, "AWS_ACCESS_KEY_ID") assert not hasattr(llm, "AWS_SECRET_ACCESS_KEY") - assert not hasattr(llm, "AWS_REGION_NAME") + assert not hasattr(llm, "AWS_DEFAULT_REGION") def test_create_llm_with_partial_attributes() -> None: diff --git a/lib/crewai/tests/utilities/test_pydantic_schema_utils.py b/lib/crewai/tests/utilities/test_pydantic_schema_utils.py new file mode 100644 index 000000000..98a5e6aa5 --- /dev/null +++ b/lib/crewai/tests/utilities/test_pydantic_schema_utils.py @@ -0,0 +1,884 @@ +"""Tests for pydantic_schema_utils module. 
+ +Covers: +- create_model_from_schema: type mapping, required/optional, enums, formats, + nested objects, arrays, unions, allOf, $ref, model_name, enrich_descriptions +- Schema transformation helpers: resolve_refs, force_additional_properties_false, + strip_unsupported_formats, ensure_type_in_schemas, convert_oneof_to_anyof, + ensure_all_properties_required, strip_null_from_types, build_rich_field_description +- End-to-end MCP tool schema conversion +""" + +from __future__ import annotations + +import datetime +from copy import deepcopy +from typing import Any + +import pytest +from pydantic import BaseModel + +from crewai.utilities.pydantic_schema_utils import ( + build_rich_field_description, + convert_oneof_to_anyof, + create_model_from_schema, + ensure_all_properties_required, + ensure_type_in_schemas, + force_additional_properties_false, + resolve_refs, + strip_null_from_types, + strip_unsupported_formats, +) + + +class TestSimpleTypes: + def test_string_field(self) -> None: + schema = { + "type": "object", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + } + Model = create_model_from_schema(schema) + obj = Model(name="Alice") + assert obj.name == "Alice" + + def test_integer_field(self) -> None: + schema = { + "type": "object", + "properties": {"count": {"type": "integer"}}, + "required": ["count"], + } + Model = create_model_from_schema(schema) + obj = Model(count=42) + assert obj.count == 42 + + def test_number_field(self) -> None: + schema = { + "type": "object", + "properties": {"score": {"type": "number"}}, + "required": ["score"], + } + Model = create_model_from_schema(schema) + obj = Model(score=3.14) + assert obj.score == pytest.approx(3.14) + + def test_boolean_field(self) -> None: + schema = { + "type": "object", + "properties": {"active": {"type": "boolean"}}, + "required": ["active"], + } + Model = create_model_from_schema(schema) + assert Model(active=True).active is True + + def test_null_field(self) -> None: + schema = { 
+ "type": "object", + "properties": {"value": {"type": "null"}}, + "required": ["value"], + } + Model = create_model_from_schema(schema) + obj = Model(value=None) + assert obj.value is None + + +class TestRequiredOptional: + def test_required_field_has_no_default(self) -> None: + schema = { + "type": "object", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + } + Model = create_model_from_schema(schema) + with pytest.raises(Exception): + Model() + + def test_optional_field_defaults_to_none(self) -> None: + schema = { + "type": "object", + "properties": {"name": {"type": "string"}}, + "required": [], + } + Model = create_model_from_schema(schema) + obj = Model() + assert obj.name is None + + def test_mixed_required_optional(self) -> None: + schema = { + "type": "object", + "properties": { + "id": {"type": "integer"}, + "label": {"type": "string"}, + }, + "required": ["id"], + } + Model = create_model_from_schema(schema) + obj = Model(id=1) + assert obj.id == 1 + assert obj.label is None + + +class TestEnumLiteral: + def test_string_enum(self) -> None: + schema = { + "type": "object", + "properties": { + "color": {"type": "string", "enum": ["red", "green", "blue"]}, + }, + "required": ["color"], + } + Model = create_model_from_schema(schema) + obj = Model(color="red") + assert obj.color == "red" + + def test_string_enum_rejects_invalid(self) -> None: + schema = { + "type": "object", + "properties": { + "color": {"type": "string", "enum": ["red", "green", "blue"]}, + }, + "required": ["color"], + } + Model = create_model_from_schema(schema) + with pytest.raises(Exception): + Model(color="yellow") + + def test_const_value(self) -> None: + schema = { + "type": "object", + "properties": { + "kind": {"const": "fixed"}, + }, + "required": ["kind"], + } + Model = create_model_from_schema(schema) + obj = Model(kind="fixed") + assert obj.kind == "fixed" + + +class TestFormatMapping: + def test_date_format(self) -> None: + schema = { + "type": "object", + 
"properties": { + "birthday": {"type": "string", "format": "date"}, + }, + "required": ["birthday"], + } + Model = create_model_from_schema(schema) + obj = Model(birthday=datetime.date(2000, 1, 15)) + assert obj.birthday == datetime.date(2000, 1, 15) + + def test_datetime_format(self) -> None: + schema = { + "type": "object", + "properties": { + "created_at": {"type": "string", "format": "date-time"}, + }, + "required": ["created_at"], + } + Model = create_model_from_schema(schema) + dt = datetime.datetime(2025, 6, 1, 12, 0, 0) + obj = Model(created_at=dt) + assert obj.created_at == dt + + def test_time_format(self) -> None: + schema = { + "type": "object", + "properties": { + "alarm": {"type": "string", "format": "time"}, + }, + "required": ["alarm"], + } + Model = create_model_from_schema(schema) + t = datetime.time(8, 30) + obj = Model(alarm=t) + assert obj.alarm == t + + +class TestNestedObjects: + def test_nested_object_creates_model(self) -> None: + schema = { + "type": "object", + "properties": { + "address": { + "type": "object", + "properties": { + "street": {"type": "string"}, + "city": {"type": "string"}, + }, + "required": ["street", "city"], + }, + }, + "required": ["address"], + } + Model = create_model_from_schema(schema) + obj = Model(address={"street": "123 Main", "city": "Springfield"}) + assert obj.address.street == "123 Main" + assert obj.address.city == "Springfield" + + def test_object_without_properties_returns_dict(self) -> None: + schema = { + "type": "object", + "properties": { + "metadata": {"type": "object"}, + }, + "required": ["metadata"], + } + Model = create_model_from_schema(schema) + obj = Model(metadata={"key": "value"}) + assert obj.metadata == {"key": "value"} + + +class TestTypedArrays: + def test_array_of_strings(self) -> None: + schema = { + "type": "object", + "properties": { + "tags": {"type": "array", "items": {"type": "string"}}, + }, + "required": ["tags"], + } + Model = create_model_from_schema(schema) + obj = 
Model(tags=["a", "b", "c"]) + assert obj.tags == ["a", "b", "c"] + + def test_array_of_objects(self) -> None: + schema = { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "type": "object", + "properties": {"id": {"type": "integer"}}, + "required": ["id"], + }, + }, + }, + "required": ["items"], + } + Model = create_model_from_schema(schema) + obj = Model(items=[{"id": 1}, {"id": 2}]) + assert len(obj.items) == 2 + assert obj.items[0].id == 1 + + def test_untyped_array(self) -> None: + schema = { + "type": "object", + "properties": {"data": {"type": "array"}}, + "required": ["data"], + } + Model = create_model_from_schema(schema) + obj = Model(data=[1, "two", 3.0]) + assert obj.data == [1, "two", 3.0] + + +class TestUnionTypes: + def test_anyof_string_or_integer(self) -> None: + schema = { + "type": "object", + "properties": { + "value": { + "anyOf": [{"type": "string"}, {"type": "integer"}], + }, + }, + "required": ["value"], + } + Model = create_model_from_schema(schema) + assert Model(value="hello").value == "hello" + assert Model(value=42).value == 42 + + def test_oneof(self) -> None: + schema = { + "type": "object", + "properties": { + "value": { + "oneOf": [{"type": "string"}, {"type": "number"}], + }, + }, + "required": ["value"], + } + Model = create_model_from_schema(schema) + assert Model(value="hello").value == "hello" + assert Model(value=3.14).value == pytest.approx(3.14) + + +class TestAllOfMerging: + def test_allof_merges_properties(self) -> None: + schema = { + "type": "object", + "allOf": [ + { + "type": "object", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + }, + { + "type": "object", + "properties": {"age": {"type": "integer"}}, + "required": ["age"], + }, + ], + } + Model = create_model_from_schema(schema) + obj = Model(name="Alice", age=30) + assert obj.name == "Alice" + assert obj.age == 30 + + def test_single_allof(self) -> None: + schema = { + "type": "object", + "properties": { + 
"item": { + "allOf": [ + { + "type": "object", + "properties": {"id": {"type": "integer"}}, + "required": ["id"], + } + ] + } + }, + "required": ["item"], + } + Model = create_model_from_schema(schema) + obj = Model(item={"id": 1}) + assert obj.item.id == 1 + + +# --------------------------------------------------------------------------- +# $ref resolution +# --------------------------------------------------------------------------- + + +class TestRefResolution: + def test_ref_in_property(self) -> None: + schema = { + "type": "object", + "properties": { + "item": {"$ref": "#/$defs/Item"}, + }, + "required": ["item"], + "$defs": { + "Item": { + "type": "object", + "title": "Item", + "properties": {"name": {"type": "string"}}, + "required": ["name"], + }, + }, + } + Model = create_model_from_schema(schema) + obj = Model(item={"name": "Widget"}) + assert obj.item.name == "Widget" + + +# --------------------------------------------------------------------------- +# model_name parameter +# --------------------------------------------------------------------------- + + +class TestModelName: + def test_model_name_override(self) -> None: + schema = { + "type": "object", + "title": "OriginalName", + "properties": {"x": {"type": "integer"}}, + "required": ["x"], + } + Model = create_model_from_schema(schema, model_name="CustomSchema") + assert Model.__name__ == "CustomSchema" + + def test_model_name_fallback_to_title(self) -> None: + schema = { + "type": "object", + "title": "FromTitle", + "properties": {"x": {"type": "integer"}}, + "required": ["x"], + } + Model = create_model_from_schema(schema) + assert Model.__name__ == "FromTitle" + + def test_model_name_fallback_to_dynamic(self) -> None: + schema = { + "type": "object", + "properties": {"x": {"type": "integer"}}, + "required": ["x"], + } + Model = create_model_from_schema(schema) + assert Model.__name__ == "DynamicModel" + + +# --------------------------------------------------------------------------- +# 
enrich_descriptions +# --------------------------------------------------------------------------- + + +class TestEnrichDescriptions: + def test_enriched_description_includes_constraints(self) -> None: + schema = { + "type": "object", + "properties": { + "score": { + "type": "integer", + "description": "The score value", + "minimum": 0, + "maximum": 100, + }, + }, + "required": ["score"], + } + Model = create_model_from_schema(schema, enrich_descriptions=True) + field_info = Model.model_fields["score"] + assert "Minimum: 0" in field_info.description + assert "Maximum: 100" in field_info.description + assert "The score value" in field_info.description + + def test_default_does_not_enrich(self) -> None: + schema = { + "type": "object", + "properties": { + "score": { + "type": "integer", + "description": "The score value", + "minimum": 0, + }, + }, + "required": ["score"], + } + Model = create_model_from_schema(schema, enrich_descriptions=False) + field_info = Model.model_fields["score"] + assert field_info.description == "The score value" + + def test_enriched_description_propagates_to_nested(self) -> None: + schema = { + "type": "object", + "properties": { + "config": { + "type": "object", + "properties": { + "level": { + "type": "integer", + "description": "Level", + "minimum": 1, + "maximum": 10, + }, + }, + "required": ["level"], + }, + }, + "required": ["config"], + } + Model = create_model_from_schema(schema, enrich_descriptions=True) + nested_model = Model.model_fields["config"].annotation + nested_field = nested_model.model_fields["level"] + assert "Minimum: 1" in nested_field.description + assert "Maximum: 10" in nested_field.description + + +# --------------------------------------------------------------------------- +# Edge cases +# --------------------------------------------------------------------------- + + +class TestEdgeCases: + def test_empty_properties(self) -> None: + schema = {"type": "object", "properties": {}, "required": []} + Model = 
create_model_from_schema(schema) + obj = Model() + assert obj is not None + + def test_no_properties_key(self) -> None: + schema = {"type": "object"} + Model = create_model_from_schema(schema) + obj = Model() + assert obj is not None + + def test_unknown_type_raises(self) -> None: + schema = { + "type": "object", + "properties": { + "weird": {"type": "hyperspace"}, + }, + "required": ["weird"], + } + with pytest.raises(ValueError, match="Unsupported JSON schema type"): + create_model_from_schema(schema) + + +# --------------------------------------------------------------------------- +# build_rich_field_description +# --------------------------------------------------------------------------- + + +class TestBuildRichFieldDescription: + def test_description_only(self) -> None: + assert build_rich_field_description({"description": "A name"}) == "A name" + + def test_empty_schema(self) -> None: + assert build_rich_field_description({}) == "" + + def test_format(self) -> None: + desc = build_rich_field_description({"format": "date-time"}) + assert "Format: date-time" in desc + + def test_enum(self) -> None: + desc = build_rich_field_description({"enum": ["a", "b"]}) + assert "Allowed values:" in desc + assert "'a'" in desc + assert "'b'" in desc + + def test_pattern(self) -> None: + desc = build_rich_field_description({"pattern": "^[a-z]+$"}) + assert "Pattern: ^[a-z]+$" in desc + + def test_min_max(self) -> None: + desc = build_rich_field_description({"minimum": 0, "maximum": 100}) + assert "Minimum: 0" in desc + assert "Maximum: 100" in desc + + def test_min_max_length(self) -> None: + desc = build_rich_field_description({"minLength": 1, "maxLength": 255}) + assert "Min length: 1" in desc + assert "Max length: 255" in desc + + def test_examples(self) -> None: + desc = build_rich_field_description({"examples": ["foo", "bar", "baz", "extra"]}) + assert "Examples:" in desc + assert "'foo'" in desc + assert "'baz'" in desc + # Only first 3 shown + assert "'extra'" not 
in desc + + def test_combined_constraints(self) -> None: + desc = build_rich_field_description({ + "description": "A score", + "minimum": 0, + "maximum": 10, + "format": "int32", + }) + assert desc.startswith("A score") + assert "Minimum: 0" in desc + assert "Maximum: 10" in desc + assert "Format: int32" in desc + + +# --------------------------------------------------------------------------- +# Schema transformation functions +# --------------------------------------------------------------------------- + + +class TestResolveRefs: + def test_basic_ref_resolution(self) -> None: + schema = { + "type": "object", + "properties": {"item": {"$ref": "#/$defs/Item"}}, + "$defs": { + "Item": {"type": "object", "properties": {"id": {"type": "integer"}}}, + }, + } + resolved = resolve_refs(schema) + assert "$ref" not in resolved["properties"]["item"] + assert resolved["properties"]["item"]["type"] == "object" + + def test_nested_ref_resolution(self) -> None: + schema = { + "type": "object", + "properties": {"wrapper": {"$ref": "#/$defs/Wrapper"}}, + "$defs": { + "Wrapper": { + "type": "object", + "properties": {"inner": {"$ref": "#/$defs/Inner"}}, + }, + "Inner": {"type": "string"}, + }, + } + resolved = resolve_refs(schema) + wrapper = resolved["properties"]["wrapper"] + assert wrapper["properties"]["inner"]["type"] == "string" + + def test_missing_ref_raises(self) -> None: + schema = { + "properties": {"x": {"$ref": "#/$defs/Missing"}}, + "$defs": {}, + } + with pytest.raises(KeyError, match="Missing"): + resolve_refs(schema) + + def test_no_refs_unchanged(self) -> None: + schema = { + "type": "object", + "properties": {"name": {"type": "string"}}, + } + resolved = resolve_refs(schema) + assert resolved == schema + + +class TestForceAdditionalPropertiesFalse: + def test_adds_to_object(self) -> None: + schema = {"type": "object", "properties": {"x": {"type": "integer"}}} + result = force_additional_properties_false(deepcopy(schema)) + assert result["additionalProperties"] 
is False + + def test_adds_empty_properties_and_required(self) -> None: + schema = {"type": "object"} + result = force_additional_properties_false(deepcopy(schema)) + assert result["properties"] == {} + assert result["required"] == [] + + def test_recursive_nested_objects(self) -> None: + schema = { + "type": "object", + "properties": { + "child": { + "type": "object", + "properties": {"id": {"type": "integer"}}, + }, + }, + } + result = force_additional_properties_false(deepcopy(schema)) + assert result["additionalProperties"] is False + assert result["properties"]["child"]["additionalProperties"] is False + + def test_does_not_affect_non_objects(self) -> None: + schema = {"type": "string"} + result = force_additional_properties_false(deepcopy(schema)) + assert "additionalProperties" not in result + + +class TestStripUnsupportedFormats: + def test_removes_email_format(self) -> None: + schema = {"type": "string", "format": "email"} + result = strip_unsupported_formats(deepcopy(schema)) + assert "format" not in result + + def test_keeps_date_time(self) -> None: + schema = {"type": "string", "format": "date-time"} + result = strip_unsupported_formats(deepcopy(schema)) + assert result["format"] == "date-time" + + def test_keeps_date(self) -> None: + schema = {"type": "string", "format": "date"} + result = strip_unsupported_formats(deepcopy(schema)) + assert result["format"] == "date" + + def test_removes_uri_format(self) -> None: + schema = {"type": "string", "format": "uri"} + result = strip_unsupported_formats(deepcopy(schema)) + assert "format" not in result + + def test_recursive(self) -> None: + schema = { + "type": "object", + "properties": { + "email": {"type": "string", "format": "email"}, + "created": {"type": "string", "format": "date-time"}, + }, + } + result = strip_unsupported_formats(deepcopy(schema)) + assert "format" not in result["properties"]["email"] + assert result["properties"]["created"]["format"] == "date-time" + + +class 
TestEnsureTypeInSchemas: + def test_empty_schema_in_anyof_gets_type(self) -> None: + schema = {"anyOf": [{}, {"type": "string"}]} + result = ensure_type_in_schemas(deepcopy(schema)) + assert result["anyOf"][0] == {"type": "object"} + + def test_empty_schema_in_oneof_gets_type(self) -> None: + schema = {"oneOf": [{}, {"type": "integer"}]} + result = ensure_type_in_schemas(deepcopy(schema)) + assert result["oneOf"][0] == {"type": "object"} + + def test_non_empty_unchanged(self) -> None: + schema = {"anyOf": [{"type": "string"}, {"type": "integer"}]} + result = ensure_type_in_schemas(deepcopy(schema)) + assert result == schema + + +class TestConvertOneofToAnyof: + def test_converts_top_level(self) -> None: + schema = {"oneOf": [{"type": "string"}, {"type": "integer"}]} + result = convert_oneof_to_anyof(deepcopy(schema)) + assert "oneOf" not in result + assert "anyOf" in result + assert len(result["anyOf"]) == 2 + + def test_converts_nested(self) -> None: + schema = { + "type": "object", + "properties": { + "value": {"oneOf": [{"type": "string"}, {"type": "number"}]}, + }, + } + result = convert_oneof_to_anyof(deepcopy(schema)) + assert "anyOf" in result["properties"]["value"] + assert "oneOf" not in result["properties"]["value"] + + +class TestEnsureAllPropertiesRequired: + def test_makes_all_required(self) -> None: + schema = { + "type": "object", + "properties": {"a": {"type": "string"}, "b": {"type": "integer"}}, + "required": ["a"], + } + result = ensure_all_properties_required(deepcopy(schema)) + assert set(result["required"]) == {"a", "b"} + + def test_recursive(self) -> None: + schema = { + "type": "object", + "properties": { + "child": { + "type": "object", + "properties": {"x": {"type": "integer"}, "y": {"type": "integer"}}, + "required": [], + }, + }, + } + result = ensure_all_properties_required(deepcopy(schema)) + assert set(result["properties"]["child"]["required"]) == {"x", "y"} + + +class TestStripNullFromTypes: + def test_strips_null_from_anyof(self) 
-> None: + schema = { + "anyOf": [{"type": "string"}, {"type": "null"}], + } + result = strip_null_from_types(deepcopy(schema)) + assert "anyOf" not in result + assert result["type"] == "string" + + def test_strips_null_from_type_array(self) -> None: + schema = {"type": ["string", "null"]} + result = strip_null_from_types(deepcopy(schema)) + assert result["type"] == "string" + + def test_multiple_non_null_in_anyof(self) -> None: + schema = { + "anyOf": [{"type": "string"}, {"type": "integer"}, {"type": "null"}], + } + result = strip_null_from_types(deepcopy(schema)) + assert len(result["anyOf"]) == 2 + + def test_no_null_unchanged(self) -> None: + schema = {"type": "string"} + result = strip_null_from_types(deepcopy(schema)) + assert result == schema + + +class TestEndToEndMCPSchema: + """Realistic MCP tool schema exercising multiple features simultaneously.""" + + MCP_SCHEMA: dict[str, Any] = { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Search query", + "minLength": 1, + "maxLength": 500, + }, + "max_results": { + "type": "integer", + "description": "Maximum results", + "minimum": 1, + "maximum": 100, + }, + "format": { + "type": "string", + "enum": ["json", "csv", "xml"], + "description": "Output format", + }, + "filters": { + "type": "object", + "properties": { + "date_from": {"type": "string", "format": "date"}, + "date_to": {"type": "string", "format": "date"}, + "categories": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["date_from"], + }, + "sort_order": { + "anyOf": [{"type": "string"}, {"type": "null"}], + }, + }, + "required": ["query", "format", "filters"], + } + + def test_model_creation(self) -> None: + Model = create_model_from_schema(self.MCP_SCHEMA) + assert Model is not None + assert issubclass(Model, BaseModel) + + def test_valid_input_accepted(self) -> None: + Model = create_model_from_schema(self.MCP_SCHEMA) + obj = Model( + query="test search", + format="json", + 
filters={"date_from": "2025-01-01"}, + ) + assert obj.query == "test search" + assert obj.format == "json" + + def test_invalid_enum_rejected(self) -> None: + Model = create_model_from_schema(self.MCP_SCHEMA) + with pytest.raises(Exception): + Model( + query="test", + format="yaml", + filters={"date_from": "2025-01-01"}, + ) + + def test_model_name_for_mcp_tool(self) -> None: + Model = create_model_from_schema( + self.MCP_SCHEMA, model_name="search_toolSchema" + ) + assert Model.__name__ == "search_toolSchema" + + def test_enriched_descriptions_for_mcp(self) -> None: + Model = create_model_from_schema( + self.MCP_SCHEMA, enrich_descriptions=True + ) + query_field = Model.model_fields["query"] + assert "Min length: 1" in query_field.description + assert "Max length: 500" in query_field.description + + max_results_field = Model.model_fields["max_results"] + assert "Minimum: 1" in max_results_field.description + assert "Maximum: 100" in max_results_field.description + + format_field = Model.model_fields["format"] + assert "Allowed values:" in format_field.description + + def test_optional_fields_accept_none(self) -> None: + Model = create_model_from_schema(self.MCP_SCHEMA) + obj = Model( + query="test", + format="csv", + filters={"date_from": "2025-01-01"}, + max_results=None, + sort_order=None, + ) + assert obj.max_results is None + assert obj.sort_order is None + + def test_nested_filters_validated(self) -> None: + Model = create_model_from_schema(self.MCP_SCHEMA) + obj = Model( + query="test", + format="xml", + filters={ + "date_from": "2025-01-01", + "date_to": "2025-12-31", + "categories": ["news", "tech"], + }, + ) + assert obj.filters.date_from == datetime.date(2025, 1, 1) + assert obj.filters.categories == ["news", "tech"] diff --git a/lib/devtools/src/crewai_devtools/__init__.py b/lib/devtools/src/crewai_devtools/__init__.py index 4cf11a18b..4c45a4486 100644 --- a/lib/devtools/src/crewai_devtools/__init__.py +++ b/lib/devtools/src/crewai_devtools/__init__.py 
@@ -1,3 +1,3 @@ """CrewAI development tools.""" -__version__ = "1.9.3" +__version__ = "1.10.1" diff --git a/lib/devtools/src/crewai_devtools/cli.py b/lib/devtools/src/crewai_devtools/cli.py index abe3709a7..32950c39f 100644 --- a/lib/devtools/src/crewai_devtools/cli.py +++ b/lib/devtools/src/crewai_devtools/cli.py @@ -14,7 +14,7 @@ from rich.markdown import Markdown from rich.panel import Panel from rich.prompt import Confirm -from crewai_devtools.prompts import RELEASE_NOTES_PROMPT +from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT load_dotenv() @@ -191,6 +191,248 @@ def update_pyproject_dependencies(file_path: Path, new_version: str) -> bool: return False +def add_docs_version(docs_json_path: Path, version: str) -> bool: + """Add a new version to the Mintlify docs.json versioning config. + + Copies the current default version's tabs into a new version entry, + sets the new version as default, and marks the previous default as + non-default. Operates on all languages. + + Args: + docs_json_path: Path to docs/docs.json. + version: Version string (e.g., "1.10.1b1"). + + Returns: + True if docs.json was updated, False otherwise. 
+ """ + import json + + if not docs_json_path.exists(): + return False + + data = json.loads(docs_json_path.read_text()) + version_label = f"v{version}" + updated = False + + for lang in data.get("navigation", {}).get("languages", []): + versions = lang.get("versions", []) + if not versions: + continue + + # Skip if this version already exists for this language + if any(v.get("version") == version_label for v in versions): + continue + + # Find the current default and copy its tabs + default_version = next( + (v for v in versions if v.get("default")), + versions[0], + ) + + new_version = { + "version": version_label, + "default": True, + "tabs": default_version.get("tabs", []), + } + + # Remove default flag from old default + default_version.pop("default", None) + + # Insert new version at the beginning + versions.insert(0, new_version) + updated = True + + if not updated: + return False + + docs_json_path.write_text(json.dumps(data, indent=2, ensure_ascii=False) + "\n") + return True + + +_PT_BR_MONTHS = { + 1: "jan", + 2: "fev", + 3: "mar", + 4: "abr", + 5: "mai", + 6: "jun", + 7: "jul", + 8: "ago", + 9: "set", + 10: "out", + 11: "nov", + 12: "dez", +} + +_CHANGELOG_LOCALES: dict[str, dict[str, str]] = { + "en": { + "link_text": "View release on GitHub", + "language_name": "English", + }, + "pt-BR": { + "link_text": "Ver release no GitHub", + "language_name": "Brazilian Portuguese", + }, + "ko": { + "link_text": "GitHub 릴리스 보기", + "language_name": "Korean", + }, +} + + +def translate_release_notes( + release_notes: str, + lang: str, + client: OpenAI, +) -> str: + """Translate release notes into the target language using OpenAI. + + Args: + release_notes: English release notes markdown. + lang: Language code (e.g., "pt-BR", "ko"). + client: OpenAI client instance. + + Returns: + Translated release notes, or original on failure. 
+ """ + locale_cfg = _CHANGELOG_LOCALES.get(lang) + if not locale_cfg: + return release_notes + + language_name = locale_cfg["language_name"] + prompt = TRANSLATE_RELEASE_NOTES_PROMPT.substitute( + language=language_name, + release_notes=release_notes, + ) + + try: + response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "system", + "content": f"You are a professional translator. Translate technical documentation into {language_name}.", + }, + {"role": "user", "content": prompt}, + ], + temperature=0.3, + ) + return response.choices[0].message.content or release_notes + except Exception as e: + console.print( + f"[yellow]Warning:[/yellow] Could not translate to {language_name}: {e}" + ) + return release_notes + + +def _format_changelog_date(lang: str) -> str: + """Format today's date for a changelog entry in the given language.""" + from datetime import datetime + + now = datetime.now() + if lang == "ko": + return f"{now.year}년 {now.month}월 {now.day}일" + if lang == "pt-BR": + return f"{now.day:02d} {_PT_BR_MONTHS[now.month]} {now.year}" + return now.strftime("%b %d, %Y") + + +def update_changelog( + changelog_path: Path, + version: str, + release_notes: str, + lang: str = "en", +) -> bool: + """Prepend a new release entry to a docs changelog file. + + Args: + changelog_path: Path to the changelog.mdx file. + version: Version string (e.g., "1.9.3"). + release_notes: Markdown release notes content. + lang: Language code for localized date/link text. + + Returns: + True if changelog was updated, False otherwise. 
+ """ + if not changelog_path.exists(): + return False + + locale_cfg = _CHANGELOG_LOCALES.get(lang, _CHANGELOG_LOCALES["en"]) + date_label = _format_changelog_date(lang) + link_text = locale_cfg["link_text"] + + # Indent each non-empty line with 2 spaces to match block format + indented_lines = [] + for line in release_notes.splitlines(): + if line.strip(): + indented_lines.append(f" {line}") + else: + indented_lines.append("") + indented_notes = "\n".join(indented_lines) + + entry = ( + f'\n' + f" ## v{version}\n" + f"\n" + f" [{link_text}]" + f"(https://github.com/crewAIInc/crewAI/releases/tag/{version})\n" + f"\n" + f"{indented_notes}\n" + f"\n" + f"" + ) + + content = changelog_path.read_text() + + # Insert after the frontmatter closing --- + parts = content.split("---", 2) + if len(parts) >= 3: + new_content = ( + parts[0] + + "---" + + parts[1] + + "---\n" + + entry + + "\n\n" + + parts[2].lstrip("\n") + ) + else: + new_content = entry + "\n\n" + content + + changelog_path.write_text(new_content) + return True + + +def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]: + """Update crewai dependency versions in CLI template pyproject.toml files. + + Handles both pinned (==) and minimum (>=) version specifiers, + as well as extras like [tools]. + + Args: + templates_dir: Path to the CLI templates directory. + new_version: New version string. + + Returns: + List of paths that were updated. + """ + import re + + updated = [] + for pyproject in templates_dir.rglob("pyproject.toml"): + content = pyproject.read_text() + new_content = re.sub( + r'"crewai(\[tools\])?(==|>=)[^"]*"', + lambda m: f'"crewai{(m.group(1) or "")!s}=={new_version}"', + content, + ) + if new_content != content: + pyproject.write_text(new_content) + updated.append(pyproject) + + return updated + + def find_version_files(base_path: Path) -> list[Path]: """Find all __init__.py files that contain __version__. 
@@ -394,6 +636,22 @@ def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None: "[yellow]Warning:[/yellow] No __version__ attributes found to update" ) + # Update CLI template pyproject.toml files + templates_dir = lib_dir / "crewai" / "src" / "crewai" / "cli" / "templates" + if templates_dir.exists(): + if dry_run: + for tpl in templates_dir.rglob("pyproject.toml"): + console.print( + f"[dim][DRY RUN][/dim] Would update template: {tpl.relative_to(cwd)}" + ) + else: + tpl_updated = update_template_dependencies(templates_dir, version) + for tpl in tpl_updated: + console.print( + f"[green]✓[/green] Updated template: {tpl.relative_to(cwd)}" + ) + updated_files.append(tpl) + if not dry_run: console.print("\nSyncing workspace...") run_command(["uv", "sync"]) @@ -575,9 +833,9 @@ def tag(dry_run: bool, no_edit: bool) -> None: github_contributors = get_github_contributors(commit_range) - if commits.strip(): - client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + if commits.strip(): contributors_section = "" if github_contributors: contributors_section = f"\n\n## Contributors\n\n{', '.join([f'@{u}' for u in github_contributors])}" @@ -588,7 +846,7 @@ def tag(dry_run: bool, no_edit: bool) -> None: contributors_section=contributors_section, ) - response = client.chat.completions.create( + response = openai_client.chat.completions.create( model="gpt-4o-mini", messages=[ { @@ -643,6 +901,143 @@ def tag(dry_run: bool, no_edit: bool) -> None: "\n[green]✓[/green] Using generated release notes without editing" ) + is_prerelease = any( + indicator in version.lower() + for indicator in ["a", "b", "rc", "alpha", "beta", "dev"] + ) + + # Update docs: changelogs + version switcher + docs_json_path = cwd / "docs" / "docs.json" + changelog_langs = ["en", "pt-BR", "ko"] + if not dry_run: + docs_files_staged = [] + + for lang in changelog_langs: + cl_path = cwd / "docs" / lang / "changelog.mdx" + if lang == 
"en": + notes_for_lang = release_notes + else: + console.print(f"[dim]Translating release notes to {lang}...[/dim]") + notes_for_lang = translate_release_notes( + release_notes, lang, openai_client + ) + if update_changelog(cl_path, version, notes_for_lang, lang=lang): + console.print( + f"[green]✓[/green] Updated {cl_path.relative_to(cwd)}" + ) + docs_files_staged.append(str(cl_path)) + else: + console.print( + f"[yellow]Warning:[/yellow] Changelog not found at {cl_path.relative_to(cwd)}" + ) + + if not is_prerelease: + if add_docs_version(docs_json_path, version): + console.print( + f"[green]✓[/green] Added v{version} to docs version switcher" + ) + docs_files_staged.append(str(docs_json_path)) + else: + console.print( + f"[yellow]Warning:[/yellow] docs.json not found at {docs_json_path.relative_to(cwd)}" + ) + + if docs_files_staged: + docs_branch = f"docs/changelog-v{version}" + run_command(["git", "checkout", "-b", docs_branch]) + for f in docs_files_staged: + run_command(["git", "add", f]) + run_command( + [ + "git", + "commit", + "-m", + f"docs: update changelog and version for v{version}", + ] + ) + console.print("[green]✓[/green] Committed docs updates") + + run_command(["git", "push", "-u", "origin", docs_branch]) + console.print(f"[green]✓[/green] Pushed branch {docs_branch}") + + run_command( + [ + "gh", + "pr", + "create", + "--base", + "main", + "--title", + f"docs: update changelog and version for v{version}", + "--body", + "", + ] + ) + console.print("[green]✓[/green] Created docs PR") + + run_command( + [ + "gh", + "pr", + "merge", + docs_branch, + "--squash", + "--auto", + "--delete-branch", + ] + ) + console.print("[green]✓[/green] Enabled auto-merge on docs PR") + + import time + + console.print("[cyan]Waiting for PR checks to pass and merge...[/cyan]") + while True: + time.sleep(10) + try: + state = run_command( + [ + "gh", + "pr", + "view", + docs_branch, + "--json", + "state", + "--jq", + ".state", + ] + ) + except 
subprocess.CalledProcessError: + state = "" + + if state == "MERGED": + break + + console.print("[dim]Still waiting for PR to merge...[/dim]") + + console.print("[green]✓[/green] Docs PR merged") + + run_command(["git", "checkout", "main"]) + run_command(["git", "pull"]) + console.print("[green]✓[/green] main branch updated with docs changes") + else: + for lang in changelog_langs: + cl_path = cwd / "docs" / lang / "changelog.mdx" + translated = " (translated)" if lang != "en" else "" + console.print( + f"[dim][DRY RUN][/dim] Would update {cl_path.relative_to(cwd)}{translated}" + ) + if not is_prerelease: + console.print( + f"[dim][DRY RUN][/dim] Would add v{version} to docs version switcher" + ) + else: + console.print( + "[dim][DRY RUN][/dim] Skipping docs version (pre-release)" + ) + console.print( + f"[dim][DRY RUN][/dim] Would create branch docs/changelog-v{version}, PR, and merge" + ) + if not dry_run: with console.status(f"[cyan]Creating tag {tag_name}..."): try: @@ -660,11 +1055,6 @@ def tag(dry_run: bool, no_edit: bool) -> None: sys.exit(1) console.print(f"[green]✓[/green] Pushed tag {tag_name}") - is_prerelease = any( - indicator in version.lower() - for indicator in ["a", "b", "rc", "alpha", "beta", "dev"] - ) - with console.status("[cyan]Creating GitHub Release..."): try: gh_cmd = [ diff --git a/lib/devtools/src/crewai_devtools/prompts.py b/lib/devtools/src/crewai_devtools/prompts.py index 1e96f03f4..6272972af 100644 --- a/lib/devtools/src/crewai_devtools/prompts.py +++ b/lib/devtools/src/crewai_devtools/prompts.py @@ -43,3 +43,18 @@ Instructions: Keep it professional and clear.""" ) + + +TRANSLATE_RELEASE_NOTES_PROMPT = Template( + """Translate the following release notes into $language. + +$release_notes + +Instructions: +- Translate all section headers and descriptions naturally +- Keep markdown formatting (##, ###, -, etc.) exactly as-is +- Keep all proper nouns, code identifiers, class names, and technical terms unchanged + (e.g. 
"CrewAI", "LiteAgent", "ChromaDB", "MCP", "@username") +- Keep the ## Contributors section and GitHub usernames unchanged +- Do not add or remove any content, only translate""" +) diff --git a/pyproject.toml b/pyproject.toml index 657c15eaa..335f51dae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,9 +8,9 @@ authors = [ [dependency-groups] dev = [ - "ruff==0.14.7", - "mypy==1.19.0", - "pre-commit==4.5.0", + "ruff==0.15.1", + "mypy==1.19.1", + "pre-commit==4.5.1", "bandit==1.9.2", "pytest==8.4.2", "pytest-asyncio==1.3.0", @@ -23,9 +23,9 @@ dev = [ "pytest-split==0.10.0", "types-requests~=2.31.0.6", "types-pyyaml==6.0.*", - "types-regex==2024.11.6.*", + "types-regex==2026.1.15.*", "types-appdirs==1.4.*", - "boto3-stubs[bedrock-runtime]==1.40.54", + "boto3-stubs[bedrock-runtime]==1.42.40", "types-psycopg2==2.9.21.20251012", "types-pymysql==1.1.0.20250916", "types-aiofiles~=25.1.0", @@ -146,9 +146,14 @@ python_functions = "test_*" # composio-core pins rich<14 but textual requires rich>=14. # onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10. +# fastembed 0.7.x and docling 2.63 cap pillow<12; the removed APIs don't affect them. +# langchain-core 0.3.76 has a template-injection vuln (GHSA); force >=0.3.80. 
override-dependencies = [ "rich>=13.7.1", "onnxruntime<1.24; python_version < '3.11'", + "pillow>=12.1.1", + "langchain-core>=0.3.80,<1", + "urllib3>=2.6.3", ] [tool.uv.workspace] diff --git a/uv.lock b/uv.lock index df8cb3430..8fc9e56f5 100644 --- a/uv.lock +++ b/uv.lock @@ -20,16 +20,19 @@ members = [ "crewai-tools", ] overrides = [ + { name = "langchain-core", specifier = ">=0.3.80,<1" }, { name = "onnxruntime", marker = "python_full_version < '3.11'", specifier = "<1.24" }, + { name = "pillow", specifier = ">=12.1.1" }, { name = "rich", specifier = ">=13.7.1" }, + { name = "urllib3", specifier = ">=2.6.3" }, ] [manifest.dependency-groups] dev = [ { name = "bandit", specifier = "==1.9.2" }, - { name = "boto3-stubs", extras = ["bedrock-runtime"], specifier = "==1.40.54" }, - { name = "mypy", specifier = "==1.19.0" }, - { name = "pre-commit", specifier = "==4.5.0" }, + { name = "boto3-stubs", extras = ["bedrock-runtime"], specifier = "==1.42.40" }, + { name = "mypy", specifier = "==1.19.1" }, + { name = "pre-commit", specifier = "==4.5.1" }, { name = "pytest", specifier = "==8.4.2" }, { name = "pytest-asyncio", specifier = "==1.3.0" }, { name = "pytest-randomly", specifier = "==4.0.1" }, @@ -38,13 +41,13 @@ dev = [ { name = "pytest-subprocess", specifier = "==1.5.3" }, { name = "pytest-timeout", specifier = "==2.4.0" }, { name = "pytest-xdist", specifier = "==3.8.0" }, - { name = "ruff", specifier = "==0.14.7" }, + { name = "ruff", specifier = "==0.15.1" }, { name = "types-aiofiles", specifier = "~=25.1.0" }, { name = "types-appdirs", specifier = "==1.4.*" }, { name = "types-psycopg2", specifier = "==2.9.21.20251012" }, { name = "types-pymysql", specifier = "==1.1.0.20250916" }, { name = "types-pyyaml", specifier = "==6.0.*" }, - { name = "types-regex", specifier = "==2024.11.6.*" }, + { name = "types-regex", specifier = "==2026.1.15.*" }, { name = "types-requests", specifier = "~=2.31.0.6" }, { name = "vcrpy", specifier = "==7.0.0" }, ] @@ -593,8 +596,7 @@ 
dependencies = [ { name = "pydantic" }, { name = "starlette" }, { name = "typing-extensions" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, { name = "uvicorn" }, { name = "websockets" }, ] @@ -619,16 +621,16 @@ wheels = [ [[package]] name = "boto3-stubs" -version = "1.40.54" +version = "1.42.40" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e2/70/245477b7f07c9e1533c47fa69e611b172814423a6fd4637004f0d2a13b73/boto3_stubs-1.40.54.tar.gz", hash = "sha256:e21a9eda979a451935eb3196de3efbe15b9470e6bf9027406d1f6d0ac08b339e", size = 100919, upload-time = "2025-10-16T19:49:17.079Z" } +sdist = { url = "https://files.pythonhosted.org/packages/89/87/190df0854bcacc31d58dab28721f855d928ddd1d20c0ca2c201731d4622b/boto3_stubs-1.42.40.tar.gz", hash = "sha256:2689e235ae0deb6878fced175f7c2701fd8c088e6764de65e8c14085c1fc1914", size = 100886, upload-time = "2026-02-02T23:19:28.917Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9d/52/ee9dadd1cc8911e16f18ca9fa036a10328e0a0d3fddd54fadcc1ca0f9143/boto3_stubs-1.40.54-py3-none-any.whl", hash = "sha256:548a4786785ba7b43ef4ef1a2a764bebbb0301525f3201091fcf412e4c8ce323", size = 69712, upload-time = "2025-10-16T19:49:12.847Z" }, + { url = "https://files.pythonhosted.org/packages/e7/09/e1d031ceae85688c13dd16d84a0e6e416def62c6b23e04f7d318837ee355/boto3_stubs-1.42.40-py3-none-any.whl", hash = "sha256:66679f1075e094b15b2032d8cfc4f070a472e066b04ee1edf61aa44884a6d2cd", size = 69782, upload-time = "2026-02-02T23:19:20.16Z" }, ] 
[package.optional-dependencies] @@ -643,8 +645,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/35/c1/8c4c199ae1663feee579a15861e34f10b29da11ae6ea0ad7b6a847ef3823/botocore-1.40.70.tar.gz", hash = "sha256:61b1f2cecd54d1b28a081116fa113b97bf4e17da57c62ae2c2751fe4c528af1f", size = 14444592, upload-time = "2025-11-10T20:29:04.046Z" } wheels = [ @@ -1096,6 +1097,7 @@ dependencies = [ { name = "appdirs" }, { name = "chromadb" }, { name = "click" }, + { name = "httpx" }, { name = "instructor" }, { name = "json-repair" }, { name = "json5" }, @@ -1193,8 +1195,9 @@ requires-dist = [ { name = "click", specifier = "~=8.1.7" }, { name = "crewai-files", marker = "extra == 'file-processing'", editable = "lib/crewai-files" }, { name = "crewai-tools", marker = "extra == 'tools'", editable = "lib/crewai-tools" }, - { name = "docling", marker = "extra == 'docling'", specifier = "~=2.63.0" }, - { name = "google-genai", marker = "extra == 'google-genai'", specifier = "~=1.49.0" }, + { name = "docling", marker = "extra == 'docling'", specifier = "~=2.75.0" }, + { name = "google-genai", marker = "extra == 'google-genai'", specifier = "~=1.65.0" }, + { name = "httpx", specifier = "~=0.28.1" }, { name = "httpx-auth", marker = "extra == 'a2a'", specifier = "~=0.23.1" }, { name = "httpx-sse", marker = "extra == 'a2a'", specifier = "~=0.4.0" }, { name = "ibm-watsonx-ai", marker = "extra == 'watson'", specifier = "~=1.3.39" }, @@ -1202,7 +1205,7 @@ requires-dist = [ { name = "json-repair", specifier = "~=0.25.2" }, { name = "json5", specifier = 
"~=0.10.0" }, { name = "jsonref", specifier = "~=1.1.0" }, - { name = "lancedb", specifier = ">=0.4.0" }, + { name = "lancedb", specifier = ">=0.29.2" }, { name = "litellm", marker = "extra == 'litellm'", specifier = ">=1.74.9,<3" }, { name = "mcp", specifier = "~=1.26.0" }, { name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" }, @@ -1223,7 +1226,7 @@ requires-dist = [ { name = "regex", specifier = "~=2026.1.15" }, { name = "textual", specifier = ">=7.5.0" }, { name = "tiktoken", marker = "extra == 'embeddings'", specifier = "~=0.8.0" }, - { name = "tokenizers", specifier = "~=0.20.3" }, + { name = "tokenizers", specifier = ">=0.21,<1" }, { name = "tomli", specifier = "~=2.0.2" }, { name = "tomli-w", specifier = "~=1.1.0" }, { name = "uv", specifier = "~=0.9.13" }, @@ -1271,8 +1274,8 @@ requires-dist = [ { name = "aiocache", specifier = "~=0.12.3" }, { name = "aiofiles", specifier = "~=24.1.0" }, { name = "av", specifier = "~=13.0.0" }, - { name = "pillow", specifier = "~=10.4.0" }, - { name = "pypdf", specifier = "~=4.0.0" }, + { name = "pillow", specifier = "~=12.1.1" }, + { name = "pypdf", specifier = "~=6.7.5" }, { name = "python-magic", specifier = ">=0.4.27" }, { name = "tinytag", specifier = "~=1.10.0" }, ] @@ -1284,7 +1287,6 @@ dependencies = [ { name = "beautifulsoup4" }, { name = "crewai" }, { name = "docker" }, - { name = "lancedb" }, { name = "pymupdf" }, { name = "python-docx" }, { name = "pytube" }, @@ -1424,9 +1426,8 @@ requires-dist = [ { name = "docker", specifier = "~=7.1.0" }, { name = "exa-py", marker = "extra == 'exa-py'", specifier = ">=1.8.7" }, { name = "firecrawl-py", marker = "extra == 'firecrawl-py'", specifier = ">=1.8.0" }, - { name = "gitpython", marker = "extra == 'github'", specifier = "==3.1.38" }, + { name = "gitpython", marker = "extra == 'github'", specifier = ">=3.1.41,<4" }, { name = "hyperbrowser", marker = "extra == 'hyperbrowser'", specifier = ">=0.18.0" }, - { name = "lancedb", specifier = "~=0.5.4" }, { 
name = "langchain-apify", marker = "extra == 'apify'", specifier = ">=0.1.2,<1.0.0" }, { name = "linkup-sdk", marker = "extra == 'linkup-sdk'", specifier = ">=0.2.2" }, { name = "lxml", marker = "extra == 'rag'", specifier = ">=5.3.0,<5.4.0" }, @@ -1469,48 +1470,48 @@ provides-extras = ["apify", "beautifulsoup4", "bedrock", "browserbase", "composi [[package]] name = "cryptography" -version = "46.0.4" +version = "46.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/78/19/f748958276519adf6a0c1e79e7b8860b4830dda55ccdf29f2719b5fc499c/cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59", size = 749301, upload-time = "2026-01-28T00:24:37.379Z" } +sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/99/157aae7949a5f30d51fcb1a9851e8ebd5c74bf99b5285d8bb4b8b9ee641e/cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485", size = 7173686, upload-time = "2026-01-28T00:23:07.515Z" }, - { url = "https://files.pythonhosted.org/packages/87/91/874b8910903159043b5c6a123b7e79c4559ddd1896e38967567942635778/cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc", size = 4275871, upload-time = "2026-01-28T00:23:09.439Z" }, - { url = 
"https://files.pythonhosted.org/packages/c0/35/690e809be77896111f5b195ede56e4b4ed0435b428c2f2b6d35046fbb5e8/cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0", size = 4423124, upload-time = "2026-01-28T00:23:11.529Z" }, - { url = "https://files.pythonhosted.org/packages/1a/5b/a26407d4f79d61ca4bebaa9213feafdd8806dc69d3d290ce24996d3cfe43/cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa", size = 4277090, upload-time = "2026-01-28T00:23:13.123Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d8/4bb7aec442a9049827aa34cee1aa83803e528fa55da9a9d45d01d1bb933e/cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81", size = 4947652, upload-time = "2026-01-28T00:23:14.554Z" }, - { url = "https://files.pythonhosted.org/packages/2b/08/f83e2e0814248b844265802d081f2fac2f1cbe6cd258e72ba14ff006823a/cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255", size = 4455157, upload-time = "2026-01-28T00:23:16.443Z" }, - { url = "https://files.pythonhosted.org/packages/0a/05/19d849cf4096448779d2dcc9bb27d097457dac36f7273ffa875a93b5884c/cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e", size = 3981078, upload-time = "2026-01-28T00:23:17.838Z" }, - { url = "https://files.pythonhosted.org/packages/e6/89/f7bac81d66ba7cde867a743ea5b37537b32b5c633c473002b26a226f703f/cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c", size = 4276213, upload-time = "2026-01-28T00:23:19.257Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/9f/7133e41f24edd827020ad21b068736e792bc68eecf66d93c924ad4719fb3/cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32", size = 4912190, upload-time = "2026-01-28T00:23:21.244Z" }, - { url = "https://files.pythonhosted.org/packages/a6/f7/6d43cbaddf6f65b24816e4af187d211f0bc536a29961f69faedc48501d8e/cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616", size = 4454641, upload-time = "2026-01-28T00:23:22.866Z" }, - { url = "https://files.pythonhosted.org/packages/9e/4f/ebd0473ad656a0ac912a16bd07db0f5d85184924e14fc88feecae2492834/cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0", size = 4405159, upload-time = "2026-01-28T00:23:25.278Z" }, - { url = "https://files.pythonhosted.org/packages/d1/f7/7923886f32dc47e27adeff8246e976d77258fd2aa3efdd1754e4e323bf49/cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0", size = 4666059, upload-time = "2026-01-28T00:23:26.766Z" }, - { url = "https://files.pythonhosted.org/packages/eb/a7/0fca0fd3591dffc297278a61813d7f661a14243dd60f499a7a5b48acb52a/cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5", size = 3026378, upload-time = "2026-01-28T00:23:28.317Z" }, - { url = "https://files.pythonhosted.org/packages/2d/12/652c84b6f9873f0909374864a57b003686c642ea48c84d6c7e2c515e6da5/cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b", size = 3478614, upload-time = "2026-01-28T00:23:30.275Z" }, - { url = 
"https://files.pythonhosted.org/packages/56/f7/f648fdbb61d0d45902d3f374217451385edc7e7768d1b03ff1d0e5ffc17b/cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab", size = 7169583, upload-time = "2026-01-28T00:23:56.558Z" }, - { url = "https://files.pythonhosted.org/packages/d8/cc/8f3224cbb2a928de7298d6ed4790f5ebc48114e02bdc9559196bfb12435d/cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef", size = 4275419, upload-time = "2026-01-28T00:23:58.364Z" }, - { url = "https://files.pythonhosted.org/packages/17/43/4a18faa7a872d00e4264855134ba82d23546c850a70ff209e04ee200e76f/cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d", size = 4419058, upload-time = "2026-01-28T00:23:59.867Z" }, - { url = "https://files.pythonhosted.org/packages/ee/64/6651969409821d791ba12346a124f55e1b76f66a819254ae840a965d4b9c/cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973", size = 4278151, upload-time = "2026-01-28T00:24:01.731Z" }, - { url = "https://files.pythonhosted.org/packages/20/0b/a7fce65ee08c3c02f7a8310cc090a732344066b990ac63a9dfd0a655d321/cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4", size = 4939441, upload-time = "2026-01-28T00:24:03.175Z" }, - { url = "https://files.pythonhosted.org/packages/db/a7/20c5701e2cd3e1dfd7a19d2290c522a5f435dd30957d431dcb531d0f1413/cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af", size = 4451617, upload-time = "2026-01-28T00:24:05.403Z" }, - { url = 
"https://files.pythonhosted.org/packages/00/dc/3e16030ea9aa47b63af6524c354933b4fb0e352257c792c4deeb0edae367/cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263", size = 3977774, upload-time = "2026-01-28T00:24:06.851Z" }, - { url = "https://files.pythonhosted.org/packages/42/c8/ad93f14118252717b465880368721c963975ac4b941b7ef88f3c56bf2897/cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095", size = 4277008, upload-time = "2026-01-28T00:24:08.926Z" }, - { url = "https://files.pythonhosted.org/packages/00/cf/89c99698151c00a4631fbfcfcf459d308213ac29e321b0ff44ceeeac82f1/cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b", size = 4903339, upload-time = "2026-01-28T00:24:12.009Z" }, - { url = "https://files.pythonhosted.org/packages/03/c3/c90a2cb358de4ac9309b26acf49b2a100957e1ff5cc1e98e6c4996576710/cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019", size = 4451216, upload-time = "2026-01-28T00:24:13.975Z" }, - { url = "https://files.pythonhosted.org/packages/96/2c/8d7f4171388a10208671e181ca43cdc0e596d8259ebacbbcfbd16de593da/cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4", size = 4404299, upload-time = "2026-01-28T00:24:16.169Z" }, - { url = "https://files.pythonhosted.org/packages/e9/23/cbb2036e450980f65c6e0a173b73a56ff3bccd8998965dea5cc9ddd424a5/cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b", size = 4664837, upload-time = "2026-01-28T00:24:17.629Z" }, - { url = 
"https://files.pythonhosted.org/packages/0a/21/f7433d18fe6d5845329cbdc597e30caf983229c7a245bcf54afecc555938/cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc", size = 3009779, upload-time = "2026-01-28T00:24:20.198Z" }, - { url = "https://files.pythonhosted.org/packages/3a/6a/bd2e7caa2facffedf172a45c1a02e551e6d7d4828658c9a245516a598d94/cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976", size = 3466633, upload-time = "2026-01-28T00:24:21.851Z" }, - { url = "https://files.pythonhosted.org/packages/59/e0/f9c6c53e1f2a1c2507f00f2faba00f01d2f334b35b0fbfe5286715da2184/cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b", size = 3476316, upload-time = "2026-01-28T00:24:24.144Z" }, - { url = "https://files.pythonhosted.org/packages/27/7a/f8d2d13227a9a1a9fe9c7442b057efecffa41f1e3c51d8622f26b9edbe8f/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da", size = 4216693, upload-time = "2026-01-28T00:24:25.758Z" }, - { url = "https://files.pythonhosted.org/packages/c5/de/3787054e8f7972658370198753835d9d680f6cd4a39df9f877b57f0dd69c/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80", size = 4382765, upload-time = "2026-01-28T00:24:27.577Z" }, - { url = "https://files.pythonhosted.org/packages/8a/5f/60e0afb019973ba6a0b322e86b3d61edf487a4f5597618a430a2a15f2d22/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822", size = 4216066, upload-time = "2026-01-28T00:24:29.056Z" }, - { url = 
"https://files.pythonhosted.org/packages/81/8e/bf4a0de294f147fee66f879d9bae6f8e8d61515558e3d12785dd90eca0be/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947", size = 4382025, upload-time = "2026-01-28T00:24:30.681Z" }, - { url = "https://files.pythonhosted.org/packages/79/f4/9ceb90cfd6a3847069b0b0b353fd3075dc69b49defc70182d8af0c4ca390/cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3", size = 3406043, upload-time = "2026-01-28T00:24:32.236Z" }, + { url = "https://files.pythonhosted.org/packages/f7/81/b0bb27f2ba931a65409c6b8a8b358a7f03c0e46eceacddff55f7c84b1f3b/cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad", size = 7176289, upload-time = "2026-02-10T19:17:08.274Z" }, + { url = "https://files.pythonhosted.org/packages/ff/9e/6b4397a3e3d15123de3b1806ef342522393d50736c13b20ec4c9ea6693a6/cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b", size = 4275637, upload-time = "2026-02-10T19:17:10.53Z" }, + { url = "https://files.pythonhosted.org/packages/63/e7/471ab61099a3920b0c77852ea3f0ea611c9702f651600397ac567848b897/cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b", size = 4424742, upload-time = "2026-02-10T19:17:12.388Z" }, + { url = "https://files.pythonhosted.org/packages/37/53/a18500f270342d66bf7e4d9f091114e31e5ee9e7375a5aba2e85a91e0044/cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263", size = 4277528, upload-time = "2026-02-10T19:17:13.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/29/c2e812ebc38c57b40e7c583895e73c8c5adb4d1e4a0cc4c5a4fdab2b1acc/cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d", size = 4947993, upload-time = "2026-02-10T19:17:15.618Z" }, + { url = "https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed", size = 4456855, upload-time = "2026-02-10T19:17:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/2d/87/fc628a7ad85b81206738abbd213b07702bcbdada1dd43f72236ef3cffbb5/cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2", size = 3984635, upload-time = "2026-02-10T19:17:18.792Z" }, + { url = "https://files.pythonhosted.org/packages/84/29/65b55622bde135aedf4565dc509d99b560ee4095e56989e815f8fd2aa910/cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2", size = 4277038, upload-time = "2026-02-10T19:17:20.256Z" }, + { url = "https://files.pythonhosted.org/packages/bc/36/45e76c68d7311432741faf1fbf7fac8a196a0a735ca21f504c75d37e2558/cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0", size = 4912181, upload-time = "2026-02-10T19:17:21.825Z" }, + { url = "https://files.pythonhosted.org/packages/6d/1a/c1ba8fead184d6e3d5afcf03d569acac5ad063f3ac9fb7258af158f7e378/cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731", size = 4456482, upload-time = "2026-02-10T19:17:25.133Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/e5/3fb22e37f66827ced3b902cf895e6a6bc1d095b5b26be26bd13c441fdf19/cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82", size = 4405497, upload-time = "2026-02-10T19:17:26.66Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/9d58bb32b1121a8a2f27383fabae4d63080c7ca60b9b5c88be742be04ee7/cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1", size = 4667819, upload-time = "2026-02-10T19:17:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ed/325d2a490c5e94038cdb0117da9397ece1f11201f425c4e9c57fe5b9f08b/cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48", size = 3028230, upload-time = "2026-02-10T19:17:30.518Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5a/ac0f49e48063ab4255d9e3b79f5def51697fce1a95ea1370f03dc9db76f6/cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4", size = 3480909, upload-time = "2026-02-10T19:17:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fa/a66aa722105ad6a458bebd64086ca2b72cdd361fed31763d20390f6f1389/cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31", size = 7170514, upload-time = "2026-02-10T19:17:56.267Z" }, + { url = "https://files.pythonhosted.org/packages/0f/04/c85bdeab78c8bc77b701bf0d9bdcf514c044e18a46dcff330df5448631b0/cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18", size = 4275349, upload-time = "2026-02-10T19:17:58.419Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/32/9b87132a2f91ee7f5223b091dc963055503e9b442c98fc0b8a5ca765fab0/cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235", size = 4420667, upload-time = "2026-02-10T19:18:00.619Z" }, + { url = "https://files.pythonhosted.org/packages/a1/a6/a7cb7010bec4b7c5692ca6f024150371b295ee1c108bdc1c400e4c44562b/cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a", size = 4276980, upload-time = "2026-02-10T19:18:02.379Z" }, + { url = "https://files.pythonhosted.org/packages/8e/7c/c4f45e0eeff9b91e3f12dbd0e165fcf2a38847288fcfd889deea99fb7b6d/cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76", size = 4939143, upload-time = "2026-02-10T19:18:03.964Z" }, + { url = "https://files.pythonhosted.org/packages/37/19/e1b8f964a834eddb44fa1b9a9976f4e414cbb7aa62809b6760c8803d22d1/cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614", size = 4453674, upload-time = "2026-02-10T19:18:05.588Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/db15d3956f65264ca204625597c410d420e26530c4e2943e05a0d2f24d51/cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229", size = 3978801, upload-time = "2026-02-10T19:18:07.167Z" }, + { url = "https://files.pythonhosted.org/packages/41/e2/df40a31d82df0a70a0daf69791f91dbb70e47644c58581d654879b382d11/cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1", size = 4276755, upload-time = "2026-02-10T19:18:09.813Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/45/726809d1176959f4a896b86907b98ff4391a8aa29c0aaaf9450a8a10630e/cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d", size = 4901539, upload-time = "2026-02-10T19:18:11.263Z" }, + { url = "https://files.pythonhosted.org/packages/99/0f/a3076874e9c88ecb2ecc31382f6e7c21b428ede6f55aafa1aa272613e3cd/cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c", size = 4452794, upload-time = "2026-02-10T19:18:12.914Z" }, + { url = "https://files.pythonhosted.org/packages/02/ef/ffeb542d3683d24194a38f66ca17c0a4b8bf10631feef44a7ef64e631b1a/cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4", size = 4404160, upload-time = "2026-02-10T19:18:14.375Z" }, + { url = "https://files.pythonhosted.org/packages/96/93/682d2b43c1d5f1406ed048f377c0fc9fc8f7b0447a478d5c65ab3d3a66eb/cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9", size = 4667123, upload-time = "2026-02-10T19:18:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/45/2d/9c5f2926cb5300a8eefc3f4f0b3f3df39db7f7ce40c8365444c49363cbda/cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72", size = 3010220, upload-time = "2026-02-10T19:18:17.361Z" }, + { url = "https://files.pythonhosted.org/packages/48/ef/0c2f4a8e31018a986949d34a01115dd057bf536905dca38897bacd21fac3/cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595", size = 3467050, upload-time = "2026-02-10T19:18:18.899Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/dd/2d9fdb07cebdf3d51179730afb7d5e576153c6744c3ff8fded23030c204e/cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c", size = 3476964, upload-time = "2026-02-10T19:18:20.687Z" }, + { url = "https://files.pythonhosted.org/packages/e9/6f/6cc6cc9955caa6eaf83660b0da2b077c7fe8ff9950a3c5e45d605038d439/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a", size = 4218321, upload-time = "2026-02-10T19:18:22.349Z" }, + { url = "https://files.pythonhosted.org/packages/3e/5d/c4da701939eeee699566a6c1367427ab91a8b7088cc2328c09dbee940415/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356", size = 4381786, upload-time = "2026-02-10T19:18:24.529Z" }, + { url = "https://files.pythonhosted.org/packages/ac/97/a538654732974a94ff96c1db621fa464f455c02d4bb7d2652f4edc21d600/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da", size = 4217990, upload-time = "2026-02-10T19:18:25.957Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/7e500d2dd3ba891197b9efd2da5454b74336d64a7cc419aa7327ab74e5f6/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257", size = 4381252, upload-time = "2026-02-10T19:18:27.496Z" }, + { url = "https://files.pythonhosted.org/packages/bc/58/6b3d24e6b9bc474a2dcdee65dfd1f008867015408a271562e4b690561a4d/cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7", size = 3407605, upload-time = "2026-02-10T19:18:29.233Z" }, ] [[package]] @@ -1666,8 +1667,7 @@ source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "requests" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } wheels = [ @@ -1676,12 +1676,13 @@ wheels = [ [[package]] name = "docling" -version = "2.63.0" +version = "2.75.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "accelerate" }, { name = "beautifulsoup4" }, { name = "certifi" }, + { name = "defusedxml" }, { name = "docling-core", extra = ["chunking"] }, { name = "docling-ibm-models" }, { name = "docling-parse" }, @@ -1708,16 +1709,17 @@ dependencies = [ { name = "tqdm" }, { name = "typer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/18/c4/a8b7c66f0902ed4d0bcd87db94d3929539ac5fdff5325978744b30bee6b1/docling-2.63.0.tar.gz", hash = "sha256:5592c25e986ebf58811bcbfdbc8217d1a2074638b5412364968a1f1482994cc8", size = 250895, upload-time = "2025-11-20T14:43:53.131Z" } +sdist = { url = "https://files.pythonhosted.org/packages/77/0b/8ea363fd3c8bb4facb8d3c37aebfe7ad5265fecc1c6bd40f979d1f6179ba/docling-2.75.0.tar.gz", hash = "sha256:1b0a77766e201e5e2d118e236c006f3814afcea2e13726fb3c7389d666a56622", size = 364929, upload-time = "2026-02-24T20:18:04.896Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/fd/e5d23f8f12e18a8ada7d977cb86ae5f964b827ae71a42e3ee9f9e2d7d577/docling-2.63.0-py3-none-any.whl", hash = 
"sha256:59f39b6cf43f10f8c9e429c90f6973245c4c3752d5a03ca3e1732f6fb2905000", size = 268323, upload-time = "2025-11-20T14:43:51.823Z" }, + { url = "https://files.pythonhosted.org/packages/b8/85/5c6885547ce5cde33af43201e3b2b04cf2360e6854abc07485f54b8d265d/docling-2.75.0-py3-none-any.whl", hash = "sha256:6e156f0326edb6471fc076e978ac64f902f54aac0da13cf89df456013e377bcc", size = 396243, upload-time = "2026-02-24T20:18:03.57Z" }, ] [[package]] name = "docling-core" -version = "2.63.0" +version = "2.66.0" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "defusedxml" }, { name = "jsonref" }, { name = "jsonschema" }, { name = "latex2mathml" }, @@ -1729,9 +1731,9 @@ dependencies = [ { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d3/76/f6a1333c0ce4c20e60358185ff8b7fa92e1e1561a43a6788e7c8aaa9898e/docling_core-2.63.0.tar.gz", hash = "sha256:946cf97f27cb81a2c6507121045a356be91e40b5a06bbaf028ca7036df78b2f1", size = 251016, upload-time = "2026-02-03T14:41:07.158Z" } +sdist = { url = "https://files.pythonhosted.org/packages/00/ba/0b40f5bb2fff918bea79b0ea843ab3479a5f2c7a4be7009ddd713f0e8ab0/docling_core-2.66.0.tar.gz", hash = "sha256:3bbb85bf3e0106d20e7f3d2801ec40460347c95bcda55862b1fcb9effa4f78ea", size = 256592, upload-time = "2026-02-26T10:46:56.744Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/c4/0c825b46412f088828dd2730d231c745d1ff4b5537eed292e827103eff37/docling_core-2.63.0-py3-none-any.whl", hash = "sha256:8f39167bf17da13225c8a67d23df98c87a74e2ab39762dbf51fab93d9b90de25", size = 238637, upload-time = "2026-02-03T14:41:05.55Z" }, + { url = "https://files.pythonhosted.org/packages/2a/df/6983118cb33e5ce166592945bb473a2b7c60865a9ba661c1d462cfd2c356/docling_core-2.66.0-py3-none-any.whl", hash = "sha256:5f6cf447ca4f50c27531bd15ea1d16c3a811fbfe22e0107207711561520fb316", size = 241133, upload-time = "2026-02-26T10:46:55.021Z" }, ] [package.optional-dependencies] @@ 
-1771,7 +1773,7 @@ wheels = [ [[package]] name = "docling-parse" -version = "4.7.3" +version = "5.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docling-core" }, @@ -1780,25 +1782,24 @@ dependencies = [ { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "tabulate" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bb/7a/653c3b11920113217724fab9b4740f9f8964864f92a2a27590accecec5ac/docling_parse-4.7.3.tar.gz", hash = "sha256:5936e6bcb7969c2a13f38ecc75cada3b0919422dc845e96da4b0b7b3bbc394ce", size = 67646746, upload-time = "2026-01-14T14:18:19.376Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/23/07335df49075c376f1cb1238438234a41989688b70119064ef5b9cf1731e/docling_parse-5.4.0.tar.gz", hash = "sha256:1c48096b21cd23d1ab1d306bf0fdfbc7626ec22d62c51eb08a9ec49a5b58dbc8", size = 55466941, upload-time = "2026-02-24T11:46:56.627Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/21/98decb689c173763f9a089e221c68b36d7b67ace0759f8eb2c9ca4b98dd5/docling_parse-4.7.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:65e0653d9617d38e73bab069dc3e7960668ff4a6b0ff45a7635c3790eeed8a08", size = 14614450, upload-time = "2026-01-14T14:17:21.626Z" }, - { url = "https://files.pythonhosted.org/packages/b2/88/c7642d019b6932b294ac3aae0208b2998fc0b7690473d12b1aa56636c99f/docling_parse-4.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:978e7e7032760385264896871ae87cb3a04081766cc966c57e9750ce803162ac", size = 15063165, upload-time = "2026-01-14T14:17:24.337Z" }, - { url = "https://files.pythonhosted.org/packages/df/3d/a169dd9de8ed5f8edae2bbfd6528306ece67994813224bb0da7a6f694a5f/docling_parse-4.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1790e7e4ae202d67875c1c48fd6f8ef5c51d10b0c23157e4989b8673f2f31308", size = 15136333, upload-time = "2026-01-14T14:17:26.21Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/b5/b600c4a040f57b7876878550551a8a92000ffedc58f716c384e1a09ec085/docling_parse-4.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:5fc8f4770f9f6f90ba25f52451864a64394ddb158aea3a8fdda46a208c029cf6", size = 16144041, upload-time = "2026-01-14T14:17:28.108Z" }, - { url = "https://files.pythonhosted.org/packages/6c/81/dd317e0bce475153dc08a60a9a8615b1a04d4d3c9803175e6cb7b7e9b49b/docling_parse-4.7.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:66896bbe925073e4d48f18ec29dcd611a390d6b2378fae72125e77b020cd5664", size = 14615974, upload-time = "2026-01-14T14:17:30.246Z" }, - { url = "https://files.pythonhosted.org/packages/3a/b5/088590e0b32fd0a393ca419c644d1435a1c99fa6b2a87888eef4d0fdea33/docling_parse-4.7.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:281347b3e937c1a5ffa6f8774ee603b64a0899fe8a6885573dec7eb48a3421d8", size = 14981051, upload-time = "2026-01-14T14:17:32.426Z" }, - { url = "https://files.pythonhosted.org/packages/b7/63/2b6c9127924487573d5419d58ec77955f0b7c0a923c8232ad461d71039aa/docling_parse-4.7.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3d86c51f9ce35a1b40b2f410f7271d9bd5fc58e7240f4cae7fdd2cef757e671", size = 15092586, upload-time = "2026-01-14T14:17:34.634Z" }, - { url = "https://files.pythonhosted.org/packages/af/89/ed27a83eb113bdf0b0f82f3c30a0db3c005df58b236f6487b232dacdb57a/docling_parse-4.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:3b04459cc97a8a4929622e341b9981e23987a63af07db599afc5e1c4d389060b", size = 16144866, upload-time = "2026-01-14T14:17:36.742Z" }, - { url = "https://files.pythonhosted.org/packages/d6/26/9d86ae12699a25b7233f76ce062253e9c14e57781e00166b792b3a9d56db/docling_parse-4.7.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:d89231aa4fba3e38b80c11beb8edc07569e934c1f3935b51f57904fefe958ba5", size = 14616739, upload-time = "2026-01-14T14:17:38.567Z" }, - { url = 
"https://files.pythonhosted.org/packages/f2/fd/1aebb8a7f15d658f3be858ddbbc4ef7206089d540a7df0dcd4b846b99901/docling_parse-4.7.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dffd19ed373b0da5cea124606b183489a8686c3d18643e94485be1bdda5713ea", size = 14980782, upload-time = "2026-01-14T14:17:40.659Z" }, - { url = "https://files.pythonhosted.org/packages/3e/47/a722527c9f89c65f69f8a463be4f12ad73bae18132f29d8de8b2d9f6f082/docling_parse-4.7.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc32b6f25a673e41b9a8112b6b841284f60dbac9427b7848a03b435460f74aee", size = 15092450, upload-time = "2026-01-14T14:17:42.838Z" }, - { url = "https://files.pythonhosted.org/packages/91/c7/316373a92ba42c2aeaee128fc77a34333449fe3e820b9d524e0ee396ea35/docling_parse-4.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:ef691045623863624f2cb7347572d0262a53cb84940ef7dd851d9f13a2eb8833", size = 16147359, upload-time = "2026-01-14T14:17:44.906Z" }, - { url = "https://files.pythonhosted.org/packages/c9/9f/b62390c85f99436fd0c40cfcdfea2b553482696ca735e4cc0eee96b765aa/docling_parse-4.7.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6cb4fe8c62de06b70e6b38c4bd608f41ea3e9d7154a4e05f9a3c4d8944fe3a25", size = 14616910, upload-time = "2026-01-14T14:17:47.146Z" }, - { url = "https://files.pythonhosted.org/packages/15/c4/a18d70118ff26b12021effab53d2ffe0c7e6ef378e92c35941b5557529c1/docling_parse-4.7.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d18a5b1f7eecabed631c497a19f19d281a0d86f24bfe5d239e3df89bdc4df32", size = 14981477, upload-time = "2026-01-14T14:17:49.659Z" }, - { url = "https://files.pythonhosted.org/packages/cf/e6/899f033d80cb2b4e182226c73c6e91660df42e8867b76a04f0c024db7cb6/docling_parse-4.7.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4a93f91f97055e19cade33bb957d83f8615f1d2a0103b89827aca16b31a3e22", size = 15092546, upload-time = "2026-01-14T14:17:51.6Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/f3/6dbd2e9c018b44ffe1de3d0a1ea1b017ee25b2a2f21934495710beb6d4d7/docling_parse-4.7.3-cp313-cp313-win_amd64.whl", hash = "sha256:c5a416ae2e1761914ee8d7dbfbe3858e106c876b5a7fccaa3917c038e2f126ec", size = 16147305, upload-time = "2026-01-14T14:17:53.925Z" }, - { url = "https://files.pythonhosted.org/packages/4c/58/bcf78e156bf261de21c2ab2843f60aefd0b15217af69756a2ff0cd8287f5/docling_parse-4.7.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a6e0f9e18d808c87ce0fe1900c74a3496a42743f4bba7ed4dd83a0e6e168644a", size = 18061956, upload-time = "2026-01-14T14:18:12.96Z" }, + { url = "https://files.pythonhosted.org/packages/61/99/7c6c2a444d7e6f16b8628b3b71c6501b9b51bf8e987b07a7f60034763fce/docling_parse-5.4.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:b8c48d0fa52cdcd86dd2422ea78da55c99381d6c8ff8bd6abf9cb5f971654c57", size = 7764250, upload-time = "2026-02-24T11:46:18.402Z" }, + { url = "https://files.pythonhosted.org/packages/c9/86/acc1a6bf3c58ec2ffb2aef5076f04d69c6c9639818d4ffb6d5dfc8bf58b3/docling_parse-5.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2efe3e8748e450c47cff1715db4d3ed4e291212e251a7a6b7d9549090f3a1e6c", size = 8214211, upload-time = "2026-02-24T11:46:20.313Z" }, + { url = "https://files.pythonhosted.org/packages/8f/b1/c057ef6c61df8bbc81e7f2f860a65fca37bd0393c9a11fb387fd8f1e54db/docling_parse-5.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4b7d7bb0816708a87113e1c28b47ff3951eebc927e295275c70b4651090c04c", size = 8270981, upload-time = "2026-02-24T11:46:21.929Z" }, + { url = "https://files.pythonhosted.org/packages/38/3f/08dcd0e68c906865a9453aad3a551de23e0743a65d57248445d1244026b9/docling_parse-5.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:57a2c6133c859358cde26c1feb86c748749473544c01f938c987c1a007588c82", size = 9169554, upload-time = "2026-02-24T11:46:24.417Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/85/bfd7f13d6a787bf2033e082aea26ba8a05e809ef1f72e6761403477e1d3f/docling_parse-5.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:e0e330c370c66aa6263b0537e756a05a5ee9c6c0ea8453dca6c6a95bc6549c47", size = 7764928, upload-time = "2026-02-24T11:46:26.515Z" }, + { url = "https://files.pythonhosted.org/packages/02/b4/4390ecd7ed34678c2890a5b40b480f43568775bf3446d5a65a5b81241c15/docling_parse-5.4.0-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c3b5692dbb2fa20169e54452a7889de246e45a2d74b446c00bc0bea8487e859", size = 8168543, upload-time = "2026-02-24T11:46:28.168Z" }, + { url = "https://files.pythonhosted.org/packages/d2/94/bcc469b966be6cb03c6b6aa7989549c00a320575eb5b20ff1f52bada5297/docling_parse-5.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8d6fed073157e3a3373512c4fd2866081e71dc510a66a8ed303c2b004bc6ff0a", size = 8262410, upload-time = "2026-02-24T11:46:30.027Z" }, + { url = "https://files.pythonhosted.org/packages/15/9b/1419c9481ac71bb1d23b0bd4b72a991e5b03c7d3c4ec3c3078fb2e9f2be2/docling_parse-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:1573341070f81d5553840ade17895e8864aef8f3a0161034302fdab8e172c11c", size = 9170756, upload-time = "2026-02-24T11:46:31.719Z" }, + { url = "https://files.pythonhosted.org/packages/70/55/a4d5ede8ad11da359ee48d8d17ac77fb4ae59c3d275f50d1f9bc5cdf9b3a/docling_parse-5.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3bf45ef2a9bf3ea86b7033f0337927568147dfb6f2c2828ef353d66ebc17eb49", size = 7766010, upload-time = "2026-02-24T11:46:33.592Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ac/87308a424022559ea88d1765a3c3d2746c1286f22a2eb3606165c17518d6/docling_parse-5.4.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a77401b3e1b68e2965e9cc25f3907c6c1198b988098983cf726109265ad4317f", size = 8166965, upload-time = "2026-02-24T11:46:35.108Z" }, + { url = 
"https://files.pythonhosted.org/packages/c6/18/12b49c87109f63ff54e570edd2faa47d1193ecf4b8e94ff5d273645f879e/docling_parse-5.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a4bd77a7abfe1843e4d8cedcfb4363b4975227af7622f2ded3a0fc2ce7bd0b4", size = 8261576, upload-time = "2026-02-24T11:46:36.927Z" }, + { url = "https://files.pythonhosted.org/packages/6d/c3/862ddb3ece951f467384d58e503394589e9428488fa956fe399d2b1738c1/docling_parse-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:88e27d43101e71f56f22594ce1b05d5a3a868df7ee16f2dd167214735f12636f", size = 9172236, upload-time = "2026-02-24T11:46:38.423Z" }, + { url = "https://files.pythonhosted.org/packages/c4/54/a6876b41387ac11967c161d85ad06db1d562856add11d633afc24c788885/docling_parse-5.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dda35a980afb3afbf432f4781fed507928188e27b40884226d720f4b3a9afa9c", size = 7766085, upload-time = "2026-02-24T11:46:40.351Z" }, + { url = "https://files.pythonhosted.org/packages/72/fb/9f0d60af63b0f3063cbcae4273e527a14274d2e4b814f5c2051f8f16d55b/docling_parse-5.4.0-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b679653d1aadce962d3266b727c1563ae9aff3abf3a820d45b130a1a55bad2d2", size = 8167008, upload-time = "2026-02-24T11:46:42.459Z" }, + { url = "https://files.pythonhosted.org/packages/61/28/d81815c3e4e4fe673bf4218e5e93b28c163a0200f8f802b963e9ea210192/docling_parse-5.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86cede05b6ccb63c1685fbdc5bd16c5332c78c5dd9ea7565fd6f7f91c816ebae", size = 8261911, upload-time = "2026-02-24T11:46:44.234Z" }, + { url = "https://files.pythonhosted.org/packages/b0/63/ca87d27610fa04d9bc321f9253fc688ef751dc27a942fa531c3457947cc0/docling_parse-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:549b9bada8df48496e77e6ddf8a45a9c6cd5794d87c0b0e32f89fec108bb7b30", size = 9172252, upload-time = "2026-02-24T11:46:45.736Z" }, ] [[package]] @@ -2200,14 +2201,14 @@ wheels = [ 
[[package]] name = "gitpython" -version = "3.1.38" +version = "3.1.46" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b3/45/cee7af549b6fa33f04531e402693a772b776cd9f845a2cbeca99cfac3331/GitPython-3.1.38.tar.gz", hash = "sha256:4d683e8957c8998b58ddb937e3e6cd167215a180e1ffd4da769ab81c620a89fe", size = 200632, upload-time = "2023-10-17T06:09:52.235Z" } +sdist = { url = "https://files.pythonhosted.org/packages/df/b5/59d16470a1f0dfe8c793f9ef56fd3826093fc52b3bd96d6b9d6c26c7e27b/gitpython-3.1.46.tar.gz", hash = "sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f", size = 215371, upload-time = "2026-01-01T15:37:32.073Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/ae/044453eacd5a526d3f242ccd77e38ee8219c65e0b132562b551bd67c61a4/GitPython-3.1.38-py3-none-any.whl", hash = "sha256:9e98b672ffcb081c2c8d5aa630d4251544fb040fb158863054242f24a2a2ba30", size = 190573, upload-time = "2023-10-17T06:09:50.18Z" }, + { url = "https://files.pythonhosted.org/packages/6a/09/e21df6aef1e1ffc0c816f0522ddc3f6dcded766c3261813131c78a704470/gitpython-3.1.46-py3-none-any.whl", hash = "sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058", size = 208620, upload-time = "2026-01-01T15:37:30.574Z" }, ] [[package]] @@ -2246,6 +2247,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, ] +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + [[package]] name = "google-cloud-vision" version = "3.12.1" @@ -2264,21 +2270,23 @@ wheels = [ [[package]] name = "google-genai" -version = "1.49.0" +version = "1.65.0" source = { registry = "https://pypi.org/simple" } dependencies = [ 
{ name = "anyio" }, - { name = "google-auth" }, + { name = "distro" }, + { name = "google-auth", extra = ["requests"] }, { name = "httpx" }, { name = "pydantic" }, { name = "requests" }, + { name = "sniffio" }, { name = "tenacity" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/82/49/1a724ee3c3748fa50721d53a52d9fee88c67d0c43bb16eb2b10ee89ab239/google_genai-1.49.0.tar.gz", hash = "sha256:35eb16023b72e298571ae30e919c810694f258f2ba68fc77a2185c7c8829ad5a", size = 253493, upload-time = "2025-11-05T22:41:03.278Z" } +sdist = { url = "https://files.pythonhosted.org/packages/79/f9/cc1191c2540d6a4e24609a586c4ed45d2db57cfef47931c139ee70e5874a/google_genai-1.65.0.tar.gz", hash = "sha256:d470eb600af802d58a79c7f13342d9ea0d05d965007cae8f76c7adff3d7a4750", size = 497206, upload-time = "2026-02-26T00:20:33.824Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/d3/84a152746dc7bdebb8ba0fd7d6157263044acd1d14b2a53e8df4a307b6b7/google_genai-1.49.0-py3-none-any.whl", hash = "sha256:ad49cd5be5b63397069e7aef9a4fe0a84cbdf25fcd93408e795292308db4ef32", size = 256098, upload-time = "2025-11-05T22:41:01.429Z" }, + { url = "https://files.pythonhosted.org/packages/68/3c/3fea4e7c91357c71782d7dcaad7a2577d636c90317e003386893c25bc62c/google_genai-1.65.0-py3-none-any.whl", hash = "sha256:68c025205856919bc03edb0155c11b4b833810b7ce17ad4b7a9eeba5158f6c44", size = 724429, upload-time = "2026-02-26T00:20:32.186Z" }, ] [[package]] @@ -2659,7 +2667,7 @@ dependencies = [ { name = "jmespath", marker = "platform_python_implementation == 'PyPy'" }, { name = "python-dateutil", marker = "platform_python_implementation == 'PyPy'" }, { name = "requests", marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, + { name = "urllib3", marker = "platform_python_implementation == 'PyPy'" 
}, ] sdist = { url = "https://files.pythonhosted.org/packages/a5/db/e913f210d66c2ad09521925f29754fb9b7240da11238a29a0186ebad4ffa/ibm_cos_sdk_core-2.14.2.tar.gz", hash = "sha256:d594b2af58f70e892aa3b0f6ae4b0fa5d412422c05beeba083d4561b5fad91b4", size = 1103504, upload-time = "2025-06-18T05:03:42.969Z" } @@ -2677,7 +2685,7 @@ dependencies = [ { name = "jmespath", marker = "platform_python_implementation != 'PyPy'" }, { name = "python-dateutil", marker = "platform_python_implementation != 'PyPy'" }, { name = "requests", marker = "platform_python_implementation != 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3", marker = "platform_python_implementation != 'PyPy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/7e/45/80c23aa1e13175a9deefe43cbf8e853a3d3bfc8dfa8b6d6fe83e5785fe21/ibm_cos_sdk_core-2.14.3.tar.gz", hash = "sha256:85dee7790c92e8db69bf39dae4c02cac211e3c1d81bb86e64fa2d1e929674623", size = 1103637, upload-time = "2025-08-01T06:35:41.645Z" } @@ -2726,8 +2734,7 @@ dependencies = [ { name = "pandas" }, { name = "requests" }, { name = "tabulate" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c7/56/2e3df38a1f13062095d7bde23c87a92f3898982993a15186b1bfecbd206f/ibm_watsonx_ai-1.3.42.tar.gz", hash = "sha256:ee5be59009004245d957ce97d1227355516df95a2640189749487614fef674ff", size = 688651, upload-time = "2025-10-01T13:35:41.527Z" } wheels = [ @@ -3214,8 +3221,7 @@ dependencies = [ { name = "requests" }, { name = "requests-oauthlib" }, { name = "six" }, - { name = "urllib3", version = "1.26.20", source = 
{ registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, { name = "websocket-client" }, ] sdist = { url = "https://files.pythonhosted.org/packages/2c/8f/85bf51ad4150f64e8c665daf0d9dfe9787ae92005efb9a4d1cba592bd79d/kubernetes-35.0.0.tar.gz", hash = "sha256:3d00d344944239821458b9efd484d6df9f011da367ecb155dadf9513f05f09ee", size = 1094642, upload-time = "2026-01-16T01:05:27.76Z" } @@ -3224,27 +3230,53 @@ wheels = [ ] [[package]] -name = "lancedb" -version = "0.5.7" +name = "lance-namespace" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lance-namespace-urllib3-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2b/c6/aec0d7752e15536564b50cf9a8926f0e5d7780aa3ab8ce8bca46daa55659/lance_namespace-0.5.2.tar.gz", hash = "sha256:566cc33091b5631793ab411f095d46c66391db0a62343cd6b4470265bb04d577", size = 10274, upload-time = "2026-02-20T03:14:31.777Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/3d/737c008d8fb2861e7ce260e2ffab0d5058eae41556181f80f1a1c3b52ef5/lance_namespace-0.5.2-py3-none-any.whl", hash = "sha256:6ccaf5649bf6ee6aa92eed9c535a114b7b4eb08e89f40426f58bc1466cbcffa3", size = 12087, upload-time = "2026-02-20T03:14:35.261Z" }, +] + +[[package]] +name = "lance-namespace-urllib3-client" +version = "0.5.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs" }, - { name = "cachetools" }, - { name = "click" }, - { name = "deprecation" }, - { name = "overrides" }, { name = "pydantic" }, - { name = "pylance" }, - { name = "pyyaml" }, - { name = "ratelimiter" }, - { name = "requests" }, - { name = "retry" }, - { name = "semver" }, + { name = "python-dateutil" }, + { name = "typing-extensions" }, + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e9/64/51622c93ec8c164483c83b68764e5e76e52286c0137a8247bc6a7fac25f4/lance_namespace_urllib3_client-0.5.2.tar.gz", hash = "sha256:8a3a238006e6eabc01fc9d385ac3de22ba933aef0ae8987558f3c3199c9b3799", size = 172578, upload-time = "2026-02-20T03:14:33.031Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/10/f86d994498b37f7f35d0b8c2f7626a16fe4cb1949b518c1e5d5052ecf95f/lance_namespace_urllib3_client-0.5.2-py3-none-any.whl", hash = "sha256:83cefb6fd6e5df0b99b5e866ee3d46300d375b75e8af32c27bc16fbf7c1a5978", size = 300351, upload-time = "2026-02-20T03:14:34.236Z" }, +] + +[[package]] +name = "lancedb" +version = "0.29.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "lance-namespace" }, + { name = "numpy" }, + { name = "overrides", marker = "python_full_version < '3.12'" }, + { name = "packaging" }, + { name = "pyarrow" }, + { name = "pydantic" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/14/1b/f87a2b6420f6f55ea64e5f8f18f231450cc602a0854739bcf946cebc080a/lancedb-0.5.7.tar.gz", hash = "sha256:878914b493f91d09a77b14f1528104741f273234cbdd6671be705f447701fd51", size = 102890, upload-time = "2024-02-22T20:11:29.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/01/21/ecb191feff512640a59e17fe1737bd9c33970bc857c59a77fa61d5e314d9/lancedb-0.5.7-py3-none-any.whl", hash = "sha256:6169966f715ef530be545950e1aaf9f3f160967e4ba7456cd67c9f30f678095d", size = 115104, upload-time = "2024-02-22T20:11:25.726Z" }, + { url = "https://files.pythonhosted.org/packages/f7/77/fbb25946a234928958e016c5448343fd314bd601315f9587568321591a17/lancedb-0.29.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc1faf2e12addb9585569d0fb114ecc25ec3867e4e1aa6934e9343cfb5265ee4", size = 42341708, upload-time = "2026-02-09T06:21:31.677Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/95/d3a7b6d0237e343ad5b2afef2bdb99423746d5c3e882a9cab68dc041c2d0/lancedb-0.29.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fec19cfc52a5b9d98e060bd2f02a1c9df6a0bfd15b36021b6017327a41893a3", size = 44147347, upload-time = "2026-02-09T06:31:02.567Z" }, + { url = "https://files.pythonhosted.org/packages/66/21/153a42294279c5b66d763f357808dde0899b71c5c8e41ad5ecbeeb8728df/lancedb-0.29.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:636939ab9225d435020ba17c231f5eaba15312a07813bcebcd71128204cc039f", size = 47186355, upload-time = "2026-02-09T06:34:47.726Z" }, + { url = "https://files.pythonhosted.org/packages/a2/f7/f7041ae7d7730332b2754fe7adc2e0bd496f92bf526ac710b7eb3caf1d0a/lancedb-0.29.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f79b32083fcab139009db521d2f7fcd6afe4cca98a78c06c5940ff00a170cc1a", size = 44172354, upload-time = "2026-02-09T06:31:03.834Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/c152497c18cea0f36b523fc03b8e0a48be2b120276cc15a86d79b8b83cde/lancedb-0.29.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:991043a28c1f49f14df2479b554a95c759a85666dc58573cc86c1b9df05db794", size = 47228009, upload-time = "2026-02-09T06:34:40.872Z" }, + { url = "https://files.pythonhosted.org/packages/66/50/bd47bca59a87a88a4ca291a0718291422440750d84b34318048c70a537c2/lancedb-0.29.2-cp39-abi3-win_amd64.whl", hash = "sha256:101eb0ac018bb0b643dd9ea22065f6f2102e9d44c9ac58a197477ccbfbc0b9fa", size = 52028768, upload-time = "2026-02-09T07:00:02.272Z" }, ] [[package]] @@ -3263,7 +3295,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.76" +version = "0.3.83" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -3273,10 +3305,11 @@ dependencies = [ { name = "pyyaml" }, { name = "tenacity" }, { name = "typing-extensions" }, + { name = "uuid-utils" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/4f/4d/5e2ea7754ee0a1f524c412801c6ba9ad49318ecb58b0d524903c3d9efe0a/langchain_core-0.3.76.tar.gz", hash = "sha256:71136a122dd1abae2c289c5809d035cf12b5f2bb682d8a4c1078cd94feae7419", size = 573568, upload-time = "2025-09-10T14:49:39.863Z" } +sdist = { url = "https://files.pythonhosted.org/packages/21/a4/24f2d787bfcf56e5990924cacefe6f6e7971a3629f97c8162fc7a2a3d851/langchain_core-0.3.83.tar.gz", hash = "sha256:a0a4c7b6ea1c446d3b432116f405dc2afa1fe7891c44140d3d5acca221909415", size = 597965, upload-time = "2026-01-13T01:19:23.854Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/b5/501c0ffcb09c734457ceaa86bc7b1dd37b6a261147bd653add03b838aacb/langchain_core-0.3.76-py3-none-any.whl", hash = "sha256:46e0eb48c7ac532432d51f8ca1ece1804c82afe9ae3dcf027b867edadf82b3ec", size = 447508, upload-time = "2025-09-10T14:49:38.179Z" }, + { url = "https://files.pythonhosted.org/packages/5a/db/d71b80d3bd6193812485acea4001cdf86cf95a44bbf942f7a240120ff762/langchain_core-0.3.83-py3-none-any.whl", hash = "sha256:8c92506f8b53fc1958b1c07447f58c5783eb8833dd3cb6dc75607c80891ab1ae", size = 458890, upload-time = "2026-01-13T01:19:21.748Z" }, ] [[package]] @@ -4114,54 +4147,54 @@ wheels = [ [[package]] name = "mypy" -version = "1.19.0" +version = "1.19.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "librt" }, + { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, { name = "mypy-extensions" }, { name = "pathspec" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f9/b5/b58cdc25fadd424552804bf410855d52324183112aa004f0732c5f6324cf/mypy-1.19.0.tar.gz", hash = "sha256:f6b874ca77f733222641e5c46e4711648c4037ea13646fd0cdc814c2eaec2528", size = 3579025, upload-time = "2025-11-28T15:49:01.26Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/8f/55fb488c2b7dabd76e3f30c10f7ab0f6190c1fcbc3e97b1e588ec625bbe2/mypy-1.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6148ede033982a8c5ca1143de34c71836a09f105068aaa8b7d5edab2b053e6c8", size = 13093239, upload-time = "2025-11-28T15:45:11.342Z" }, - { url = "https://files.pythonhosted.org/packages/72/1b/278beea978456c56b3262266274f335c3ba5ff2c8108b3b31bec1ffa4c1d/mypy-1.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a9ac09e52bb0f7fb912f5d2a783345c72441a08ef56ce3e17c1752af36340a39", size = 12156128, upload-time = "2025-11-28T15:46:02.566Z" }, - { url = "https://files.pythonhosted.org/packages/21/f8/e06f951902e136ff74fd7a4dc4ef9d884faeb2f8eb9c49461235714f079f/mypy-1.19.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:11f7254c15ab3f8ed68f8e8f5cbe88757848df793e31c36aaa4d4f9783fd08ab", size = 12753508, upload-time = "2025-11-28T15:44:47.538Z" }, - { url = "https://files.pythonhosted.org/packages/67/5a/d035c534ad86e09cee274d53cf0fd769c0b29ca6ed5b32e205be3c06878c/mypy-1.19.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318ba74f75899b0e78b847d8c50821e4c9637c79d9a59680fc1259f29338cb3e", size = 13507553, upload-time = "2025-11-28T15:44:39.26Z" }, - { url = "https://files.pythonhosted.org/packages/6a/17/c4a5498e00071ef29e483a01558b285d086825b61cf1fb2629fbdd019d94/mypy-1.19.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cf7d84f497f78b682edd407f14a7b6e1a2212b433eedb054e2081380b7395aa3", size = 13792898, upload-time = "2025-11-28T15:44:31.102Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/f6/bb542422b3ee4399ae1cdc463300d2d91515ab834c6233f2fd1d52fa21e0/mypy-1.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:c3385246593ac2b97f155a0e9639be906e73534630f663747c71908dfbf26134", size = 10048835, upload-time = "2025-11-28T15:48:15.744Z" }, - { url = "https://files.pythonhosted.org/packages/0f/d2/010fb171ae5ac4a01cc34fbacd7544531e5ace95c35ca166dd8fd1b901d0/mypy-1.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a31e4c28e8ddb042c84c5e977e28a21195d086aaffaf08b016b78e19c9ef8106", size = 13010563, upload-time = "2025-11-28T15:48:23.975Z" }, - { url = "https://files.pythonhosted.org/packages/41/6b/63f095c9f1ce584fdeb595d663d49e0980c735a1d2004720ccec252c5d47/mypy-1.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34ec1ac66d31644f194b7c163d7f8b8434f1b49719d403a5d26c87fff7e913f7", size = 12077037, upload-time = "2025-11-28T15:47:51.582Z" }, - { url = "https://files.pythonhosted.org/packages/d7/83/6cb93d289038d809023ec20eb0b48bbb1d80af40511fa077da78af6ff7c7/mypy-1.19.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cb64b0ba5980466a0f3f9990d1c582bcab8db12e29815ecb57f1408d99b4bff7", size = 12680255, upload-time = "2025-11-28T15:46:57.628Z" }, - { url = "https://files.pythonhosted.org/packages/99/db/d217815705987d2cbace2edd9100926196d6f85bcb9b5af05058d6e3c8ad/mypy-1.19.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:120cffe120cca5c23c03c77f84abc0c14c5d2e03736f6c312480020082f1994b", size = 13421472, upload-time = "2025-11-28T15:47:59.655Z" }, - { url = "https://files.pythonhosted.org/packages/4e/51/d2beaca7c497944b07594f3f8aad8d2f0e8fc53677059848ae5d6f4d193e/mypy-1.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7a500ab5c444268a70565e374fc803972bfd1f09545b13418a5174e29883dab7", size = 13651823, upload-time = "2025-11-28T15:45:29.318Z" }, - { url = 
"https://files.pythonhosted.org/packages/aa/d1/7883dcf7644db3b69490f37b51029e0870aac4a7ad34d09ceae709a3df44/mypy-1.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:c14a98bc63fd867530e8ec82f217dae29d0550c86e70debc9667fff1ec83284e", size = 10049077, upload-time = "2025-11-28T15:45:39.818Z" }, - { url = "https://files.pythonhosted.org/packages/11/7e/1afa8fb188b876abeaa14460dc4983f909aaacaa4bf5718c00b2c7e0b3d5/mypy-1.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0fb3115cb8fa7c5f887c8a8d81ccdcb94cff334684980d847e5a62e926910e1d", size = 13207728, upload-time = "2025-11-28T15:46:26.463Z" }, - { url = "https://files.pythonhosted.org/packages/b2/13/f103d04962bcbefb1644f5ccb235998b32c337d6c13145ea390b9da47f3e/mypy-1.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3e19e3b897562276bb331074d64c076dbdd3e79213f36eed4e592272dabd760", size = 12202945, upload-time = "2025-11-28T15:48:49.143Z" }, - { url = "https://files.pythonhosted.org/packages/e4/93/a86a5608f74a22284a8ccea8592f6e270b61f95b8588951110ad797c2ddd/mypy-1.19.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9d491295825182fba01b6ffe2c6fe4e5a49dbf4e2bb4d1217b6ced3b4797bc6", size = 12718673, upload-time = "2025-11-28T15:47:37.193Z" }, - { url = "https://files.pythonhosted.org/packages/3d/58/cf08fff9ced0423b858f2a7495001fda28dc058136818ee9dffc31534ea9/mypy-1.19.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6016c52ab209919b46169651b362068f632efcd5eb8ef9d1735f6f86da7853b2", size = 13608336, upload-time = "2025-11-28T15:48:32.625Z" }, - { url = "https://files.pythonhosted.org/packages/64/ed/9c509105c5a6d4b73bb08733102a3ea62c25bc02c51bca85e3134bf912d3/mypy-1.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f188dcf16483b3e59f9278c4ed939ec0254aa8a60e8fc100648d9ab5ee95a431", size = 13833174, upload-time = "2025-11-28T15:45:48.091Z" }, - { url = 
"https://files.pythonhosted.org/packages/cd/71/01939b66e35c6f8cb3e6fdf0b657f0fd24de2f8ba5e523625c8e72328208/mypy-1.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:0e3c3d1e1d62e678c339e7ade72746a9e0325de42cd2cccc51616c7b2ed1a018", size = 10112208, upload-time = "2025-11-28T15:46:41.702Z" }, - { url = "https://files.pythonhosted.org/packages/cb/0d/a1357e6bb49e37ce26fcf7e3cc55679ce9f4ebee0cd8b6ee3a0e301a9210/mypy-1.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7686ed65dbabd24d20066f3115018d2dce030d8fa9db01aa9f0a59b6813e9f9e", size = 13191993, upload-time = "2025-11-28T15:47:22.336Z" }, - { url = "https://files.pythonhosted.org/packages/5d/75/8e5d492a879ec4490e6ba664b5154e48c46c85b5ac9785792a5ec6a4d58f/mypy-1.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fd4a985b2e32f23bead72e2fb4bbe5d6aceee176be471243bd831d5b2644672d", size = 12174411, upload-time = "2025-11-28T15:44:55.492Z" }, - { url = "https://files.pythonhosted.org/packages/71/31/ad5dcee9bfe226e8eaba777e9d9d251c292650130f0450a280aec3485370/mypy-1.19.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc51a5b864f73a3a182584b1ac75c404396a17eced54341629d8bdcb644a5bba", size = 12727751, upload-time = "2025-11-28T15:44:14.169Z" }, - { url = "https://files.pythonhosted.org/packages/77/06/b6b8994ce07405f6039701f4b66e9d23f499d0b41c6dd46ec28f96d57ec3/mypy-1.19.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:37af5166f9475872034b56c5efdcf65ee25394e9e1d172907b84577120714364", size = 13593323, upload-time = "2025-11-28T15:46:34.699Z" }, - { url = "https://files.pythonhosted.org/packages/68/b1/126e274484cccdf099a8e328d4fda1c7bdb98a5e888fa6010b00e1bbf330/mypy-1.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:510c014b722308c9bd377993bcbf9a07d7e0692e5fa8fc70e639c1eb19fc6bee", size = 13818032, upload-time = "2025-11-28T15:46:18.286Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/56/53a8f70f562dfc466c766469133a8a4909f6c0012d83993143f2a9d48d2d/mypy-1.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:cabbee74f29aa9cd3b444ec2f1e4fa5a9d0d746ce7567a6a609e224429781f53", size = 10120644, upload-time = "2025-11-28T15:47:43.99Z" }, - { url = "https://files.pythonhosted.org/packages/09/0e/fe228ed5aeab470c6f4eb82481837fadb642a5aa95cc8215fd2214822c10/mypy-1.19.0-py3-none-any.whl", hash = "sha256:0c01c99d626380752e527d5ce8e69ffbba2046eb8a060db0329690849cf9b6f9", size = 2469714, upload-time = "2025-11-28T15:45:33.22Z" }, + { url = "https://files.pythonhosted.org/packages/2f/63/e499890d8e39b1ff2df4c0c6ce5d371b6844ee22b8250687a99fd2f657a8/mypy-1.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f05aa3d375b385734388e844bc01733bd33c644ab48e9684faa54e5389775ec", size = 13101333, upload-time = "2025-12-15T05:03:03.28Z" }, + { url = "https://files.pythonhosted.org/packages/72/4b/095626fc136fba96effc4fd4a82b41d688ab92124f8c4f7564bffe5cf1b0/mypy-1.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:022ea7279374af1a5d78dfcab853fe6a536eebfda4b59deab53cd21f6cd9f00b", size = 12164102, upload-time = "2025-12-15T05:02:33.611Z" }, + { url = "https://files.pythonhosted.org/packages/0c/5b/952928dd081bf88a83a5ccd49aaecfcd18fd0d2710c7ff07b8fb6f7032b9/mypy-1.19.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee4c11e460685c3e0c64a4c5de82ae143622410950d6be863303a1c4ba0e36d6", size = 12765799, upload-time = "2025-12-15T05:03:28.44Z" }, + { url = "https://files.pythonhosted.org/packages/2a/0d/93c2e4a287f74ef11a66fb6d49c7a9f05e47b0a4399040e6719b57f500d2/mypy-1.19.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de759aafbae8763283b2ee5869c7255391fbc4de3ff171f8f030b5ec48381b74", size = 13522149, upload-time = "2025-12-15T05:02:36.011Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/0e/33a294b56aaad2b338d203e3a1d8b453637ac36cb278b45005e0901cf148/mypy-1.19.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ab43590f9cd5108f41aacf9fca31841142c786827a74ab7cc8a2eacb634e09a1", size = 13810105, upload-time = "2025-12-15T05:02:40.327Z" }, + { url = "https://files.pythonhosted.org/packages/0e/fd/3e82603a0cb66b67c5e7abababce6bf1a929ddf67bf445e652684af5c5a0/mypy-1.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:2899753e2f61e571b3971747e302d5f420c3fd09650e1951e99f823bc3089dac", size = 10057200, upload-time = "2025-12-15T05:02:51.012Z" }, + { url = "https://files.pythonhosted.org/packages/ef/47/6b3ebabd5474d9cdc170d1342fbf9dddc1b0ec13ec90bf9004ee6f391c31/mypy-1.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d8dfc6ab58ca7dda47d9237349157500468e404b17213d44fc1cb77bce532288", size = 13028539, upload-time = "2025-12-15T05:03:44.129Z" }, + { url = "https://files.pythonhosted.org/packages/5c/a6/ac7c7a88a3c9c54334f53a941b765e6ec6c4ebd65d3fe8cdcfbe0d0fd7db/mypy-1.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e3f276d8493c3c97930e354b2595a44a21348b320d859fb4a2b9f66da9ed27ab", size = 12083163, upload-time = "2025-12-15T05:03:37.679Z" }, + { url = "https://files.pythonhosted.org/packages/67/af/3afa9cf880aa4a2c803798ac24f1d11ef72a0c8079689fac5cfd815e2830/mypy-1.19.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2abb24cf3f17864770d18d673c85235ba52456b36a06b6afc1e07c1fdcd3d0e6", size = 12687629, upload-time = "2025-12-15T05:02:31.526Z" }, + { url = "https://files.pythonhosted.org/packages/2d/46/20f8a7114a56484ab268b0ab372461cb3a8f7deed31ea96b83a4e4cfcfca/mypy-1.19.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a009ffa5a621762d0c926a078c2d639104becab69e79538a494bcccb62cc0331", size = 13436933, upload-time = "2025-12-15T05:03:15.606Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/f8/33b291ea85050a21f15da910002460f1f445f8007adb29230f0adea279cb/mypy-1.19.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f7cee03c9a2e2ee26ec07479f38ea9c884e301d42c6d43a19d20fb014e3ba925", size = 13661754, upload-time = "2025-12-15T05:02:26.731Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a3/47cbd4e85bec4335a9cd80cf67dbc02be21b5d4c9c23ad6b95d6c5196bac/mypy-1.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:4b84a7a18f41e167f7995200a1d07a4a6810e89d29859df936f1c3923d263042", size = 10055772, upload-time = "2025-12-15T05:03:26.179Z" }, + { url = "https://files.pythonhosted.org/packages/06/8a/19bfae96f6615aa8a0604915512e0289b1fad33d5909bf7244f02935d33a/mypy-1.19.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8174a03289288c1f6c46d55cef02379b478bfbc8e358e02047487cad44c6ca1", size = 13206053, upload-time = "2025-12-15T05:03:46.622Z" }, + { url = "https://files.pythonhosted.org/packages/a5/34/3e63879ab041602154ba2a9f99817bb0c85c4df19a23a1443c8986e4d565/mypy-1.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ffcebe56eb09ff0c0885e750036a095e23793ba6c2e894e7e63f6d89ad51f22e", size = 12219134, upload-time = "2025-12-15T05:03:24.367Z" }, + { url = "https://files.pythonhosted.org/packages/89/cc/2db6f0e95366b630364e09845672dbee0cbf0bbe753a204b29a944967cd9/mypy-1.19.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b64d987153888790bcdb03a6473d321820597ab8dd9243b27a92153c4fa50fd2", size = 12731616, upload-time = "2025-12-15T05:02:44.725Z" }, + { url = "https://files.pythonhosted.org/packages/00/be/dd56c1fd4807bc1eba1cf18b2a850d0de7bacb55e158755eb79f77c41f8e/mypy-1.19.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c35d298c2c4bba75feb2195655dfea8124d855dfd7343bf8b8c055421eaf0cf8", size = 13620847, upload-time = "2025-12-15T05:03:39.633Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/42/332951aae42b79329f743bf1da088cd75d8d4d9acc18fbcbd84f26c1af4e/mypy-1.19.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:34c81968774648ab5ac09c29a375fdede03ba253f8f8287847bd480782f73a6a", size = 13834976, upload-time = "2025-12-15T05:03:08.786Z" }, + { url = "https://files.pythonhosted.org/packages/6f/63/e7493e5f90e1e085c562bb06e2eb32cae27c5057b9653348d38b47daaecc/mypy-1.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b10e7c2cd7870ba4ad9b2d8a6102eb5ffc1f16ca35e3de6bfa390c1113029d13", size = 10118104, upload-time = "2025-12-15T05:03:10.834Z" }, + { url = "https://files.pythonhosted.org/packages/de/9f/a6abae693f7a0c697dbb435aac52e958dc8da44e92e08ba88d2e42326176/mypy-1.19.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e3157c7594ff2ef1634ee058aafc56a82db665c9438fd41b390f3bde1ab12250", size = 13201927, upload-time = "2025-12-15T05:02:29.138Z" }, + { url = "https://files.pythonhosted.org/packages/9a/a4/45c35ccf6e1c65afc23a069f50e2c66f46bd3798cbe0d680c12d12935caa/mypy-1.19.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdb12f69bcc02700c2b47e070238f42cb87f18c0bc1fc4cdb4fb2bc5fd7a3b8b", size = 12206730, upload-time = "2025-12-15T05:03:01.325Z" }, + { url = "https://files.pythonhosted.org/packages/05/bb/cdcf89678e26b187650512620eec8368fded4cfd99cfcb431e4cdfd19dec/mypy-1.19.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f859fb09d9583a985be9a493d5cfc5515b56b08f7447759a0c5deaf68d80506e", size = 12724581, upload-time = "2025-12-15T05:03:20.087Z" }, + { url = "https://files.pythonhosted.org/packages/d1/32/dd260d52babf67bad8e6770f8e1102021877ce0edea106e72df5626bb0ec/mypy-1.19.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9a6538e0415310aad77cb94004ca6482330fece18036b5f360b62c45814c4ef", size = 13616252, upload-time = "2025-12-15T05:02:49.036Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/d0/5e60a9d2e3bd48432ae2b454b7ef2b62a960ab51292b1eda2a95edd78198/mypy-1.19.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:da4869fc5e7f62a88f3fe0b5c919d1d9f7ea3cef92d3689de2823fd27e40aa75", size = 13840848, upload-time = "2025-12-15T05:02:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/d32051fa65ecf6cc8c6610956473abdc9b4c43301107476ac03559507843/mypy-1.19.1-cp313-cp313-win_amd64.whl", hash = "sha256:016f2246209095e8eda7538944daa1d60e1e8134d98983b9fc1e92c1fc0cb8dd", size = 10135510, upload-time = "2025-12-15T05:02:58.438Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f4/4ce9a05ce5ded1de3ec1c1d96cf9f9504a04e54ce0ed55cfa38619a32b8d/mypy-1.19.1-py3-none-any.whl", hash = "sha256:f1235f5ea01b7db5468d53ece6aaddf1ad0b88d9e7462b86ef96fe04995d7247", size = 2471239, upload-time = "2025-12-15T05:03:07.248Z" }, ] [[package]] name = "mypy-boto3-bedrock-runtime" -version = "1.40.76" +version = "1.42.42" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e7/db/cc668a48a27973df31c7044a6785bd0e8691b1a0419dae001c4c29f1c98f/mypy_boto3_bedrock_runtime-1.40.76.tar.gz", hash = "sha256:52f2a2b3955eb9f4f0d075398f2d430abcc6bf56ff00815b94e3371e66030059", size = 28428, upload-time = "2025-11-18T21:42:43.41Z" } +sdist = { url = "https://files.pythonhosted.org/packages/46/bb/65dc1b2c5796a6ab5f60bdb57343bd6c3ecb82251c580eca415c8548333e/mypy_boto3_bedrock_runtime-1.42.42.tar.gz", hash = "sha256:3a4088218478b6fbbc26055c03c95bee4fc04624a801090b3cce3037e8275c8d", size = 29840, upload-time = "2026-02-04T20:53:05.999Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/05/6f/8b04729224a76952e08406eccbbbebfa75ee7df91313279d76428f13fdc2/mypy_boto3_bedrock_runtime-1.40.76-py3-none-any.whl", hash = 
"sha256:0347f6d78e342d640da74bbd6158b276c5cb39ef73405084a65fe490766b6dab", size = 34454, upload-time = "2025-11-18T21:42:42.156Z" }, + { url = "https://files.pythonhosted.org/packages/00/43/7ea062f2228f47b5779dcfa14dab48d6e29f979b35d1a5102b0ba80b9c1b/mypy_boto3_bedrock_runtime-1.42.42-py3-none-any.whl", hash = "sha256:b2d16eae22607d0685f90796b3a0afc78c0b09d45872e00eafd634a31dd9358f", size = 36077, upload-time = "2026-02-04T20:53:01.768Z" }, ] [[package]] @@ -4193,7 +4226,7 @@ wheels = [ [[package]] name = "nltk" -version = "3.9.2" +version = "3.9.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, @@ -4201,9 +4234,9 @@ dependencies = [ { name = "regex" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f9/76/3a5e4312c19a028770f86fd7c058cf9f4ec4321c6cf7526bab998a5b683c/nltk-3.9.2.tar.gz", hash = "sha256:0f409e9b069ca4177c1903c3e843eef90c7e92992fa4931ae607da6de49e1419", size = 2887629, upload-time = "2025-10-01T07:19:23.764Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/8f/915e1c12df07c70ed779d18ab83d065718a926e70d3ea33eb0cd66ffb7c0/nltk-3.9.3.tar.gz", hash = "sha256:cb5945d6424a98d694c2b9a0264519fab4363711065a46aa0ae7a2195b92e71f", size = 2923673, upload-time = "2026-02-24T12:05:53.833Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/60/90/81ac364ef94209c100e12579629dc92bf7a709a84af32f8c551b02c07e94/nltk-3.9.2-py3-none-any.whl", hash = "sha256:1e209d2b3009110635ed9709a67a1a3e33a10f799490fa71cf4bec218c11c88a", size = 1513404, upload-time = "2025-10-01T07:19:21.648Z" }, + { url = "https://files.pythonhosted.org/packages/c2/7e/9af5a710a1236e4772de8dfcc6af942a561327bb9f42b5b4a24d0cf100fd/nltk-3.9.3-py3-none-any.whl", hash = "sha256:60b3db6e9995b3dd976b1f0fa7dec22069b2677e759c28eb69b62ddd44870522", size = 1525385, upload-time = "2026-02-24T12:05:46.54Z" }, ] [[package]] @@ -4821,11 +4854,11 @@ wheels = [ [[package]] name = "packaging" -version = "26.0" +version = 
"25.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] @@ -5077,61 +5110,75 @@ wheels = [ [[package]] name = "pillow" -version = "10.4.0" +version = "12.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/74/ad3d526f3bf7b6d3f408b73fde271ec69dfac8b81341a318ce825f2b3812/pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06", size = 46555059, upload-time = "2024-07-01T09:48:43.583Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/0e/69/a31cccd538ca0b5272be2a38347f8839b97a14be104ea08b0db92f749c74/pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e", size = 3509271, upload-time = "2024-07-01T09:45:22.07Z" }, - { url = "https://files.pythonhosted.org/packages/9a/9e/4143b907be8ea0bce215f2ae4f7480027473f8b61fcedfda9d851082a5d2/pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d", size = 3375658, upload-time = "2024-07-01T09:45:25.292Z" }, - { url = "https://files.pythonhosted.org/packages/8a/25/1fc45761955f9359b1169aa75e241551e74ac01a09f487adaaf4c3472d11/pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856", size = 4332075, upload-time = "2024-07-01T09:45:27.94Z" }, - { url = "https://files.pythonhosted.org/packages/5e/dd/425b95d0151e1d6c951f45051112394f130df3da67363b6bc75dc4c27aba/pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f", size = 4444808, upload-time = "2024-07-01T09:45:30.305Z" }, - { url = "https://files.pythonhosted.org/packages/b1/84/9a15cc5726cbbfe7f9f90bfb11f5d028586595907cd093815ca6644932e3/pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b", size = 4356290, upload-time = "2024-07-01T09:45:32.868Z" }, - { url = "https://files.pythonhosted.org/packages/b5/5b/6651c288b08df3b8c1e2f8c1152201e0b25d240e22ddade0f1e242fc9fa0/pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc", size = 4525163, upload-time = "2024-07-01T09:45:35.279Z" }, - { url = 
"https://files.pythonhosted.org/packages/07/8b/34854bf11a83c248505c8cb0fcf8d3d0b459a2246c8809b967963b6b12ae/pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e", size = 4463100, upload-time = "2024-07-01T09:45:37.74Z" }, - { url = "https://files.pythonhosted.org/packages/78/63/0632aee4e82476d9cbe5200c0cdf9ba41ee04ed77887432845264d81116d/pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46", size = 4592880, upload-time = "2024-07-01T09:45:39.89Z" }, - { url = "https://files.pythonhosted.org/packages/df/56/b8663d7520671b4398b9d97e1ed9f583d4afcbefbda3c6188325e8c297bd/pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984", size = 2235218, upload-time = "2024-07-01T09:45:42.771Z" }, - { url = "https://files.pythonhosted.org/packages/f4/72/0203e94a91ddb4a9d5238434ae6c1ca10e610e8487036132ea9bf806ca2a/pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141", size = 2554487, upload-time = "2024-07-01T09:45:45.176Z" }, - { url = "https://files.pythonhosted.org/packages/bd/52/7e7e93d7a6e4290543f17dc6f7d3af4bd0b3dd9926e2e8a35ac2282bc5f4/pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1", size = 2243219, upload-time = "2024-07-01T09:45:47.274Z" }, - { url = "https://files.pythonhosted.org/packages/a7/62/c9449f9c3043c37f73e7487ec4ef0c03eb9c9afc91a92b977a67b3c0bbc5/pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c", size = 3509265, upload-time = "2024-07-01T09:45:49.812Z" }, - { url = "https://files.pythonhosted.org/packages/f4/5f/491dafc7bbf5a3cc1845dc0430872e8096eb9e2b6f8161509d124594ec2d/pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", 
hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be", size = 3375655, upload-time = "2024-07-01T09:45:52.462Z" }, - { url = "https://files.pythonhosted.org/packages/73/d5/c4011a76f4207a3c151134cd22a1415741e42fa5ddecec7c0182887deb3d/pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3", size = 4340304, upload-time = "2024-07-01T09:45:55.006Z" }, - { url = "https://files.pythonhosted.org/packages/ac/10/c67e20445a707f7a610699bba4fe050583b688d8cd2d202572b257f46600/pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6", size = 4452804, upload-time = "2024-07-01T09:45:58.437Z" }, - { url = "https://files.pythonhosted.org/packages/a9/83/6523837906d1da2b269dee787e31df3b0acb12e3d08f024965a3e7f64665/pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe", size = 4365126, upload-time = "2024-07-01T09:46:00.713Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e5/8c68ff608a4203085158cff5cc2a3c534ec384536d9438c405ed6370d080/pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319", size = 4533541, upload-time = "2024-07-01T09:46:03.235Z" }, - { url = "https://files.pythonhosted.org/packages/f4/7c/01b8dbdca5bc6785573f4cee96e2358b0918b7b2c7b60d8b6f3abf87a070/pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d", size = 4471616, upload-time = "2024-07-01T09:46:05.356Z" }, - { url = "https://files.pythonhosted.org/packages/c8/57/2899b82394a35a0fbfd352e290945440e3b3785655a03365c0ca8279f351/pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696", size = 4600802, upload-time = "2024-07-01T09:46:08.145Z" }, - { url = "https://files.pythonhosted.org/packages/4d/d7/a44f193d4c26e58ee5d2d9db3d4854b2cfb5b5e08d360a5e03fe987c0086/pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496", size = 2235213, upload-time = "2024-07-01T09:46:10.211Z" }, - { url = "https://files.pythonhosted.org/packages/c1/d0/5866318eec2b801cdb8c82abf190c8343d8a1cd8bf5a0c17444a6f268291/pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91", size = 2554498, upload-time = "2024-07-01T09:46:12.685Z" }, - { url = "https://files.pythonhosted.org/packages/d4/c8/310ac16ac2b97e902d9eb438688de0d961660a87703ad1561fd3dfbd2aa0/pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22", size = 2243219, upload-time = "2024-07-01T09:46:14.83Z" }, - { url = "https://files.pythonhosted.org/packages/05/cb/0353013dc30c02a8be34eb91d25e4e4cf594b59e5a55ea1128fde1e5f8ea/pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94", size = 3509350, upload-time = "2024-07-01T09:46:17.177Z" }, - { url = "https://files.pythonhosted.org/packages/e7/cf/5c558a0f247e0bf9cec92bff9b46ae6474dd736f6d906315e60e4075f737/pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597", size = 3374980, upload-time = "2024-07-01T09:46:19.169Z" }, - { url = "https://files.pythonhosted.org/packages/84/48/6e394b86369a4eb68b8a1382c78dc092245af517385c086c5094e3b34428/pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80", size = 4343799, upload-time = "2024-07-01T09:46:21.883Z" }, - { 
url = "https://files.pythonhosted.org/packages/3b/f3/a8c6c11fa84b59b9df0cd5694492da8c039a24cd159f0f6918690105c3be/pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca", size = 4459973, upload-time = "2024-07-01T09:46:24.321Z" }, - { url = "https://files.pythonhosted.org/packages/7d/1b/c14b4197b80150fb64453585247e6fb2e1d93761fa0fa9cf63b102fde822/pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef", size = 4370054, upload-time = "2024-07-01T09:46:26.825Z" }, - { url = "https://files.pythonhosted.org/packages/55/77/40daddf677897a923d5d33329acd52a2144d54a9644f2a5422c028c6bf2d/pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a", size = 4539484, upload-time = "2024-07-01T09:46:29.355Z" }, - { url = "https://files.pythonhosted.org/packages/40/54/90de3e4256b1207300fb2b1d7168dd912a2fb4b2401e439ba23c2b2cabde/pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b", size = 4477375, upload-time = "2024-07-01T09:46:31.756Z" }, - { url = "https://files.pythonhosted.org/packages/13/24/1bfba52f44193860918ff7c93d03d95e3f8748ca1de3ceaf11157a14cf16/pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9", size = 4608773, upload-time = "2024-07-01T09:46:33.73Z" }, - { url = "https://files.pythonhosted.org/packages/55/04/5e6de6e6120451ec0c24516c41dbaf80cce1b6451f96561235ef2429da2e/pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42", size = 2235690, upload-time = "2024-07-01T09:46:36.587Z" }, - { url = 
"https://files.pythonhosted.org/packages/74/0a/d4ce3c44bca8635bd29a2eab5aa181b654a734a29b263ca8efe013beea98/pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a", size = 2554951, upload-time = "2024-07-01T09:46:38.777Z" }, - { url = "https://files.pythonhosted.org/packages/b5/ca/184349ee40f2e92439be9b3502ae6cfc43ac4b50bc4fc6b3de7957563894/pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9", size = 2243427, upload-time = "2024-07-01T09:46:43.15Z" }, - { url = "https://files.pythonhosted.org/packages/c3/00/706cebe7c2c12a6318aabe5d354836f54adff7156fd9e1bd6c89f4ba0e98/pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3", size = 3525685, upload-time = "2024-07-01T09:46:45.194Z" }, - { url = "https://files.pythonhosted.org/packages/cf/76/f658cbfa49405e5ecbfb9ba42d07074ad9792031267e782d409fd8fe7c69/pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb", size = 3374883, upload-time = "2024-07-01T09:46:47.331Z" }, - { url = "https://files.pythonhosted.org/packages/46/2b/99c28c4379a85e65378211971c0b430d9c7234b1ec4d59b2668f6299e011/pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70", size = 4339837, upload-time = "2024-07-01T09:46:49.647Z" }, - { url = "https://files.pythonhosted.org/packages/f1/74/b1ec314f624c0c43711fdf0d8076f82d9d802afd58f1d62c2a86878e8615/pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be", size = 4455562, upload-time = "2024-07-01T09:46:51.811Z" }, - { url = 
"https://files.pythonhosted.org/packages/4a/2a/4b04157cb7b9c74372fa867096a1607e6fedad93a44deeff553ccd307868/pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0", size = 4366761, upload-time = "2024-07-01T09:46:53.961Z" }, - { url = "https://files.pythonhosted.org/packages/ac/7b/8f1d815c1a6a268fe90481232c98dd0e5fa8c75e341a75f060037bd5ceae/pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc", size = 4536767, upload-time = "2024-07-01T09:46:56.664Z" }, - { url = "https://files.pythonhosted.org/packages/e5/77/05fa64d1f45d12c22c314e7b97398ffb28ef2813a485465017b7978b3ce7/pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a", size = 4477989, upload-time = "2024-07-01T09:46:58.977Z" }, - { url = "https://files.pythonhosted.org/packages/12/63/b0397cfc2caae05c3fb2f4ed1b4fc4fc878f0243510a7a6034ca59726494/pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309", size = 4610255, upload-time = "2024-07-01T09:47:01.189Z" }, - { url = "https://files.pythonhosted.org/packages/7b/f9/cfaa5082ca9bc4a6de66ffe1c12c2d90bf09c309a5f52b27759a596900e7/pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060", size = 2235603, upload-time = "2024-07-01T09:47:03.918Z" }, - { url = "https://files.pythonhosted.org/packages/01/6a/30ff0eef6e0c0e71e55ded56a38d4859bf9d3634a94a88743897b5f96936/pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea", size = 2554972, upload-time = "2024-07-01T09:47:06.152Z" }, - { url = 
"https://files.pythonhosted.org/packages/48/2c/2e0a52890f269435eee38b21c8218e102c621fe8d8df8b9dd06fabf879ba/pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d", size = 2243375, upload-time = "2024-07-01T09:47:09.065Z" }, - { url = "https://files.pythonhosted.org/packages/38/30/095d4f55f3a053392f75e2eae45eba3228452783bab3d9a920b951ac495c/pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4", size = 3493889, upload-time = "2024-07-01T09:48:04.815Z" }, - { url = "https://files.pythonhosted.org/packages/f3/e8/4ff79788803a5fcd5dc35efdc9386af153569853767bff74540725b45863/pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da", size = 3346160, upload-time = "2024-07-01T09:48:07.206Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ac/4184edd511b14f760c73f5bb8a5d6fd85c591c8aff7c2229677a355c4179/pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026", size = 3435020, upload-time = "2024-07-01T09:48:09.66Z" }, - { url = "https://files.pythonhosted.org/packages/da/21/1749cd09160149c0a246a81d646e05f35041619ce76f6493d6a96e8d1103/pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e", size = 3490539, upload-time = "2024-07-01T09:48:12.529Z" }, - { url = "https://files.pythonhosted.org/packages/b6/f5/f71fe1888b96083b3f6dfa0709101f61fc9e972c0c8d04e9d93ccef2a045/pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5", size = 3476125, upload-time = "2024-07-01T09:48:14.891Z" }, - { url = 
"https://files.pythonhosted.org/packages/96/b9/c0362c54290a31866c3526848583a2f45a535aa9d725fd31e25d318c805f/pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885", size = 3579373, upload-time = "2024-07-01T09:48:17.601Z" }, - { url = "https://files.pythonhosted.org/packages/52/3b/ce7a01026a7cf46e5452afa86f97a5e88ca97f562cafa76570178ab56d8d/pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5", size = 2554661, upload-time = "2024-07-01T09:48:20.293Z" }, + { url = "https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" }, + { url = "https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" }, + { url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, upload-time = "2026-02-11T04:20:10.82Z" }, + { url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" }, + { url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" }, + { url = "https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" }, + { url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" }, + { url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" }, + { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" }, + { url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" }, + { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" }, + { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" }, + { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" }, + { url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" }, + { url = 
"https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" }, + { url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" }, + { url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" }, + { url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" }, + { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" }, + { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" }, + { url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" }, + { url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" }, + { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" }, + { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" }, + { url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" }, + { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" }, + { url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" }, + { url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" }, + { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" }, + { url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" }, + { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" }, + { url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" }, + { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" }, + { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" }, + { url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" }, + { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" }, + { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" }, + { url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" }, + { url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" }, + { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" }, 
+ { url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" }, ] [[package]] @@ -5214,7 +5261,7 @@ wheels = [ [[package]] name = "pre-commit" -version = "4.5.0" +version = "4.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -5223,9 +5270,9 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f4/9b/6a4ffb4ed980519da959e1cf3122fc6cb41211daa58dbae1c73c0e519a37/pre_commit-4.5.0.tar.gz", hash = "sha256:dc5a065e932b19fc1d4c653c6939068fe54325af8e741e74e88db4d28a4dd66b", size = 198428, upload-time = "2025-11-22T21:02:42.304Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = "sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/c4/b2d28e9d2edf4f1713eb3c29307f1a63f3d67cf09bdda29715a36a68921a/pre_commit-4.5.0-py2.py3-none-any.whl", hash = "sha256:25e2ce09595174d9c97860a95609f9f852c0614ba602de3561e267547f2335e1", size = 226429, upload-time = "2025-11-22T21:02:40.836Z" }, + { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" }, ] [[package]] @@ -5412,15 +5459,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/80/2d/1bb683f64737bbb1f86c82b7359db1eb2be4e2c0c13b947f80efefa7d3e5/psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa", size = 2714215, upload-time = "2025-10-10T11:13:07.14Z" }, ] -[[package]] -name = "py" -version = "1.11.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/98/ff/fec109ceb715d2a6b4c4a85a61af3b40c723a961e8828319fbcb15b868dc/py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719", size = 207796, upload-time = "2021-11-04T17:17:01.377Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/f0/10642828a8dfb741e5f3fbaac830550a518a775c7fff6f04a007259b0548/py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378", size = 98708, upload-time = "2021-11-04T17:17:00.152Z" }, -] - [[package]] name = "py-rust-stemmers" version = "0.1.5" @@ -5914,22 +5952,6 @@ crypto = [ { name = "cryptography" }, ] -[[package]] -name = "pylance" -version = "0.9.18" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "numpy" }, - { name = "pyarrow" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/ca/b8/15d4d380f0858dde46d42891776017e3bf9eb40129b3fe222637eecf8f43/pylance-0.9.18-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:fe2445d922c594d90e89111385106f6b152caab27996217db7bb4b8947eb0bea", size = 20319043, upload-time = "2024-02-19T07:36:11.206Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f8/69f927a215d415362300d14a50b3cbc6575fd640ca5e632d488e022d3af1/pylance-0.9.18-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:a2c424c50f5186edbbcc5a26f34063ed09d9a7390e28033395728ce02b5658f0", size = 18780426, upload-time = "2024-02-19T07:30:10.963Z" }, - { url = 
"https://files.pythonhosted.org/packages/a1/b8/991e4544cfa21de2c7de5dd6bd8410df454fec5b374680fa96cd8698763b/pylance-0.9.18-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10af06edfde3e8451bf2251381d3980a0a164eab9d4c3d4dc8b6318969e958a6", size = 21584420, upload-time = "2024-02-19T07:32:30.283Z" }, - { url = "https://files.pythonhosted.org/packages/3c/5e/ff80f31d995315790393cbe599565f55d03eb717654cfeb65b701803e887/pylance-0.9.18-cp38-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:d8bb9045d7163cc966b9fe34a917044192be37a90915475b77461e5b7d89e442", size = 19960982, upload-time = "2024-02-19T07:32:49.686Z" }, - { url = "https://files.pythonhosted.org/packages/2d/e5/c0e0a6cad08ab86a9c0bce7e8caef8f666337bb7950e2ab151ea4f88242d/pylance-0.9.18-cp38-abi3-win_amd64.whl", hash = "sha256:5ea80b7bf70d992f3fe63bce2d2f064f742124c04eaedeb76baca408ded85a2c", size = 22089079, upload-time = "2024-02-19T07:42:43.262Z" }, -] - [[package]] name = "pylatexenc" version = "2.10" @@ -6149,11 +6171,14 @@ wheels = [ [[package]] name = "pypdf" -version = "4.0.2" +version = "6.7.5" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5f/de/5ee74158c3090ec99eae9f90c9e9c18f207fa5c722b0e95d6fa7faebcdf8/pypdf-4.0.2.tar.gz", hash = "sha256:3316d9ddfcff5df67ae3cdfe8b945c432aa43e7f970bae7c2a4ab4fe129cd937", size = 280173, upload-time = "2024-02-18T15:45:10.729Z" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f6/52/37cc0aa9e9d1bf7729a737a0d83f8b3f851c8eb137373d9f71eafb0a3405/pypdf-6.7.5.tar.gz", hash = "sha256:40bb2e2e872078655f12b9b89e2f900888bb505e88a82150b64f9f34fa25651d", size = 5304278, upload-time = "2026-03-02T09:05:21.464Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/87/30f8a2963247fd7b1267e600379c5e3f51c9849a07d042398e4485b7415c/pypdf-4.0.2-py3-none-any.whl", hash = 
"sha256:a62daa2a24d5a608ba1b6284dde185317ce3644f89b9ebe5314d0c5d1c9f257d", size = 283953, upload-time = "2024-02-18T15:45:07.857Z" }, + { url = "https://files.pythonhosted.org/packages/05/89/336673efd0a88956562658aba4f0bbef7cb92a6fbcbcaf94926dbc82b408/pypdf-6.7.5-py3-none-any.whl", hash = "sha256:07ba7f1d6e6d9aa2a17f5452e320a84718d4ce863367f7ede2fd72280349ab13", size = 331421, upload-time = "2026-03-02T09:05:19.722Z" }, ] [[package]] @@ -6225,15 +6250,6 @@ dependencies = [ ] sdist = { url = "https://files.pythonhosted.org/packages/12/a0/d0638470df605ce266991fb04f74c69ab1bed3b90ac3838e9c3c8b69b66a/Pysher-1.0.8.tar.gz", hash = "sha256:7849c56032b208e49df67d7bd8d49029a69042ab0bb45b2ed59fa08f11ac5988", size = 9071, upload-time = "2022-10-10T13:41:09.936Z" } -[[package]] -name = "pysocks" -version = "1.7.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429, upload-time = "2019-09-20T02:07:35.714Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725, upload-time = "2019-09-20T02:06:22.938Z" }, -] - [[package]] name = "pytest" version = "8.4.2" @@ -6524,8 +6540,7 @@ dependencies = [ { name = "portalocker" }, { name = "protobuf" }, { name = "pydantic" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/1d/56/3f355f931c239c260b4fe3bd6433ec6c9e6185cd5ae0970fe89d0ca6daee/qdrant_client-1.14.3.tar.gz", hash = "sha256:bb899e3e065b79c04f5e47053d59176150c0a5dabc09d7f476c8ce8e52f4d281", size = 286766, upload-time = "2025-06-16T11:13:47.838Z" } wheels = [ @@ -6627,15 +6642,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/fd/0d025466f0f84552634f2a94c018df34568fe55cc97184a6bb2c719c5b3a/rapidocr-3.6.0-py3-none-any.whl", hash = "sha256:d16b43872fc4dfa1e60996334dcd0dc3e3f1f64161e2332bc1873b9f65754e6b", size = 15067340, upload-time = "2026-01-28T14:45:04.271Z" }, ] -[[package]] -name = "ratelimiter" -version = "1.2.0.post0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5b/e0/b36010bddcf91444ff51179c076e4a09c513674a56758d7cfea4f6520e29/ratelimiter-1.2.0.post0.tar.gz", hash = "sha256:5c395dcabdbbde2e5178ef3f89b568a3066454a6ddc223b76473dac22f89b4f7", size = 9182, upload-time = "2017-12-12T00:33:38.783Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/51/80/2164fa1e863ad52cc8d870855fba0fbb51edd943edffd516d54b5f6f8ff8/ratelimiter-1.2.0.post0-py3-none-any.whl", hash = "sha256:a52be07bc0bb0b3674b4b304550f10c769bbb00fead3072e035904474259809f", size = 6642, upload-time = "2017-12-12T00:33:37.505Z" }, -] - [[package]] name = "redis" version = "7.1.0" @@ -6759,8 +6765,7 @@ dependencies = [ { name = "certifi" }, { name = "charset-normalizer" }, { name = "idna" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = 
"sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } wheels = [ @@ -6792,19 +6797,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, ] -[[package]] -name = "retry" -version = "0.9.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "decorator" }, - { name = "py" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9d/72/75d0b85443fbc8d9f38d08d2b1b67cc184ce35280e4a3813cda2f445f3a4/retry-0.9.2.tar.gz", hash = "sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4", size = 6448, upload-time = "2016-05-11T13:58:51.541Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4b/0d/53aea75710af4528a25ed6837d71d117602b01946b307a3912cb3cfcbcba/retry-0.9.2-py2.py3-none-any.whl", hash = "sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606", size = 7986, upload-time = "2016-05-11T13:58:39.925Z" }, -] - [[package]] name = "rich" version = "14.3.2" @@ -6941,28 +6933,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.7" +version = "0.15.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b7/5b/dd7406afa6c95e3d8fa9d652b6d6dd17dd4a6bf63cb477014e8ccd3dcd46/ruff-0.14.7.tar.gz", hash = "sha256:3417deb75d23bd14a722b57b0a1435561db65f0ad97435b4cf9f85ffcef34ae5", size = 5727324, upload-time = "2025-11-28T20:55:10.525Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/dc/4e6ac71b511b141cf626357a3946679abeba4cf67bc7cc5a17920f31e10d/ruff-0.15.1.tar.gz", hash = "sha256:c590fe13fb57c97141ae975c03a1aedb3d3156030cabd740d6ff0b0d601e203f", size = 4540855, upload-time = 
"2026-02-12T23:09:09.998Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/b1/7ea5647aaf90106f6d102230e5df874613da43d1089864da1553b899ba5e/ruff-0.14.7-py3-none-linux_armv6l.whl", hash = "sha256:b9d5cb5a176c7236892ad7224bc1e63902e4842c460a0b5210701b13e3de4fca", size = 13414475, upload-time = "2025-11-28T20:54:54.569Z" }, - { url = "https://files.pythonhosted.org/packages/af/19/fddb4cd532299db9cdaf0efdc20f5c573ce9952a11cb532d3b859d6d9871/ruff-0.14.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3f64fe375aefaf36ca7d7250292141e39b4cea8250427482ae779a2aa5d90015", size = 13634613, upload-time = "2025-11-28T20:55:17.54Z" }, - { url = "https://files.pythonhosted.org/packages/40/2b/469a66e821d4f3de0440676ed3e04b8e2a1dc7575cf6fa3ba6d55e3c8557/ruff-0.14.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:93e83bd3a9e1a3bda64cb771c0d47cda0e0d148165013ae2d3554d718632d554", size = 12765458, upload-time = "2025-11-28T20:55:26.128Z" }, - { url = "https://files.pythonhosted.org/packages/f1/05/0b001f734fe550bcfde4ce845948ac620ff908ab7241a39a1b39bb3c5f49/ruff-0.14.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3838948e3facc59a6070795de2ae16e5786861850f78d5914a03f12659e88f94", size = 13236412, upload-time = "2025-11-28T20:55:28.602Z" }, - { url = "https://files.pythonhosted.org/packages/11/36/8ed15d243f011b4e5da75cd56d6131c6766f55334d14ba31cce5461f28aa/ruff-0.14.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24c8487194d38b6d71cd0fd17a5b6715cda29f59baca1defe1e3a03240f851d1", size = 13182949, upload-time = "2025-11-28T20:55:33.265Z" }, - { url = "https://files.pythonhosted.org/packages/3b/cf/fcb0b5a195455729834f2a6eadfe2e4519d8ca08c74f6d2b564a4f18f553/ruff-0.14.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79c73db6833f058a4be8ffe4a0913b6d4ad41f6324745179bd2aa09275b01d0b", size = 13816470, upload-time = "2025-11-28T20:55:08.203Z" }, - { url = 
"https://files.pythonhosted.org/packages/7f/5d/34a4748577ff7a5ed2f2471456740f02e86d1568a18c9faccfc73bd9ca3f/ruff-0.14.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:12eb7014fccff10fc62d15c79d8a6be4d0c2d60fe3f8e4d169a0d2def75f5dad", size = 15289621, upload-time = "2025-11-28T20:55:30.837Z" }, - { url = "https://files.pythonhosted.org/packages/53/53/0a9385f047a858ba133d96f3f8e3c9c66a31cc7c4b445368ef88ebeac209/ruff-0.14.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c623bbdc902de7ff715a93fa3bb377a4e42dd696937bf95669118773dbf0c50", size = 14975817, upload-time = "2025-11-28T20:55:24.107Z" }, - { url = "https://files.pythonhosted.org/packages/a8/d7/2f1c32af54c3b46e7fadbf8006d8b9bcfbea535c316b0bd8813d6fb25e5d/ruff-0.14.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f53accc02ed2d200fa621593cdb3c1ae06aa9b2c3cae70bc96f72f0000ae97a9", size = 14284549, upload-time = "2025-11-28T20:55:06.08Z" }, - { url = "https://files.pythonhosted.org/packages/92/05/434ddd86becd64629c25fb6b4ce7637dd52a45cc4a4415a3008fe61c27b9/ruff-0.14.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:281f0e61a23fcdcffca210591f0f53aafaa15f9025b5b3f9706879aaa8683bc4", size = 14071389, upload-time = "2025-11-28T20:55:35.617Z" }, - { url = "https://files.pythonhosted.org/packages/ff/50/fdf89d4d80f7f9d4f420d26089a79b3bb1538fe44586b148451bc2ba8d9c/ruff-0.14.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:dbbaa5e14148965b91cb090236931182ee522a5fac9bc5575bafc5c07b9f9682", size = 14202679, upload-time = "2025-11-28T20:55:01.472Z" }, - { url = "https://files.pythonhosted.org/packages/77/54/87b34988984555425ce967f08a36df0ebd339bb5d9d0e92a47e41151eafc/ruff-0.14.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1464b6e54880c0fe2f2d6eaefb6db15373331414eddf89d6b903767ae2458143", size = 13147677, upload-time = "2025-11-28T20:55:19.933Z" }, - { url = 
"https://files.pythonhosted.org/packages/67/29/f55e4d44edfe053918a16a3299e758e1c18eef216b7a7092550d7a9ec51c/ruff-0.14.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f217ed871e4621ea6128460df57b19ce0580606c23aeab50f5de425d05226784", size = 13151392, upload-time = "2025-11-28T20:55:21.967Z" }, - { url = "https://files.pythonhosted.org/packages/36/69/47aae6dbd4f1d9b4f7085f4d9dcc84e04561ee7ad067bf52e0f9b02e3209/ruff-0.14.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6be02e849440ed3602d2eb478ff7ff07d53e3758f7948a2a598829660988619e", size = 13412230, upload-time = "2025-11-28T20:55:12.749Z" }, - { url = "https://files.pythonhosted.org/packages/b7/4b/6e96cb6ba297f2ba502a231cd732ed7c3de98b1a896671b932a5eefa3804/ruff-0.14.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19a0f116ee5e2b468dfe80c41c84e2bbd6b74f7b719bee86c2ecde0a34563bcc", size = 14195397, upload-time = "2025-11-28T20:54:56.896Z" }, - { url = "https://files.pythonhosted.org/packages/69/82/251d5f1aa4dcad30aed491b4657cecd9fb4274214da6960ffec144c260f7/ruff-0.14.7-py3-none-win32.whl", hash = "sha256:e33052c9199b347c8937937163b9b149ef6ab2e4bb37b042e593da2e6f6cccfa", size = 13126751, upload-time = "2025-11-28T20:55:03.47Z" }, - { url = "https://files.pythonhosted.org/packages/a8/b5/d0b7d145963136b564806f6584647af45ab98946660d399ec4da79cae036/ruff-0.14.7-py3-none-win_amd64.whl", hash = "sha256:e17a20ad0d3fad47a326d773a042b924d3ac31c6ca6deb6c72e9e6b5f661a7c6", size = 14531726, upload-time = "2025-11-28T20:54:59.121Z" }, - { url = "https://files.pythonhosted.org/packages/1d/d2/1637f4360ada6a368d3265bf39f2cf737a0aaab15ab520fc005903e883f8/ruff-0.14.7-py3-none-win_arm64.whl", hash = "sha256:be4d653d3bea1b19742fcc6502354e32f65cd61ff2fbdb365803ef2c2aec6228", size = 13609215, upload-time = "2025-11-28T20:55:15.375Z" }, + { url = "https://files.pythonhosted.org/packages/23/bf/e6e4324238c17f9d9120a9d60aa99a7daaa21204c07fcd84e2ef03bb5fd1/ruff-0.15.1-py3-none-linux_armv6l.whl", hash = 
"sha256:b101ed7cf4615bda6ffe65bdb59f964e9f4a0d3f85cbf0e54f0ab76d7b90228a", size = 10367819, upload-time = "2026-02-12T23:09:03.598Z" }, + { url = "https://files.pythonhosted.org/packages/b3/ea/c8f89d32e7912269d38c58f3649e453ac32c528f93bb7f4219258be2e7ed/ruff-0.15.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:939c995e9277e63ea632cc8d3fae17aa758526f49a9a850d2e7e758bfef46602", size = 10798618, upload-time = "2026-02-12T23:09:22.928Z" }, + { url = "https://files.pythonhosted.org/packages/5e/0f/1d0d88bc862624247d82c20c10d4c0f6bb2f346559d8af281674cf327f15/ruff-0.15.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1d83466455fdefe60b8d9c8df81d3c1bbb2115cede53549d3b522ce2bc703899", size = 10148518, upload-time = "2026-02-12T23:08:58.339Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c8/291c49cefaa4a9248e986256df2ade7add79388fe179e0691be06fae6f37/ruff-0.15.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9457e3c3291024866222b96108ab2d8265b477e5b1534c7ddb1810904858d16", size = 10518811, upload-time = "2026-02-12T23:09:31.865Z" }, + { url = "https://files.pythonhosted.org/packages/c3/1a/f5707440e5ae43ffa5365cac8bbb91e9665f4a883f560893829cf16a606b/ruff-0.15.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92c92b003e9d4f7fbd33b1867bb15a1b785b1735069108dfc23821ba045b29bc", size = 10196169, upload-time = "2026-02-12T23:09:17.306Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ff/26ddc8c4da04c8fd3ee65a89c9fb99eaa5c30394269d424461467be2271f/ruff-0.15.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe5c41ab43e3a06778844c586251eb5a510f67125427625f9eb2b9526535779", size = 10990491, upload-time = "2026-02-12T23:09:25.503Z" }, + { url = "https://files.pythonhosted.org/packages/fc/00/50920cb385b89413f7cdb4bb9bc8fc59c1b0f30028d8bccc294189a54955/ruff-0.15.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:66a6dd6df4d80dc382c6484f8ce1bcceb55c32e9f27a8b94c32f6c7331bf14fb", size = 11843280, upload-time = "2026-02-12T23:09:19.88Z" }, + { url = "https://files.pythonhosted.org/packages/5d/6d/2f5cad8380caf5632a15460c323ae326f1e1a2b5b90a6ee7519017a017ca/ruff-0.15.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a4a42cbb8af0bda9bcd7606b064d7c0bc311a88d141d02f78920be6acb5aa83", size = 11274336, upload-time = "2026-02-12T23:09:14.907Z" }, + { url = "https://files.pythonhosted.org/packages/a3/1d/5f56cae1d6c40b8a318513599b35ea4b075d7dc1cd1d04449578c29d1d75/ruff-0.15.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ab064052c31dddada35079901592dfba2e05f5b1e43af3954aafcbc1096a5b2", size = 11137288, upload-time = "2026-02-12T23:09:07.475Z" }, + { url = "https://files.pythonhosted.org/packages/cd/20/6f8d7d8f768c93b0382b33b9306b3b999918816da46537d5a61635514635/ruff-0.15.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5631c940fe9fe91f817a4c2ea4e81f47bee3ca4aa646134a24374f3c19ad9454", size = 11070681, upload-time = "2026-02-12T23:08:55.43Z" }, + { url = "https://files.pythonhosted.org/packages/9a/67/d640ac76069f64cdea59dba02af2e00b1fa30e2103c7f8d049c0cff4cafd/ruff-0.15.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:68138a4ba184b4691ccdc39f7795c66b3c68160c586519e7e8444cf5a53e1b4c", size = 10486401, upload-time = "2026-02-12T23:09:27.927Z" }, + { url = "https://files.pythonhosted.org/packages/65/3d/e1429f64a3ff89297497916b88c32a5cc88eeca7e9c787072d0e7f1d3e1e/ruff-0.15.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:518f9af03bfc33c03bdb4cb63fabc935341bb7f54af500f92ac309ecfbba6330", size = 10197452, upload-time = "2026-02-12T23:09:12.147Z" }, + { url = "https://files.pythonhosted.org/packages/78/83/e2c3bade17dad63bf1e1c2ffaf11490603b760be149e1419b07049b36ef2/ruff-0.15.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:da79f4d6a826caaea95de0237a67e33b81e6ec2e25fc7e1993a4015dffca7c61", size = 10693900, upload-time 
= "2026-02-12T23:09:34.418Z" }, + { url = "https://files.pythonhosted.org/packages/a1/27/fdc0e11a813e6338e0706e8b39bb7a1d61ea5b36873b351acee7e524a72a/ruff-0.15.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3dd86dccb83cd7d4dcfac303ffc277e6048600dfc22e38158afa208e8bf94a1f", size = 11227302, upload-time = "2026-02-12T23:09:36.536Z" }, + { url = "https://files.pythonhosted.org/packages/f6/58/ac864a75067dcbd3b95be5ab4eb2b601d7fbc3d3d736a27e391a4f92a5c1/ruff-0.15.1-py3-none-win32.whl", hash = "sha256:660975d9cb49b5d5278b12b03bb9951d554543a90b74ed5d366b20e2c57c2098", size = 10462555, upload-time = "2026-02-12T23:09:29.899Z" }, + { url = "https://files.pythonhosted.org/packages/e0/5e/d4ccc8a27ecdb78116feac4935dfc39d1304536f4296168f91ed3ec00cd2/ruff-0.15.1-py3-none-win_amd64.whl", hash = "sha256:c820fef9dd5d4172a6570e5721704a96c6679b80cf7be41659ed439653f62336", size = 11599956, upload-time = "2026-02-12T23:09:01.157Z" }, + { url = "https://files.pythonhosted.org/packages/2a/07/5bda6a85b220c64c65686bc85bd0bbb23b29c62b3a9f9433fa55f17cda93/ruff-0.15.1-py3-none-win_arm64.whl", hash = "sha256:5ff7d5f0f88567850f45081fac8f4ec212be8d0b963e385c3f7d0d2eb4899416", size = 10874604, upload-time = "2026-02-12T23:09:05.515Z" }, ] [[package]] @@ -7093,8 +7084,7 @@ dependencies = [ { name = "loguru" }, { name = "python-dateutil" }, { name = "requests" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/17/40/f2baf15372fba9e67c0f918ea9d753916bf875019ead972cd76e8aa0ff1b/scrapfly_sdk-0.8.24.tar.gz", hash = "sha256:84fb0a22c3df9cf3aca9bdc1ed191419e27d92a055ae70d06147ac0ced7ee654", size = 42460, upload-time = "2026-01-07T11:10:50.236Z" } wheels = [ @@ -7116,7 
+7106,7 @@ dependencies = [ { name = "trio", marker = "platform_python_implementation == 'PyPy'" }, { name = "trio-websocket", marker = "platform_python_implementation == 'PyPy'" }, { name = "typing-extensions", marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, extra = ["socks"], marker = "platform_python_implementation == 'PyPy'" }, + { name = "urllib3", marker = "platform_python_implementation == 'PyPy'" }, { name = "websocket-client", marker = "platform_python_implementation == 'PyPy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/54/2d/fafffe946099033ccf22bf89e12eede14c1d3c5936110c5f6f2b9830722c/selenium-4.32.0.tar.gz", hash = "sha256:b9509bef4056f4083772abb1ae19ff57247d617a29255384b26be6956615b206", size = 870997, upload-time = "2025-05-02T20:35:27.325Z" } @@ -7142,7 +7132,7 @@ dependencies = [ { name = "types-certifi", marker = "platform_python_implementation != 'PyPy'" }, { name = "types-urllib3", marker = "platform_python_implementation != 'PyPy'" }, { name = "typing-extensions", marker = "platform_python_implementation != 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, extra = ["socks"], marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3", marker = "platform_python_implementation != 'PyPy'" }, { name = "websocket-client", marker = "platform_python_implementation != 'PyPy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/66/ef/a5727fa7b33d20d296322adf851b76072d8d3513e1b151969d3228437faf/selenium-4.40.0.tar.gz", hash = "sha256:a88f5905d88ad0b84991c2386ea39e2bbde6d6c334be38df5842318ba98eaa8c", size = 930444, upload-time = "2026-01-18T23:12:31.565Z" } @@ -7178,8 +7168,7 @@ version = "2.52.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, - { name = "urllib3", version = "1.26.20", source = { registry = 
"https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/59/eb/1b497650eb564701f9a7b8a95c51b2abe9347ed2c0b290ba78f027ebe4ea/sentry_sdk-2.52.0.tar.gz", hash = "sha256:fa0bec872cfec0302970b2996825723d67390cdd5f0229fb9efed93bd5384899", size = 410273, upload-time = "2026-02-04T15:03:54.706Z" } wheels = [ @@ -7338,6 +7327,7 @@ dependencies = [ { name = "sortedcontainers" }, { name = "tomlkit" }, { name = "typing-extensions" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/13/d2/4ae9fc7a0df36ad0ac06bc959757dfbfc58f160f58e1d62e7cebe9901fc7/snowflake_connector_python-4.2.0.tar.gz", hash = "sha256:74b1028caee3af4550a366ef89b33de80940bbf856844dd4d788a6b7a6511aff", size = 915327, upload-time = "2026-01-07T16:44:32.541Z" } wheels = [ @@ -7642,68 +7632,32 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.20.3" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/da/25/b1681c1c30ea3ea6e584ae3fffd552430b12faa599b558c4c4783f56d7ff/tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539", size = 340513, upload-time = "2024-11-05T17:34:10.403Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/c8/51/421bb0052fc4333f7c1e3231d8c6607552933d919b628c8fabd06f60ba1e/tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4", size = 2674308, upload-time = "2024-11-05T17:30:25.423Z" }, - { url = "https://files.pythonhosted.org/packages/a6/e9/f651f8d27614fd59af387f4dfa568b55207e5fac8d06eec106dc00b921c4/tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8", size = 2559363, upload-time = "2024-11-05T17:30:28.841Z" }, - { url = "https://files.pythonhosted.org/packages/e3/e8/0e9f81a09ab79f409eabfd99391ca519e315496694671bebca24c3e90448/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514", size = 2892896, upload-time = "2024-11-05T17:30:30.429Z" }, - { url = "https://files.pythonhosted.org/packages/b0/72/15fdbc149e05005e99431ecd471807db2241983deafe1e704020f608f40e/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481", size = 2802785, upload-time = "2024-11-05T17:30:32.045Z" }, - { url = "https://files.pythonhosted.org/packages/26/44/1f8aea48f9bb117d966b7272484671b33a509f6217a8e8544d79442c90db/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141", size = 3086060, upload-time = "2024-11-05T17:30:34.11Z" }, - { url = "https://files.pythonhosted.org/packages/2e/83/82ba40da99870b3a0b801cffaf4f099f088a84c7e07d32cc6ca751ce08e6/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b", size = 3096760, upload-time = "2024-11-05T17:30:36.276Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/46/7a025404201d937f86548928616c0a164308aa3998e546efdf798bf5ee9c/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118", size = 3380165, upload-time = "2024-11-05T17:30:37.642Z" }, - { url = "https://files.pythonhosted.org/packages/aa/49/15fae66ac62e49255eeedbb7f4127564b2c3f3aef2009913f525732d1a08/tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1", size = 2994038, upload-time = "2024-11-05T17:30:40.075Z" }, - { url = "https://files.pythonhosted.org/packages/f4/64/693afc9ba2393c2eed85c02bacb44762f06a29f0d1a5591fa5b40b39c0a2/tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b", size = 8977285, upload-time = "2024-11-05T17:30:42.095Z" }, - { url = "https://files.pythonhosted.org/packages/be/7e/6126c18694310fe07970717929e889898767c41fbdd95b9078e8aec0f9ef/tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d", size = 9294890, upload-time = "2024-11-05T17:30:44.563Z" }, - { url = "https://files.pythonhosted.org/packages/71/7d/5e3307a1091c8608a1e58043dff49521bc19553c6e9548c7fac6840cc2c4/tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f", size = 2196883, upload-time = "2024-11-05T17:30:46.792Z" }, - { url = "https://files.pythonhosted.org/packages/47/62/aaf5b2a526b3b10c20985d9568ff8c8f27159345eaef3347831e78cd5894/tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c", size = 2381637, upload-time = "2024-11-05T17:30:48.156Z" }, - { url = 
"https://files.pythonhosted.org/packages/c6/93/6742ef9206409d5ce1fdf44d5ca1687cdc3847ba0485424e2c731e6bcf67/tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90", size = 2674224, upload-time = "2024-11-05T17:30:49.972Z" }, - { url = "https://files.pythonhosted.org/packages/aa/14/e75ece72e99f6ef9ae07777ca9fdd78608f69466a5cecf636e9bd2f25d5c/tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d", size = 2558991, upload-time = "2024-11-05T17:30:51.666Z" }, - { url = "https://files.pythonhosted.org/packages/46/54/033b5b2ba0c3ae01e026c6f7ced147d41a2fa1c573d00a66cb97f6d7f9b3/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea", size = 2892476, upload-time = "2024-11-05T17:30:53.505Z" }, - { url = "https://files.pythonhosted.org/packages/e6/b0/cc369fb3297d61f3311cab523d16d48c869dc2f0ba32985dbf03ff811041/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9", size = 2802775, upload-time = "2024-11-05T17:30:55.229Z" }, - { url = "https://files.pythonhosted.org/packages/1a/74/62ad983e8ea6a63e04ed9c5be0b605056bf8aac2f0125f9b5e0b3e2b89fa/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb", size = 3086138, upload-time = "2024-11-05T17:30:57.332Z" }, - { url = "https://files.pythonhosted.org/packages/6b/ac/4637ba619db25094998523f9e6f5b456e1db1f8faa770a3d925d436db0c3/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1", size = 3098076, upload-time = "2024-11-05T17:30:59.455Z" }, - { url = 
"https://files.pythonhosted.org/packages/58/ce/9793f2dc2ce529369807c9c74e42722b05034af411d60f5730b720388c7d/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da", size = 3379650, upload-time = "2024-11-05T17:31:01.264Z" }, - { url = "https://files.pythonhosted.org/packages/50/f6/2841de926bc4118af996eaf0bdf0ea5b012245044766ffc0347e6c968e63/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907", size = 2994005, upload-time = "2024-11-05T17:31:02.985Z" }, - { url = "https://files.pythonhosted.org/packages/a3/b2/00915c4fed08e9505d37cf6eaab45b12b4bff8f6719d459abcb9ead86a4b/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a", size = 8977488, upload-time = "2024-11-05T17:31:04.424Z" }, - { url = "https://files.pythonhosted.org/packages/e9/ac/1c069e7808181ff57bcf2d39e9b6fbee9133a55410e6ebdaa89f67c32e83/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c", size = 9294935, upload-time = "2024-11-05T17:31:06.882Z" }, - { url = "https://files.pythonhosted.org/packages/50/47/722feb70ee68d1c4412b12d0ea4acc2713179fd63f054913990f9e259492/tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442", size = 2197175, upload-time = "2024-11-05T17:31:09.385Z" }, - { url = "https://files.pythonhosted.org/packages/75/68/1b4f928b15a36ed278332ac75d66d7eb65d865bf344d049c452c18447bf9/tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0", size = 2381616, upload-time = "2024-11-05T17:31:10.685Z" }, - { url = 
"https://files.pythonhosted.org/packages/07/00/92a08af2a6b0c88c50f1ab47d7189e695722ad9714b0ee78ea5e1e2e1def/tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f", size = 2667951, upload-time = "2024-11-05T17:31:12.356Z" }, - { url = "https://files.pythonhosted.org/packages/ec/9a/e17a352f0bffbf415cf7d73756f5c73a3219225fc5957bc2f39d52c61684/tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73", size = 2555167, upload-time = "2024-11-05T17:31:13.839Z" }, - { url = "https://files.pythonhosted.org/packages/27/37/d108df55daf4f0fcf1f58554692ff71687c273d870a34693066f0847be96/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64", size = 2898389, upload-time = "2024-11-05T17:31:15.12Z" }, - { url = "https://files.pythonhosted.org/packages/b2/27/32f29da16d28f59472fa7fb38e7782069748c7e9ab9854522db20341624c/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64", size = 2795866, upload-time = "2024-11-05T17:31:16.857Z" }, - { url = "https://files.pythonhosted.org/packages/29/4e/8a9a3c89e128c4a40f247b501c10279d2d7ade685953407c4d94c8c0f7a7/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d", size = 3085446, upload-time = "2024-11-05T17:31:18.392Z" }, - { url = "https://files.pythonhosted.org/packages/b4/3b/a2a7962c496ebcd95860ca99e423254f760f382cd4bd376f8895783afaf5/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f", size = 3094378, upload-time = "2024-11-05T17:31:20.329Z" }, - { url = 
"https://files.pythonhosted.org/packages/1f/f4/a8a33f0192a1629a3bd0afcad17d4d221bbf9276da4b95d226364208d5eb/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f", size = 3385755, upload-time = "2024-11-05T17:31:21.778Z" }, - { url = "https://files.pythonhosted.org/packages/9e/65/c83cb3545a65a9eaa2e13b22c93d5e00bd7624b354a44adbdc93d5d9bd91/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad", size = 2997679, upload-time = "2024-11-05T17:31:23.134Z" }, - { url = "https://files.pythonhosted.org/packages/55/e9/a80d4e592307688a67c7c59ab77e03687b6a8bd92eb5db763a2c80f93f57/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5", size = 8989296, upload-time = "2024-11-05T17:31:24.953Z" }, - { url = "https://files.pythonhosted.org/packages/90/af/60c957af8d2244321124e893828f1a4817cde1a2d08d09d423b73f19bd2f/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2", size = 9303621, upload-time = "2024-11-05T17:31:27.341Z" }, - { url = "https://files.pythonhosted.org/packages/be/a9/96172310ee141009646d63a1ca267c099c462d747fe5ef7e33f74e27a683/tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c", size = 2188979, upload-time = "2024-11-05T17:31:29.483Z" }, - { url = "https://files.pythonhosted.org/packages/bd/68/61d85ae7ae96dde7d0974ff3538db75d5cdc29be2e4329cd7fc51a283e22/tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2", size = 2380725, upload-time = "2024-11-05T17:31:31.315Z" }, - { url = 
"https://files.pythonhosted.org/packages/07/19/36e9eaafb229616cb8502b42030fa7fe347550e76cb618de71b498fc3222/tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84", size = 2666813, upload-time = "2024-11-05T17:31:32.783Z" }, - { url = "https://files.pythonhosted.org/packages/b9/c7/e2ce1d4f756c8a62ef93fdb4df877c2185339b6d63667b015bf70ea9d34b/tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6", size = 2555354, upload-time = "2024-11-05T17:31:34.208Z" }, - { url = "https://files.pythonhosted.org/packages/7c/cf/5309c2d173a6a67f9ec8697d8e710ea32418de6fd8541778032c202a1c3e/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945", size = 2897745, upload-time = "2024-11-05T17:31:35.733Z" }, - { url = "https://files.pythonhosted.org/packages/2c/e5/af3078e32f225e680e69d61f78855880edb8d53f5850a1834d519b2b103f/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c", size = 2794385, upload-time = "2024-11-05T17:31:37.497Z" }, - { url = "https://files.pythonhosted.org/packages/0b/a7/bc421fe46650cc4eb4a913a236b88c243204f32c7480684d2f138925899e/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771", size = 3084580, upload-time = "2024-11-05T17:31:39.456Z" }, - { url = "https://files.pythonhosted.org/packages/c6/22/97e1e95ee81f75922c9f569c23cb2b1fdc7f5a7a29c4c9fae17e63f751a6/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5", size = 3093581, upload-time = "2024-11-05T17:31:41.224Z" }, - { url = 
"https://files.pythonhosted.org/packages/d5/14/f0df0ee3b9e516121e23c0099bccd7b9f086ba9150021a750e99b16ce56f/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1", size = 3385934, upload-time = "2024-11-05T17:31:43.811Z" }, - { url = "https://files.pythonhosted.org/packages/66/52/7a171bd4929e3ffe61a29b4340fe5b73484709f92a8162a18946e124c34c/tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0", size = 2997311, upload-time = "2024-11-05T17:31:46.224Z" }, - { url = "https://files.pythonhosted.org/packages/7c/64/f1993bb8ebf775d56875ca0d50a50f2648bfbbb143da92fe2e6ceeb4abd5/tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797", size = 8988601, upload-time = "2024-11-05T17:31:47.907Z" }, - { url = "https://files.pythonhosted.org/packages/d6/3f/49fa63422159bbc2f2a4ac5bfc597d04d4ec0ad3d2ef46649b5e9a340e37/tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01", size = 9303950, upload-time = "2024-11-05T17:31:50.674Z" }, - { url = "https://files.pythonhosted.org/packages/66/11/79d91aeb2817ad1993ef61c690afe73e6dbedbfb21918b302ef5a2ba9bfb/tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13", size = 2188941, upload-time = "2024-11-05T17:31:53.334Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ff/ac8410f868fb8b14b5e619efa304aa119cb8a40bd7df29fc81a898e64f99/tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273", size = 2380269, upload-time = "2024-11-05T17:31:54.796Z" }, - { url = 
"https://files.pythonhosted.org/packages/29/cd/ff1586dd572aaf1637d59968df3f6f6532fa255f4638fbc29f6d27e0b690/tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c", size = 2672044, upload-time = "2024-11-05T17:33:07.796Z" }, - { url = "https://files.pythonhosted.org/packages/b5/9e/7a2c00abbc8edb021ee0b1f12aab76a7b7824b49f94bcd9f075d0818d4b0/tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07", size = 2558841, upload-time = "2024-11-05T17:33:09.542Z" }, - { url = "https://files.pythonhosted.org/packages/8e/c1/6af62ef61316f33ecf785bbb2bee4292f34ea62b491d4480ad9b09acf6b6/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df", size = 2897936, upload-time = "2024-11-05T17:33:11.413Z" }, - { url = "https://files.pythonhosted.org/packages/9a/0b/c076b2ff3ee6dc70c805181fbe325668b89cfee856f8dfa24cc9aa293c84/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee", size = 3082688, upload-time = "2024-11-05T17:33:13.538Z" }, - { url = "https://files.pythonhosted.org/packages/0a/60/56510124933136c2e90879e1c81603cfa753ae5a87830e3ef95056b20d8f/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5", size = 2998924, upload-time = "2024-11-05T17:33:16.249Z" }, - { url = "https://files.pythonhosted.org/packages/68/60/4107b618b7b9155cb34ad2e0fc90946b7e71f041b642122fb6314f660688/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b", size = 8989514, upload-time = "2024-11-05T17:33:18.161Z" }, - { 
url = "https://files.pythonhosted.org/packages/e8/bd/48475818e614b73316baf37ac1e4e51b578bbdf58651812d7e55f43b88d8/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd", size = 9303476, upload-time = "2024-11-05T17:33:21.251Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, + { url = "https://files.pythonhosted.org/packages/84/04/655b79dbcc9b3ac5f1479f18e931a344af67e5b7d3b251d2dcdcd7558592/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:753d47ebd4542742ef9261d9da92cd545b2cacbb48349a1225466745bb866ec4", size = 3282301, upload-time = "2026-01-05T10:40:34.858Z" }, + { url = "https://files.pythonhosted.org/packages/46/cd/e4851401f3d8f6f45d8480262ab6a5c8cb9c4302a790a35aa14eeed6d2fd/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e10bf9113d209be7cd046d40fbabbaf3278ff6d18eb4da4c500443185dc1896c", size = 3161308, upload-time = "2026-01-05T10:40:40.737Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/6e/55553992a89982cd12d4a66dddb5e02126c58677ea3931efcbe601d419db/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64d94e84f6660764e64e7e0b22baa72f6cd942279fdbb21d46abd70d179f0195", size = 3718964, upload-time = "2026-01-05T10:40:46.56Z" }, + { url = "https://files.pythonhosted.org/packages/59/8c/b1c87148aa15e099243ec9f0cf9d0e970cc2234c3257d558c25a2c5304e6/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5", size = 3373542, upload-time = "2026-01-05T10:40:52.803Z" }, ] [[package]] @@ -7857,7 +7811,7 @@ wheels = [ [[package]] name = "transformers" -version = "4.46.3" +version = "4.57.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -7871,9 +7825,9 @@ dependencies = [ { name = "tokenizers" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/37/5a/58f96c83e566f907ae39f16d4401bbefd8bb85c60bd1e6a95c419752ab90/transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc", size = 8627944, upload-time = "2024-11-18T22:13:01.012Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/35/67252acc1b929dc88b6602e8c4a982e64f31e733b804c14bc24b47da35e6/transformers-4.57.6.tar.gz", hash = "sha256:55e44126ece9dc0a291521b7e5492b572e6ef2766338a610b9ab5afbb70689d3", size = 10134912, upload-time = "2026-01-16T10:38:39.284Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/51/51/b87caa939fedf307496e4dbf412f4b909af3d9ca8b189fc3b65c1faa456f/transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef", size = 10034536, upload-time = "2024-11-18T22:12:57.024Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/b8/e484ef633af3887baeeb4b6ad12743363af7cce68ae51e938e00aaa0529d/transformers-4.57.6-py3-none-any.whl", hash = "sha256:4c9e9de11333ddfe5114bc872c9f370509198acf0b87a832a0ab9458e2bd0550", size = 11993498, upload-time = "2026-01-16T10:38:31.289Z" }, ] [[package]] @@ -8116,11 +8070,11 @@ wheels = [ [[package]] name = "types-regex" -version = "2024.11.6.20250403" +version = "2026.1.15.20260116" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c7/75/012b90c8557d3abb3b58a9073a94d211c8f75c9b2e26bf0d8af7ecf7bc78/types_regex-2024.11.6.20250403.tar.gz", hash = "sha256:3fdf2a70bbf830de4b3a28e9649a52d43dabb57cdb18fbfe2252eefb53666665", size = 12394, upload-time = "2025-04-03T02:54:35.379Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/1a/fefad12cbe6214303d30027933a3e521188d9f283e383a183d9fda5c62fb/types_regex-2026.1.15.20260116.tar.gz", hash = "sha256:7151a9bcc5bbf9ecfccf8335c451aca8204f5a0992e0622aafaf482876cee4f7", size = 12877, upload-time = "2026-01-16T03:21:49.461Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/49/67200c4708f557be6aa4ecdb1fa212d67a10558c5240251efdc799cca22f/types_regex-2024.11.6.20250403-py3-none-any.whl", hash = "sha256:e22c0f67d73f4b4af6086a340f387b6f7d03bed8a0bb306224b75c51a29b0001", size = 10396, upload-time = "2025-04-03T02:54:34.555Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d4/0d47227ea84365bea532dca287fe73cba985d6e1d3a31a71849a8aa91370/types_regex-2026.1.15.20260116-py3-none-any.whl", hash = "sha256:b20786eacbde2f2a261cbe7f5096f483da995488d196f81e585ffd2dffc555e0", size = 11099, upload-time = "2026-01-16T03:21:48.647Z" }, ] [[package]] @@ -8342,46 +8296,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/10/6d/adb955ecf60811a3735d508974bbb5358e7745b635dc001329267529c6f2/unstructured.pytesseract-0.3.15-py3-none-any.whl", hash = 
"sha256:a3f505c5efb7ff9f10379051a7dd6aa624b3be6b0f023ed6767cc80d0b1613d1", size = 14992, upload-time = "2025-03-05T00:59:15.962Z" }, ] -[[package]] -name = "urllib3" -version = "1.26.20" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.11' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.12.*' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", -] -sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380, upload-time = "2024-08-29T15:43:11.37Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225, upload-time = "2024-08-29T15:43:08.921Z" }, -] - -[package.optional-dependencies] -socks = [ - { name = "pysocks", marker = "platform_python_implementation == 'PyPy'" }, -] - [[package]] name = "urllib3" version = "2.6.3" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.11' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.12.*' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.13' and platform_python_implementation != 'PyPy'", -] sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size 
= 435556, upload-time = "2026-01-07T16:24:43.925Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] -[package.optional-dependencies] -socks = [ - { name = "pysocks", marker = "platform_python_implementation != 'PyPy'" }, -] - [[package]] name = "uuid-utils" version = "0.14.0" @@ -8509,8 +8432,7 @@ version = "7.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyyaml" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.6.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_python_implementation != 'PyPy'" }, + { name = "urllib3" }, { name = "wrapt" }, { name = "yarl" }, ]