Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-28 17:48:13 +00:00.

Comparing gl/fix/goo...devin/1769 (1 commit, 615f6ad9d6)
@@ -4,74 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---

<Update label="Jan 26, 2026">
## v1.9.0

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.9.0)

## What's Changed

### Features
- Add structured outputs and response_format support across providers
- Add response ID to streaming responses
- Add event ordering with parent-child hierarchies
- Add Keycloak SSO authentication support
- Add multimodal file handling capabilities
- Add native OpenAI responses API support
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Enhance event system and expand transport options
- Improve tool calling mechanisms

### Bug Fixes
- Enhance file store with fallback memory cache when aiocache is not available
- Ensure document list is not empty
- Handle Bedrock stop sequences properly
- Add Google Vertex API key support
- Enhance Azure model stop word detection
- Improve error handling for HumanFeedbackPending in flow execution
- Fix execution span task unlinking

### Documentation
- Add native file handling documentation
- Add OpenAI responses API documentation
- Add agent card implementation guidance
- Refine A2A documentation
- Update changelog for v1.8.0

### Contributors
@Anaisdg, @GininDenis, @Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @koushiv777, @lorenzejay, @nicoferdi96, @vinibrsl

</Update>

<Update label="Jan 15, 2026">
## v1.8.1

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.1)

## What's Changed

### Features
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Add additional transport mechanisms
- Add Galileo integration support

### Bug Fixes
- Improve Azure model compatibility
- Expand frame inspection depth to detect parent_flow
- Resolve task execution span management issues
- Enhance error handling for human feedback scenarios during flow execution

### Documentation
- Add A2A agent card documentation
- Add PII redaction feature documentation

### Contributors
@Anaisdg, @GininDenis, @greysonlalonde, @joaomdmoura, @koushiv777, @lorenzejay, @vinibrsl

</Update>

<Update label="Jan 08, 2026">
## v1.8.0

@@ -401,58 +401,23 @@ crew = Crew(

### Vertex AI Embeddings

For Google Cloud users with Vertex AI access. Supports both legacy and new embedding models with automatic SDK selection.

<Note>
**Deprecation Notice:** Legacy models (`textembedding-gecko*`) use the deprecated `vertexai.language_models` SDK which will be removed after June 24, 2026. Consider migrating to newer models like `gemini-embedding-001`. See the [Google migration guide](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk) for details.
</Note>
For Google Cloud users with Vertex AI access.

```python
# Recommended: Using new models with google-genai SDK
crew = Crew(
    memory=True,
    embedder={
        "provider": "google-vertex",
        "provider": "vertexai",
        "config": {
            "project_id": "your-gcp-project-id",
            "location": "us-central1",
            "model_name": "gemini-embedding-001",  # or "text-embedding-005", "text-multilingual-embedding-002"
            "task_type": "RETRIEVAL_DOCUMENT",  # Optional
            "output_dimensionality": 768  # Optional
        }
    }
)

# Using API key authentication (Exp)
crew = Crew(
    memory=True,
    embedder={
        "provider": "google-vertex",
        "config": {
            "api_key": "your-google-api-key",
            "model_name": "gemini-embedding-001"
        }
    }
)

# Legacy models (backwards compatible, emits deprecation warning)
crew = Crew(
    memory=True,
    embedder={
        "provider": "google-vertex",
        "config": {
            "project_id": "your-gcp-project-id",
            "region": "us-central1",  # or "location" (region is deprecated)
            "model_name": "textembedding-gecko"  # Legacy model
            "region": "us-central1",  # or your preferred region
            "api_key": "your-service-account-key",
            "model_name": "textembedding-gecko"
        }
    }
)
```

**Available models:**
- **New SDK models** (recommended): `gemini-embedding-001`, `text-embedding-005`, `text-multilingual-embedding-002`
- **Legacy models** (deprecated): `textembedding-gecko`, `textembedding-gecko@001`, `textembedding-gecko-multilingual`

### Ollama Embeddings (Local)

Run embeddings locally for privacy and cost savings.
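The snippet below is a minimal sketch following the embedder pattern used above for Vertex AI; the Ollama provider name and config keys (`model`, `url`) are assumptions to verify against your installed version's documentation.

```python
# Minimal sketch (unverified keys): local embeddings via an Ollama server
crew = Crew(
    memory=True,
    embedder={
        "provider": "ollama",  # assumption: provider id for local Ollama
        "config": {
            "model": "mxbai-embed-large",  # any embedding model pulled locally
            "url": "http://localhost:11434/api/embeddings"  # default local endpoint
        }
    }
)
```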
@@ -604,7 +569,7 @@ mem0_client_embedder_config = {
    "project_id": "my_project_id",  # Optional
    "api_key": "custom-api-key"  # Optional - overrides env var
    "run_id": "my_run_id",  # Optional - for short-term memory
    "includes": "include1",  # Optional
    "excludes": "exclude1",  # Optional
    "infer": True  # Optional defaults to True
    "custom_categories": new_categories  # Optional - custom categories for user memory

@@ -626,7 +591,7 @@ crew = Crew(

### Choosing the Right Embedding Provider

When selecting an embedding provider, consider factors like performance, privacy, cost, and integration needs.
Below is a comparison to help you decide:

| Provider | Best For | Pros | Cons |

@@ -784,7 +749,7 @@ Entity Memory supports batching when saving multiple entities at once. When you

This improves performance and observability when writing many entities in one operation.
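As a rough sketch of what such a batched write could look like; the `EntityMemoryItem` fields and the list-accepting `save()` call are assumptions inferred from the description above, not confirmed API.

```python
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.entity.entity_memory_item import EntityMemoryItem  # assumed import path

entity_memory = EntityMemory()

# Hypothetical batch: several entities handed over in one call so the
# storage layer can write them together, as described above.
items = [
    EntityMemoryItem(name="ACME Corp", type="organization",
                     description="Customer discussed in the research task"),
    EntityMemoryItem(name="Jane Doe", type="person",
                     description="ACME's point of contact"),
]
entity_memory.save(items)  # assumption: save() accepts a list for batched writes
```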
## 2. External Memory

External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.

### Basic External Memory with Mem0
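A minimal end-to-end sketch, assuming the Mem0 provider configuration shown in the fragment below; the IDs and the agent/task placeholders are illustrative.

```python
from crewai import Crew
from crewai.memory.external.external_memory import ExternalMemory

# Standalone memory store, independent of the crew's built-in memory
external_memory = ExternalMemory(
    embedder_config={
        "provider": "mem0",
        "config": {"user_id": "user-123"},  # placeholder; see full option list below
    }
)

crew = Crew(
    agents=[...],  # your agents
    tasks=[...],   # your tasks
    external_memory=external_memory,  # wired separately from memory=True
)
```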
@@ -854,7 +819,7 @@ external_memory = ExternalMemory(
    "project_id": "my_project_id",  # Optional
    "api_key": "custom-api-key"  # Optional - overrides env var
    "run_id": "my_run_id",  # Optional - for short-term memory
    "includes": "include1",  # Optional
    "excludes": "exclude1",  # Optional
    "infer": True  # Optional defaults to True
    "custom_categories": new_categories  # Optional - custom categories for user memory
@@ -4,74 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---

<Update label="Jan 26, 2026">
## v1.9.0

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.9.0)

## What's Changed

### Features
- Add structured outputs and response_format support across providers
- Add response ID to streaming responses
- Add event ordering with parent-child hierarchies
- Add Keycloak SSO authentication support
- Add multimodal file handling capabilities
- Add native OpenAI responses API support
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Enhance event system and expand transport options
- Improve tool calling mechanisms

### Bug Fixes
- Enhance file store with fallback memory cache when aiocache is not available
- Ensure document list is not empty
- Handle Bedrock stop sequences properly
- Add Google Vertex API key support
- Enhance Azure model stop word detection
- Improve error handling for HumanFeedbackPending in flow execution
- Fix execution span task unlinking

### Documentation
- Add native file handling documentation
- Add OpenAI responses API documentation
- Add agent card implementation guidance
- Refine A2A documentation
- Update changelog for v1.8.0

### Contributors
@Anaisdg, @GininDenis, @Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @koushiv777, @lorenzejay, @nicoferdi96, @vinibrsl

</Update>

<Update label="Jan 15, 2026">
## v1.8.1

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.1)

## What's Changed

### Features
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Add additional transport mechanisms
- Add Galileo integration support

### Bug Fixes
- Improve Azure model compatibility
- Expand frame inspection depth to detect parent_flow
- Resolve task execution span management issues
- Enhance error handling for human feedback scenarios during flow execution

### Documentation
- Add A2A agent card documentation
- Add PII redaction feature documentation

### Contributors
@Anaisdg, @GininDenis, @greysonlalonde, @joaomdmoura, @koushiv777, @lorenzejay, @vinibrsl

</Update>

<Update label="Jan 8, 2026">
## v1.8.0

@@ -4,74 +4,6 @@ description: "Product updates, improvements, and fixes for CrewAI"
icon: "clock"
mode: "wide"
---

<Update label="Jan 26, 2026">
## v1.9.0

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.9.0)

## What's Changed

### Features
- Add structured outputs and response_format support across providers
- Add response ID to streaming responses
- Add event ordering with parent-child hierarchies
- Add Keycloak SSO authentication support
- Add multimodal file handling capabilities
- Add native OpenAI responses API support
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Enhance event system and expand transport options
- Improve tool calling mechanisms

### Bug Fixes
- Enhance file store with fallback memory cache when aiocache is not available
- Ensure document list is not empty
- Handle Bedrock stop sequences properly
- Add Google Vertex API key support
- Enhance Azure model stop word detection
- Improve error handling for HumanFeedbackPending in flow execution
- Fix execution span task unlinking

### Documentation
- Add native file handling documentation
- Add OpenAI responses API documentation
- Add agent card implementation guidance
- Refine A2A documentation
- Update changelog for v1.8.0

### Contributors
@Anaisdg, @GininDenis, @Vidit-Ostwal, @greysonlalonde, @heitorado, @joaomdmoura, @koushiv777, @lorenzejay, @nicoferdi96, @vinibrsl

</Update>

<Update label="Jan 15, 2026">
## v1.8.1

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.8.1)

## What's Changed

### Features
- Add A2A task execution utilities
- Add A2A server configuration and agent card generation
- Add additional transport mechanisms
- Add Galileo integration support

### Bug Fixes
- Improve Azure model compatibility
- Expand frame inspection depth to detect parent_flow
- Resolve task execution span management issues
- Enhance error handling for human feedback scenarios during flow execution

### Documentation
- Add A2A agent card documentation
- Add PII redaction feature documentation

### Contributors
@Anaisdg, @GininDenis, @greysonlalonde, @joaomdmoura, @koushiv777, @lorenzejay, @vinibrsl

</Update>

<Update label="Jan 08, 2026">
## v1.8.0

@@ -152,4 +152,4 @@ __all__ = [
    "wrap_file_source",
]

__version__ = "1.9.1"
__version__ = "1.8.1"

@@ -12,7 +12,7 @@ dependencies = [
    "pytube~=15.0.0",
    "requests~=2.32.5",
    "docker~=7.1.0",
    "crewai==1.9.1",
    "crewai==1.8.1",
    "lancedb~=0.5.4",
    "tiktoken~=0.8.0",
    "beautifulsoup4~=4.13.4",

@@ -291,4 +291,4 @@ __all__ = [
    "ZapierActionTools",
]

__version__ = "1.9.1"
__version__ = "1.8.1"
@@ -1,11 +1,10 @@
"""Crewai Enterprise Tools."""

import json
import os
from typing import Any
import json
import re
from typing import Any, Optional, Union, cast, get_origin

from crewai.tools import BaseTool
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
from pydantic import Field, create_model
import requests

@@ -15,6 +14,77 @@ from crewai_tools.tools.crewai_platform_tools.misc import (
)


class AllOfSchemaAnalyzer:
    """Helper class to analyze and merge allOf schemas."""

    def __init__(self, schemas: list[dict[str, Any]]):
        self.schemas = schemas
        self._explicit_types: list[str] = []
        self._merged_properties: dict[str, Any] = {}
        self._merged_required: list[str] = []
        self._analyze_schemas()

    def _analyze_schemas(self) -> None:
        """Analyze all schemas and extract relevant information."""
        for schema in self.schemas:
            if "type" in schema:
                self._explicit_types.append(schema["type"])

            # Merge object properties
            if schema.get("type") == "object" and "properties" in schema:
                self._merged_properties.update(schema["properties"])
                if "required" in schema:
                    self._merged_required.extend(schema["required"])

    def has_consistent_type(self) -> bool:
        """Check if all schemas have the same explicit type."""
        return len(set(self._explicit_types)) == 1 if self._explicit_types else False

    def get_consistent_type(self) -> type[Any]:
        """Get the consistent type if all schemas agree."""
        if not self.has_consistent_type():
            raise ValueError("No consistent type found")

        type_mapping = {
            "string": str,
            "integer": int,
            "number": float,
            "boolean": bool,
            "array": list,
            "object": dict,
            "null": type(None),
        }
        return type_mapping.get(self._explicit_types[0], str)

    def has_object_schemas(self) -> bool:
        """Check if any schemas are object types with properties."""
        return bool(self._merged_properties)

    def get_merged_properties(self) -> dict[str, Any]:
        """Get merged properties from all object schemas."""
        return self._merged_properties

    def get_merged_required_fields(self) -> list[str]:
        """Get merged required fields from all object schemas."""
        return list(set(self._merged_required))  # Remove duplicates

    def get_fallback_type(self) -> type[Any]:
        """Get a fallback type when merging fails."""
        if self._explicit_types:
            # Use the first explicit type
            type_mapping = {
                "string": str,
                "integer": int,
                "number": float,
                "boolean": bool,
                "array": list,
                "object": dict,
                "null": type(None),
            }
            return type_mapping.get(self._explicit_types[0], str)
        return str
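
    # Illustrative usage (hypothetical input, not part of the diff), showing how
    # the analyzer reports a merged allOf result:
    #
    #   analyzer = AllOfSchemaAnalyzer([
    #       {"type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"]},
    #       {"type": "object", "properties": {"age": {"type": "integer"}}},
    #   ])
    #   analyzer.has_consistent_type()         # True (both declare "object")
    #   analyzer.get_consistent_type()         # dict
    #   analyzer.get_merged_properties()       # {"name": {...}, "age": {...}}
    #   analyzer.get_merged_required_fields()  # ["name"]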


class CrewAIPlatformActionTool(BaseTool):
    action_name: str = Field(default="", description="The name of the action")
    action_schema: dict[str, Any] = Field(
@@ -27,19 +97,42 @@ class CrewAIPlatformActionTool(BaseTool):
        action_name: str,
        action_schema: dict[str, Any],
    ):
        parameters = action_schema.get("function", {}).get("parameters", {})
        self._model_registry: dict[str, type[Any]] = {}
        self._base_name = self._sanitize_name(action_name)

        schema_props, required = self._extract_schema_info(action_schema)

        field_definitions: dict[str, Any] = {}
        for param_name, param_details in schema_props.items():
            param_desc = param_details.get("description", "")
            is_required = param_name in required

        if parameters and parameters.get("properties"):
            try:
                if "title" not in parameters:
                    parameters = {**parameters, "title": f"{action_name}Schema"}
                if "type" not in parameters:
                    parameters = {**parameters, "type": "object"}
                args_schema = create_model_from_schema(parameters)
                field_type = self._process_schema_type(
                    param_details, self._sanitize_name(param_name).title()
                )
            except Exception:
                args_schema = create_model(f"{action_name}Schema")
                field_type = str

            field_definitions[param_name] = self._create_field_definition(
                field_type, is_required, param_desc
            )

        if field_definitions:
            try:
                args_schema = create_model(
                    f"{self._base_name}Schema", **field_definitions
                )
            except Exception:
                args_schema = create_model(
                    f"{self._base_name}Schema",
                    input_text=(str, Field(description="Input for the action")),
                )
        else:
            args_schema = create_model(f"{action_name}Schema")
            args_schema = create_model(
                f"{self._base_name}Schema",
                input_text=(str, Field(description="Input for the action")),
            )

        super().__init__(
            name=action_name.lower().replace(" ", "_"),
@@ -49,12 +142,285 @@ class CrewAIPlatformActionTool(BaseTool):
        self.action_name = action_name
        self.action_schema = action_schema

    def _run(self, **kwargs: Any) -> str:
    @staticmethod
    def _sanitize_name(name: str) -> str:
        name = name.lower().replace(" ", "_")
        sanitized = re.sub(r"[^a-zA-Z0-9_]", "", name)
        parts = sanitized.split("_")
        return "".join(word.capitalize() for word in parts if word)

    @staticmethod
    def _extract_schema_info(
        action_schema: dict[str, Any],
    ) -> tuple[dict[str, Any], list[str]]:
        schema_props = (
            action_schema.get("function", {})
            .get("parameters", {})
            .get("properties", {})
        )
        required = (
            action_schema.get("function", {}).get("parameters", {}).get("required", [])
        )
        return schema_props, required

    def _process_schema_type(self, schema: dict[str, Any], type_name: str) -> type[Any]:
        """
        Process a JSON Schema type definition into a Python type.

        Handles complex schema constructs like anyOf, oneOf, allOf, enums, arrays, and objects.
        """
        # Handle composite schema types (anyOf, oneOf, allOf)
        if composite_type := self._process_composite_schema(schema, type_name):
            return composite_type

        # Handle primitive types and simple constructs
        return self._process_primitive_schema(schema, type_name)

    def _process_composite_schema(
        self, schema: dict[str, Any], type_name: str
    ) -> type[Any] | None:
        """Process composite schema types: anyOf, oneOf, allOf."""
        if "anyOf" in schema:
            return self._process_any_of_schema(schema["anyOf"], type_name)
        if "oneOf" in schema:
            return self._process_one_of_schema(schema["oneOf"], type_name)
        if "allOf" in schema:
            return self._process_all_of_schema(schema["allOf"], type_name)
        return None

    def _process_any_of_schema(
        self, any_of_types: list[dict[str, Any]], type_name: str
    ) -> type[Any]:
        """Process anyOf schema - creates Union of possible types."""
        is_nullable = any(t.get("type") == "null" for t in any_of_types)
        non_null_types = [t for t in any_of_types if t.get("type") != "null"]

        if not non_null_types:
            return cast(
                type[Any], cast(object, str | None)
            )  # fallback for only-null case

        base_type = (
            self._process_schema_type(non_null_types[0], type_name)
            if len(non_null_types) == 1
            else self._create_union_type(non_null_types, type_name, "AnyOf")
        )
        return base_type | None if is_nullable else base_type  # type: ignore[return-value]

    def _process_one_of_schema(
        self, one_of_types: list[dict[str, Any]], type_name: str
    ) -> type[Any]:
        """Process oneOf schema - creates Union of mutually exclusive types."""
        return (
            self._process_schema_type(one_of_types[0], type_name)
            if len(one_of_types) == 1
            else self._create_union_type(one_of_types, type_name, "OneOf")
        )

    def _process_all_of_schema(
        self, all_of_schemas: list[dict[str, Any]], type_name: str
    ) -> type[Any]:
        """Process allOf schema - merges schemas that must all be satisfied."""
        if len(all_of_schemas) == 1:
            return self._process_schema_type(all_of_schemas[0], type_name)
        return self._merge_all_of_schemas(all_of_schemas, type_name)

    def _create_union_type(
        self, schemas: list[dict[str, Any]], type_name: str, prefix: str
    ) -> type[Any]:
        """Create a Union type from multiple schemas."""
        return Union[  # type: ignore # noqa: UP007
            tuple(
                self._process_schema_type(schema, f"{type_name}{prefix}{i}")
                for i, schema in enumerate(schemas)
            )
        ]

    def _process_primitive_schema(
        self, schema: dict[str, Any], type_name: str
    ) -> type[Any]:
        """Process primitive schema types: string, number, array, object, etc."""
        json_type = schema.get("type", "string")

        if "enum" in schema:
            return self._process_enum_schema(schema, json_type)

        if json_type == "array":
            return self._process_array_schema(schema, type_name)

        if json_type == "object":
            return self._create_nested_model(schema, type_name)

        return self._map_json_type_to_python(json_type)

    def _process_enum_schema(self, schema: dict[str, Any], json_type: str) -> type[Any]:
        """Process enum schema - currently falls back to base type."""
        enum_values = schema["enum"]
        if not enum_values:
            return self._map_json_type_to_python(json_type)

        # For Literal types, we need to pass the values directly, not as a tuple
        # This is a workaround since we can't dynamically create Literal types easily
        # Fall back to the base JSON type for now
        return self._map_json_type_to_python(json_type)

    def _process_array_schema(
        self, schema: dict[str, Any], type_name: str
    ) -> type[Any]:
        items_schema = schema.get("items", {"type": "string"})
        item_type = self._process_schema_type(items_schema, f"{type_name}Item")
        return list[item_type]  # type: ignore

    def _merge_all_of_schemas(
        self, schemas: list[dict[str, Any]], type_name: str
    ) -> type[Any]:
        schema_analyzer = AllOfSchemaAnalyzer(schemas)

        if schema_analyzer.has_consistent_type():
            return schema_analyzer.get_consistent_type()

        if schema_analyzer.has_object_schemas():
            return self._create_merged_object_model(
                schema_analyzer.get_merged_properties(),
                schema_analyzer.get_merged_required_fields(),
                type_name,
            )

        return schema_analyzer.get_fallback_type()

    def _create_merged_object_model(
        self, properties: dict[str, Any], required: list[str], model_name: str
    ) -> type[Any]:
        full_model_name = f"{self._base_name}{model_name}AllOf"

        if full_model_name in self._model_registry:
            return self._model_registry[full_model_name]

        if not properties:
            return dict

        field_definitions = self._build_field_definitions(
            properties, required, model_name
        )

        try:
            merged_model = create_model(full_model_name, **field_definitions)
            self._model_registry[full_model_name] = merged_model
            return merged_model
        except Exception:
            return dict

    def _build_field_definitions(
        self, properties: dict[str, Any], required: list[str], model_name: str
    ) -> dict[str, Any]:
        field_definitions = {}

        for prop_name, prop_schema in properties.items():
            prop_desc = prop_schema.get("description", "")
            is_required = prop_name in required

            try:
                prop_type = self._process_schema_type(
                    prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}"
                )
            except Exception:
                prop_type = str

            field_definitions[prop_name] = self._create_field_definition(
                prop_type, is_required, prop_desc
            )

        return field_definitions

    def _create_nested_model(
        self, schema: dict[str, Any], model_name: str
    ) -> type[Any]:
        full_model_name = f"{self._base_name}{model_name}"

        if full_model_name in self._model_registry:
            return self._model_registry[full_model_name]

        properties = schema.get("properties", {})
        required_fields = schema.get("required", [])

        if not properties:
            return dict

        field_definitions = {}
        for prop_name, prop_schema in properties.items():
            prop_desc = prop_schema.get("description", "")
            is_required = prop_name in required_fields

            try:
                prop_type = self._process_schema_type(
                    prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}"
                )
            except Exception:
                prop_type = str

            field_definitions[prop_name] = self._create_field_definition(
                prop_type, is_required, prop_desc
            )

        try:
            nested_model = create_model(full_model_name, **field_definitions)  # type: ignore
            self._model_registry[full_model_name] = nested_model
            return nested_model
        except Exception:
            return dict

    def _create_field_definition(
        self, field_type: type[Any], is_required: bool, description: str
    ) -> tuple:
        if is_required:
            return (field_type, Field(description=description))
        if get_origin(field_type) is Union:
            return (field_type, Field(default=None, description=description))
        return (
            Optional[field_type],  # noqa: UP045
            Field(default=None, description=description),
        )

    def _map_json_type_to_python(self, json_type: str) -> type[Any]:
        type_mapping = {
            "string": str,
            "integer": int,
            "number": float,
            "boolean": bool,
            "array": list,
            "object": dict,
            "null": type(None),
        }
        return type_mapping.get(json_type, str)

    def _get_required_nullable_fields(self) -> list[str]:
        schema_props, required = self._extract_schema_info(self.action_schema)

        required_nullable_fields = []
        for param_name in required:
            param_details = schema_props.get(param_name, {})
            if self._is_nullable_type(param_details):
                required_nullable_fields.append(param_name)

        return required_nullable_fields

    def _is_nullable_type(self, schema: dict[str, Any]) -> bool:
        if "anyOf" in schema:
            return any(t.get("type") == "null" for t in schema["anyOf"])
        return schema.get("type") == "null"

    def _run(self, **kwargs) -> str:
        try:
            cleaned_kwargs = {
                key: value for key, value in kwargs.items() if value is not None
            }

            required_nullable_fields = self._get_required_nullable_fields()

            for field_name in required_nullable_fields:
                if field_name not in cleaned_kwargs:
                    cleaned_kwargs[field_name] = None

            api_url = (
                f"{get_platform_api_base_url()}/actions/{self.action_name}/execute"
            )
@@ -63,9 +429,7 @@ class CrewAIPlatformActionTool(BaseTool):
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
            }
            payload = {
                "integration": cleaned_kwargs if cleaned_kwargs else {"_noop": True}
            }
            payload = cleaned_kwargs

            response = requests.post(
                url=api_url,
@@ -77,14 +441,7 @@ class CrewAIPlatformActionTool(BaseTool):

            data = response.json()
            if not response.ok:
                if isinstance(data, dict):
                    error_info = data.get("error", {})
                    if isinstance(error_info, dict):
                        error_message = error_info.get("message", json.dumps(data))
                    else:
                        error_message = str(error_info)
                else:
                    error_message = str(data)
                error_message = data.get("error", {}).get("message", json.dumps(data))
                return f"API request failed: {error_message}"

            return json.dumps(data, indent=2)
@@ -1,10 +1,5 @@
"""CrewAI platform tool builder for fetching and creating action tools."""

import logging
import os
from types import TracebackType
from typing import Any

import os
from crewai.tools import BaseTool
import requests

@@ -17,29 +12,22 @@ from crewai_tools.tools.crewai_platform_tools.misc import (
)


logger = logging.getLogger(__name__)


class CrewaiPlatformToolBuilder:
    """Builds platform tools from remote action schemas."""

    def __init__(
        self,
        apps: list[str],
    ) -> None:
    ):
        self._apps = apps
        self._actions_schema: dict[str, dict[str, Any]] = {}
        self._tools: list[BaseTool] | None = None
        self._actions_schema = {}  # type: ignore[var-annotated]
        self._tools = None

    def tools(self) -> list[BaseTool]:
        """Fetch actions and return built tools."""
        if self._tools is None:
            self._fetch_actions()
            self._create_tools()
        return self._tools if self._tools is not None else []

    def _fetch_actions(self) -> None:
        """Fetch action schemas from the platform API."""
    def _fetch_actions(self):
        actions_url = f"{get_platform_api_base_url()}/actions"
        headers = {"Authorization": f"Bearer {get_platform_integration_token()}"}

@@ -52,8 +40,7 @@ class CrewaiPlatformToolBuilder:
                verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
            )
            response.raise_for_status()
        except Exception as e:
            logger.error(f"Failed to fetch platform tools for apps {self._apps}: {e}")
        except Exception:
            return

        raw_data = response.json()
@@ -64,8 +51,6 @@ class CrewaiPlatformToolBuilder:
        for app, action_list in action_categories.items():
            if isinstance(action_list, list):
                for action in action_list:
                    if not isinstance(action, dict):
                        continue
                    if action_name := action.get("name"):
                        action_schema = {
                            "function": {
@@ -79,16 +64,72 @@ class CrewaiPlatformToolBuilder:
                        }
                        self._actions_schema[action_name] = action_schema

    def _create_tools(self) -> None:
        """Create tool instances from fetched action schemas."""
        tools: list[BaseTool] = []
    def _generate_detailed_description(
        self, schema: dict[str, Any], indent: int = 0
    ) -> list[str]:
        descriptions = []
        indent_str = " " * indent

        schema_type = schema.get("type", "string")

        if schema_type == "object":
            properties = schema.get("properties", {})
            required_fields = schema.get("required", [])

            if properties:
                descriptions.append(f"{indent_str}Object with properties:")
                for prop_name, prop_schema in properties.items():
                    prop_desc = prop_schema.get("description", "")
                    is_required = prop_name in required_fields
                    req_str = " (required)" if is_required else " (optional)"
                    descriptions.append(
                        f"{indent_str} - {prop_name}: {prop_desc}{req_str}"
                    )

                    if prop_schema.get("type") == "object":
                        descriptions.extend(
                            self._generate_detailed_description(prop_schema, indent + 2)
                        )
                    elif prop_schema.get("type") == "array":
                        items_schema = prop_schema.get("items", {})
                        if items_schema.get("type") == "object":
                            descriptions.append(f"{indent_str} Array of objects:")
                            descriptions.extend(
                                self._generate_detailed_description(
                                    items_schema, indent + 3
                                )
                            )
                        elif "enum" in items_schema:
                            descriptions.append(
                                f"{indent_str} Array of enum values: {items_schema['enum']}"
                            )
                    elif "enum" in prop_schema:
                        descriptions.append(
                            f"{indent_str} Enum values: {prop_schema['enum']}"
                        )

        return descriptions
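
    # Example of the generated text (hypothetical schema; exact indentation
    # depends on the indent parameter and may differ from this sketch):
    #
    #   _generate_detailed_description({
    #       "type": "object",
    #       "properties": {"city": {"type": "string", "description": "City name"}},
    #       "required": ["city"],
    #   })
    #   -> ["Object with properties:", " - city: City name (required)"]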

    def _create_tools(self):
        tools = []

        for action_name, action_schema in self._actions_schema.items():
            function_details = action_schema.get("function", {})
            description = function_details.get("description", f"Execute {action_name}")

            parameters = function_details.get("parameters", {})
            param_descriptions = []

            if parameters.get("properties"):
                param_descriptions.append("\nDetailed Parameter Structure:")
                param_descriptions.extend(
                    self._generate_detailed_description(parameters)
                )

            full_description = description + "\n".join(param_descriptions)

            tool = CrewAIPlatformActionTool(
                description=description,
                description=full_description,
                action_name=action_name,
                action_schema=action_schema,
            )
@@ -97,14 +138,8 @@ class CrewaiPlatformToolBuilder:

        self._tools = tools

    def __enter__(self) -> list[BaseTool]:
        """Enter context manager and return tools."""
    def __enter__(self):
        return self.tools()

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """Exit context manager."""
    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
@@ -1,3 +1,4 @@
|
||||
from typing import Union, get_args, get_origin
|
||||
from unittest.mock import patch, Mock
|
||||
import os
|
||||
|
||||
@@ -6,6 +7,251 @@ from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import
|
||||
)
|
||||
|
||||
|
||||
class TestSchemaProcessing:
|
||||
|
||||
def setup_method(self):
|
||||
self.base_action_schema = {
|
||||
"function": {
|
||||
"parameters": {
|
||||
"properties": {},
|
||||
"required": []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def create_test_tool(self, action_name="test_action"):
|
||||
return CrewAIPlatformActionTool(
|
||||
description="Test tool",
|
||||
action_name=action_name,
|
||||
action_schema=self.base_action_schema
|
||||
)
|
||||
|
||||
def test_anyof_multiple_types(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"type": "number"},
|
||||
{"type": "integer"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestField")
|
||||
|
||||
assert get_origin(result_type) is Union
|
||||
|
||||
args = get_args(result_type)
|
||||
expected_types = (str, float, int)
|
||||
|
||||
for expected_type in expected_types:
|
||||
assert expected_type in args
|
||||
|
||||
def test_anyof_with_null(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"type": "number"},
|
||||
{"type": "null"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldNullable")
|
||||
|
||||
assert get_origin(result_type) is Union
|
||||
|
||||
args = get_args(result_type)
|
||||
assert type(None) in args
|
||||
assert str in args
|
||||
assert float in args
|
||||
|
||||
def test_anyof_single_type(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldSingle")
|
||||
|
||||
assert result_type is str
|
||||
|
||||
def test_oneof_multiple_types(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "boolean"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldOneOf")
|
||||
|
||||
assert get_origin(result_type) is Union
|
||||
|
||||
args = get_args(result_type)
|
||||
expected_types = (str, bool)
|
||||
|
||||
for expected_type in expected_types:
|
||||
assert expected_type in args
|
||||
|
||||
def test_oneof_single_type(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"oneOf": [
|
||||
{"type": "integer"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldOneOfSingle")
|
||||
|
||||
assert result_type is int
|
||||
|
||||
def test_basic_types(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_cases = [
|
||||
({"type": "string"}, str),
|
||||
({"type": "integer"}, int),
|
||||
({"type": "number"}, float),
|
||||
({"type": "boolean"}, bool),
|
||||
({"type": "array", "items": {"type": "string"}}, list),
|
||||
]
|
||||
|
||||
for schema, expected_type in test_cases:
|
||||
result_type = tool._process_schema_type(schema, "TestField")
|
||||
if schema["type"] == "array":
|
||||
assert get_origin(result_type) is list
|
||||
else:
|
||||
assert result_type is expected_type
|
||||
|
||||
def test_enum_handling(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"type": "string",
|
||||
"enum": ["option1", "option2", "option3"]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldEnum")
|
||||
|
||||
assert result_type is str
|
||||
|
||||
def test_nested_anyof(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"anyOf": [
|
||||
{"type": "integer"},
|
||||
{"type": "boolean"}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldNested")
|
||||
|
||||
assert get_origin(result_type) is Union
|
||||
args = get_args(result_type)
|
||||
|
||||
assert str in args
|
||||
|
||||
if len(args) == 3:
|
||||
assert int in args
|
||||
assert bool in args
|
||||
else:
|
||||
nested_union = next(arg for arg in args if get_origin(arg) is Union)
|
||||
nested_args = get_args(nested_union)
|
||||
assert int in nested_args
|
||||
assert bool in nested_args
|
||||
|
||||
def test_allof_same_types(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"allOf": [
|
||||
{"type": "string"},
|
||||
{"type": "string", "maxLength": 100}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfSame")
|
||||
|
||||
assert result_type is str
|
||||
|
||||
def test_allof_object_merge(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"allOf": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string"},
|
||||
"age": {"type": "integer"}
|
||||
},
|
||||
"required": ["name"]
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {"type": "string"},
|
||||
"age": {"type": "integer"}
|
||||
},
|
||||
"required": ["email"]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMerged")
|
||||
|
||||
# Should create a merged model with all properties
|
||||
# The implementation might fall back to dict if model creation fails
|
||||
# Let's just verify it's not a basic scalar type
|
||||
assert result_type is not str
|
||||
assert result_type is not int
|
||||
assert result_type is not bool
|
||||
# It could be dict (fallback) or a proper model class
|
||||
assert result_type in (dict, type) or hasattr(result_type, '__name__')
|
||||
|
||||
def test_allof_single_schema(self):
|
||||
"""Test that allOf with single schema works correctly."""
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"allOf": [
|
||||
{"type": "boolean"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfSingle")
|
||||
|
||||
# Should be just bool
|
||||
assert result_type is bool
|
||||
|
||||
def test_allof_mixed_types(self):
|
||||
tool = self.create_test_tool()
|
||||
|
||||
test_schema = {
|
||||
"allOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer"}
|
||||
]
|
||||
}
|
||||
|
||||
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMixed")
|
||||
|
||||
assert result_type is str
|
||||
|
||||
class TestCrewAIPlatformActionToolVerify:
|
||||
"""Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable"""
|
||||
|
||||
|
||||
@@ -224,6 +224,43 @@ class TestCrewaiPlatformToolBuilder(unittest.TestCase):
|
||||
_, kwargs = mock_get.call_args
|
||||
assert kwargs["params"]["apps"] == ""
|
||||
|
||||
def test_detailed_description_generation(self):
|
||||
builder = CrewaiPlatformToolBuilder(apps=["test"])
|
||||
|
||||
complex_schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"simple_string": {"type": "string", "description": "A simple string"},
|
||||
"nested_object": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"inner_prop": {
|
||||
"type": "integer",
|
||||
"description": "Inner property",
|
||||
}
|
||||
},
|
||||
"description": "Nested object",
|
||||
},
|
||||
"array_prop": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Array of strings",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
descriptions = builder._generate_detailed_description(complex_schema)
|
||||
|
||||
assert isinstance(descriptions, list)
|
||||
assert len(descriptions) > 0
|
||||
|
||||
description_text = "\n".join(descriptions)
|
||||
assert "simple_string" in description_text
|
||||
assert "nested_object" in description_text
|
||||
assert "array_prop" in description_text
|
||||
|
||||
|
||||
|
||||
class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase):
|
||||
"""Test suite for SSL verification behavior in CrewaiPlatformToolBuilder"""
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = [
    "crewai-tools==1.9.1",
    "crewai-tools==1.8.1",
]
embeddings = [
    "tiktoken~=0.8.0"
@@ -90,7 +90,7 @@ azure-ai-inference = [
    "azure-ai-inference~=1.0.0b9",
]
anthropic = [
    "anthropic~=0.73.0",
    "anthropic~=0.71.0",
]
a2a = [
    "a2a-sdk~=0.3.10",

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

_suppress_pydantic_deprecation_warnings()

__version__ = "1.9.1"
__version__ = "1.8.1"
_telemetry_submitted = False
@@ -37,8 +37,7 @@ class CrewAgentExecutorMixin:
|
||||
self.crew
|
||||
and self.agent
|
||||
and self.task
|
||||
and f"Action: {sanitize_tool_name('Delegate work to coworker')}"
|
||||
not in output.text
|
||||
and f"Action: {sanitize_tool_name('Delegate work to coworker')}" not in output.text
|
||||
):
|
||||
try:
|
||||
if (
|
||||
@@ -133,11 +132,10 @@ class CrewAgentExecutorMixin:
|
||||
and self.crew._long_term_memory
|
||||
and self.crew._entity_memory is None
|
||||
):
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
|
||||
color="bold_yellow",
|
||||
)
|
||||
self._printer.print(
|
||||
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
|
||||
color="bold_yellow",
|
||||
)
|
||||
|
||||
def _ask_human_input(self, final_answer: str) -> str:
|
||||
"""Prompt human input with mode-appropriate messaging.
|
||||
|
||||
@@ -28,11 +28,6 @@ from crewai.hooks.llm_hooks import (
|
||||
get_after_llm_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
)
|
||||
from crewai.hooks.tool_hooks import (
|
||||
ToolCallHookContext,
|
||||
get_after_tool_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
from crewai.utilities.agent_utils import (
|
||||
aget_llm_response,
|
||||
convert_tools_to_openai_schema,
|
||||
@@ -206,14 +201,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
try:
|
||||
formatted_answer = self._invoke_loop()
|
||||
except AssertionError:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
raise
|
||||
except Exception as e:
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise
|
||||
|
||||
if self.ask_for_human_input:
|
||||
@@ -328,7 +322,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
messages=self.messages,
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
break
|
||||
|
||||
@@ -343,7 +336,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
# breakpoint()
|
||||
if self.response_model is not None:
|
||||
@@ -402,7 +394,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
iterations=self.iterations,
|
||||
log_error_after=self.log_error_after,
|
||||
printer=self._printer,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -417,10 +408,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
i18n=self._i18n,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
continue
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
@@ -466,7 +456,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
messages=self.messages,
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
@@ -488,7 +477,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
# Check if the response is a list of tool calls
|
||||
@@ -542,10 +530,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
i18n=self._i18n,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
continue
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
@@ -567,7 +554,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
formatted_answer = AgentFinish(
|
||||
@@ -763,42 +749,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
# Find the structured tool for hook context
|
||||
structured_tool: CrewStructuredTool | None = None
|
||||
for structured in self.tools or []:
|
||||
if sanitize_tool_name(structured.name) == func_name:
|
||||
structured_tool = structured
|
||||
break
|
||||
|
||||
# Execute before_tool_call hooks
|
||||
hook_blocked = False
|
||||
before_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
)
|
||||
before_hooks = get_before_tool_call_hooks()
|
||||
try:
|
||||
for hook in before_hooks:
|
||||
hook_result = hook(before_hook_context)
|
||||
if hook_result is False:
|
||||
hook_blocked = True
|
||||
break
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Error in before_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
# If hook blocked execution, set result and skip tool execution
|
||||
if hook_blocked:
|
||||
result = f"Tool execution blocked by hook. Tool: {func_name}"
|
||||
# Execute the tool (only if not cached, not at max usage, and not blocked by hook)
|
||||
elif not from_cache and not max_usage_reached:
|
||||
# Execute the tool (only if not cached and not at max usage)
|
||||
if not from_cache and not max_usage_reached:
|
||||
result = "Tool not found"
|
||||
if func_name in available_functions:
|
||||
try:
|
||||
@@ -846,29 +798,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
|
||||
after_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
tool_result=result,
|
||||
)
|
||||
after_hooks = get_after_tool_call_hooks()
|
||||
try:
|
||||
for after_hook in after_hooks:
|
||||
after_hook_result = after_hook(after_hook_context)
|
||||
if after_hook_result is not None:
|
||||
result = after_hook_result
|
||||
after_hook_context.tool_result = result
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Error in after_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
# Emit tool usage finished event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -953,14 +882,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
try:
|
||||
formatted_answer = await self._ainvoke_loop()
|
||||
except AssertionError:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
raise
|
||||
except Exception as e:
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise
|
||||
|
||||
if self.ask_for_human_input:
|
||||
@@ -1011,7 +939,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
messages=self.messages,
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
break
|
||||
|
||||
@@ -1026,7 +953,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
if self.response_model is not None:
|
||||
@@ -1084,7 +1010,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
iterations=self.iterations,
|
||||
log_error_after=self.log_error_after,
|
||||
printer=self._printer,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -1098,10 +1023,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
i18n=self._i18n,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
continue
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
@@ -1141,7 +1065,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
messages=self.messages,
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
@@ -1163,7 +1086,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
# Check if the response is a list of tool calls
|
||||
if (
|
||||
@@ -1216,10 +1138,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
i18n=self._i18n,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
continue
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
@@ -1241,7 +1162,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
from_agent=self.agent,
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
formatted_answer = AgentFinish(
|
||||
@@ -1359,11 +1279,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
if train_iteration is None or not isinstance(train_iteration, int):
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content="Invalid or missing train iteration. Cannot save training data.",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content="Invalid or missing train iteration. Cannot save training data.",
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||
@@ -1383,14 +1302,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if train_iteration in agent_training_data:
|
||||
agent_training_data[train_iteration]["improved_output"] = result.output
|
||||
else:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"No existing training data for agent {agent_id} and iteration "
|
||||
f"{train_iteration}. Cannot save improved output."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"No existing training data for agent {agent_id} and iteration "
|
||||
f"{train_iteration}. Cannot save improved output."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
# Update the training data and save
|
||||
|
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-"crewai[tools]==1.9.1"
+"crewai[tools]==1.8.1"
 ]

 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-"crewai[tools]==1.9.1"
+"crewai[tools]==1.8.1"
 ]

 [project.scripts]
@@ -84,4 +84,3 @@ class LLMStreamChunkEvent(LLMEventBase):
 chunk: str
 tool_call: ToolCall | None = None
 call_type: LLMCallType | None = None
-response_id: str | None = None
@@ -36,12 +36,6 @@ from crewai.hooks.llm_hooks import (
 get_after_llm_call_hooks,
 get_before_llm_call_hooks,
 )
-from crewai.hooks.tool_hooks import (
-ToolCallHookContext,
-get_after_tool_call_hooks,
-get_before_tool_call_hooks,
-)
-from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
 from crewai.utilities.agent_utils import (
 convert_tools_to_openai_schema,
 enforce_rpm_limit,
@@ -191,8 +185,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):

 self._instance_id = str(uuid4())[:8]

-self.before_llm_call_hooks: list[BeforeLLMCallHookType] = []
-self.after_llm_call_hooks: list[AfterLLMCallHookType] = []
+self.before_llm_call_hooks: list[Callable] = []
+self.after_llm_call_hooks: list[Callable] = []
 self.before_llm_call_hooks.extend(get_before_llm_call_hooks())
 self.after_llm_call_hooks.extend(get_after_llm_call_hooks())

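This hunk swaps dedicated hook-type aliases for plain `list[Callable]`. A hedged sketch of what the typed form expresses (the context class and return types are assumptions inferred from the surrounding hunks):

```python
from typing import Callable, TypeAlias


class LLMCallHookContext:  # hypothetical stand-in for the real hook context
    response: str | None = None


# Old side: the aliases document what each hook may return.
BeforeLLMCallHookType: TypeAlias = Callable[[LLMCallHookContext], bool | None]
AfterLLMCallHookType: TypeAlias = Callable[[LLMCallHookContext], str | None]


class ExecutorSketch:
    def __init__(self) -> None:
        # The new side collapses both to list[Callable], trading static
        # checking of hook signatures for fewer imports.
        self.before_llm_call_hooks: list[BeforeLLMCallHookType] = []
        self.after_llm_call_hooks: list[AfterLLMCallHookType] = []
```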
@@ -305,21 +299,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 """Compatibility property for mixin - returns state messages."""
 return self._state.messages

-@messages.setter
-def messages(self, value: list[LLMMessage]) -> None:
-"""Set state messages."""
-self._state.messages = value
-
 @property
 def iterations(self) -> int:
 """Compatibility property for mixin - returns state iterations."""
 return self._state.iterations

-@iterations.setter
-def iterations(self, value: int) -> None:
-"""Set state iterations."""
-self._state.iterations = value
-
 @start()
 def initialize_reasoning(self) -> Literal["initialized"]:
 """Initialize the reasoning flow and emit agent start logs."""
@@ -341,7 +325,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 messages=list(self.state.messages),
 llm=self.llm,
 callbacks=self.callbacks,
-verbose=self.agent.verbose,
 )

 self.state.current_answer = formatted_answer
@@ -367,7 +350,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 from_agent=self.agent,
 response_model=None,
 executor_context=self,
-verbose=self.agent.verbose,
 )

 # Parse the LLM response
@@ -403,7 +385,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 return "context_error"
 if e.__class__.__module__.startswith("litellm"):
 raise e
-handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
+handle_unknown_error(self._printer, e)
 raise

 @listen("continue_reasoning_native")
@@ -438,7 +420,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 from_agent=self.agent,
 response_model=None,
 executor_context=self,
-verbose=self.agent.verbose,
 )

 # Check if the response is a list of tool calls
@@ -477,7 +458,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 return "context_error"
 if e.__class__.__module__.startswith("litellm"):
 raise e
-handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
+handle_unknown_error(self._printer, e)
 raise

 @router(call_llm_and_parse)
@@ -596,12 +577,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 "content": None,
 "tool_calls": tool_calls_to_report,
 }
-if all(
-type(tc).__qualname__ == "Part" for tc in self.state.pending_tool_calls
-):
-assistant_message["raw_tool_call_parts"] = list(
-self.state.pending_tool_calls
-)
 self.state.messages.append(assistant_message)

 # Now execute each tool
@@ -636,12 +611,14 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):

 # Check if tool has reached max usage count
 max_usage_reached = False
-if (
-original_tool
-and original_tool.max_usage_count is not None
-and original_tool.current_usage_count >= original_tool.max_usage_count
-):
-max_usage_reached = True
+if original_tool:
+if (
+hasattr(original_tool, "max_usage_count")
+and original_tool.max_usage_count is not None
+and original_tool.current_usage_count
+>= original_tool.max_usage_count
+):
+max_usage_reached = True

 # Check cache before executing
 from_cache = False
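The added lines replace a flat boolean expression with a nested guard that also `hasattr`-checks the tool. A small sketch of the added form, with names borrowed from the hunk:

```python
def reached_max_usage(tool) -> bool:
    """Nested guard from the added lines; hasattr protects tools without the attribute."""
    if tool:
        if (
            hasattr(tool, "max_usage_count")
            and tool.max_usage_count is not None
            and tool.current_usage_count >= tool.max_usage_count
        ):
            return True
    return False
```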
@@ -673,38 +650,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):

 track_delegation_if_needed(func_name, args_dict, self.task)

-structured_tool: CrewStructuredTool | None = None
-for structured in self.tools or []:
-if sanitize_tool_name(structured.name) == func_name:
-structured_tool = structured
-break
-
-hook_blocked = False
-before_hook_context = ToolCallHookContext(
-tool_name=func_name,
-tool_input=args_dict,
-tool=structured_tool,  # type: ignore[arg-type]
-agent=self.agent,
-task=self.task,
-crew=self.crew,
-)
-before_hooks = get_before_tool_call_hooks()
-try:
-for hook in before_hooks:
-hook_result = hook(before_hook_context)
-if hook_result is False:
-hook_blocked = True
-break
-except Exception as hook_error:
-if self.agent.verbose:
-self._printer.print(
-content=f"Error in before_tool_call hook: {hook_error}",
-color="red",
-)
-
-if hook_blocked:
-result = f"Tool execution blocked by hook. Tool: {func_name}"
-elif not from_cache and not max_usage_reached:
+# Execute the tool (only if not cached and not at max usage)
+if not from_cache and not max_usage_reached:
 result = "Tool not found"
 if func_name in self._available_functions:
 try:
@@ -714,7 +661,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 # Add to cache after successful execution (before string conversion)
 if self.tools_handler and self.tools_handler.cache:
 should_cache = True
-if original_tool:
+if (
+original_tool
+and hasattr(original_tool, "cache_function")
+and original_tool.cache_function
+):
 should_cache = original_tool.cache_function(
 args_dict, raw_result
 )
@@ -745,34 +696,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 error=e,
 ),
 )
-elif max_usage_reached and original_tool:
+elif max_usage_reached:
 # Return error message when max usage limit is reached
 result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
-
-# Execute after_tool_call hooks (even if blocked, to allow logging/monitoring)
-after_hook_context = ToolCallHookContext(
-tool_name=func_name,
-tool_input=args_dict,
-tool=structured_tool,  # type: ignore[arg-type]
-agent=self.agent,
-task=self.task,
-crew=self.crew,
-tool_result=result,
-)
-after_hooks = get_after_tool_call_hooks()
-try:
-for after_hook in after_hooks:
-after_hook_result = after_hook(after_hook_context)
-if after_hook_result is not None:
-result = after_hook_result
-after_hook_context.tool_result = result
-except Exception as hook_error:
-if self.agent.verbose:
-self._printer.print(
-content=f"Error in after_tool_call hook: {hook_error}",
-color="red",
-)

 # Emit tool usage finished event
 crewai_event_bus.emit(
 self,
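The removed lines above implemented a veto/replace pipeline around tool execution: before-hooks may block the call, after-hooks may rewrite the result. A simplified sketch of that flow, assuming a plain dict in place of `ToolCallHookContext`:

```python
from typing import Any, Callable

ToolHook = Callable[[dict[str, Any]], Any]  # simplified stand-in for ToolCallHookContext


def run_tool_with_hooks(
    func: Callable[..., Any],
    ctx: dict[str, Any],
    before: list[ToolHook],
    after: list[ToolHook],
) -> Any:
    # A before-hook vetoes the call by returning False.
    for hook in before:
        if hook(ctx) is False:
            return f"Tool execution blocked by hook. Tool: {ctx['tool_name']}"
    result = func(**ctx["tool_input"])
    ctx["tool_result"] = result
    # An after-hook replaces the result by returning a non-None value.
    for hook in after:
        replacement = hook(ctx)
        if replacement is not None:
            result = replacement
            ctx["tool_result"] = result
    return result
```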
@@ -906,17 +833,12 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 @listen("parser_error")
 def recover_from_parser_error(self) -> Literal["initialized"]:
 """Recover from output parser errors and retry."""
-if not self._last_parser_error:
-self.state.iterations += 1
-return "initialized"
-
 formatted_answer = handle_output_parser_exception(
 e=self._last_parser_error,
 messages=list(self.state.messages),
 iterations=self.state.iterations,
 log_error_after=self.log_error_after,
 printer=self._printer,
-verbose=self.agent.verbose,
 )

 if formatted_answer:
@@ -936,7 +858,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 llm=self.llm,
 callbacks=self.callbacks,
 i18n=self._i18n,
-verbose=self.agent.verbose,
 )

 self.state.iterations += 1
@@ -1028,7 +949,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 self._console.print(fail_text)
 raise
 except Exception as e:
-handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
+handle_unknown_error(self._printer, e)
 raise
 finally:
 self._is_executing = False
@@ -1113,7 +1034,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 self._console.print(fail_text)
 raise
 except Exception as e:
-handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
+handle_unknown_error(self._printer, e)
 raise
 finally:
 self._is_executing = False
@@ -118,20 +118,17 @@ class PersistenceDecorator:
 )
 except Exception as e:
 error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
-if verbose:
-cls._printer.print(error_msg, color="red")
+cls._printer.print(error_msg, color="red")
 logger.error(error_msg)
 raise RuntimeError(f"State persistence failed: {e!s}") from e
 except AttributeError as e:
 error_msg = LOG_MESSAGES["state_missing"]
-if verbose:
-cls._printer.print(error_msg, color="red")
+cls._printer.print(error_msg, color="red")
 logger.error(error_msg)
 raise ValueError(error_msg) from e
 except (TypeError, ValueError) as e:
 error_msg = LOG_MESSAGES["id_missing"]
-if verbose:
-cls._printer.print(error_msg, color="red")
+cls._printer.print(error_msg, color="red")
 logger.error(error_msg)
 raise ValueError(error_msg) from e

@@ -151,9 +151,7 @@ def _unwrap_function(function: Any) -> Any:
 return function


-def get_possible_return_constants(
-function: Any, verbose: bool = True
-) -> list[str] | None:
+def get_possible_return_constants(function: Any) -> list[str] | None:
 """Extract possible string return values from a function using AST parsing.

 This function analyzes the source code of a router method to identify
@@ -180,11 +178,10 @@ def get_possible_return_constants(
 # Can't get source code
 return None
 except Exception as e:
-if verbose:
-_printer.print(
-f"Error retrieving source code for function {function.__name__}: {e}",
-color="red",
-)
+_printer.print(
+f"Error retrieving source code for function {function.__name__}: {e}",
+color="red",
+)
 return None

 try:
@@ -193,28 +190,25 @@ def get_possible_return_constants(
 # Parse the source code into an AST
 code_ast = ast.parse(source)
 except IndentationError as e:
-if verbose:
-_printer.print(
-f"IndentationError while parsing source code of {function.__name__}: {e}",
-color="red",
-)
-_printer.print(f"Source code:\n{source}", color="yellow")
+_printer.print(
+f"IndentationError while parsing source code of {function.__name__}: {e}",
+color="red",
+)
+_printer.print(f"Source code:\n{source}", color="yellow")
 return None
 except SyntaxError as e:
-if verbose:
-_printer.print(
-f"SyntaxError while parsing source code of {function.__name__}: {e}",
-color="red",
-)
-_printer.print(f"Source code:\n{source}", color="yellow")
+_printer.print(
+f"SyntaxError while parsing source code of {function.__name__}: {e}",
+color="red",
+)
+_printer.print(f"Source code:\n{source}", color="yellow")
 return None
 except Exception as e:
-if verbose:
-_printer.print(
-f"Unexpected error while parsing source code of {function.__name__}: {e}",
-color="red",
-)
-_printer.print(f"Source code:\n{source}", color="yellow")
+_printer.print(
+f"Unexpected error while parsing source code of {function.__name__}: {e}",
+color="red",
+)
+_printer.print(f"Source code:\n{source}", color="yellow")
 return None

 return_values: set[str] = set()
@@ -394,17 +388,15 @@ def get_possible_return_constants(

 StateAttributeVisitor().visit(class_ast)
 except Exception as e:
-if verbose:
-_printer.print(
-f"Could not analyze class context for {function.__name__}: {e}",
-color="yellow",
-)
+_printer.print(
+f"Could not analyze class context for {function.__name__}: {e}",
+color="yellow",
+)
 except Exception as e:
-if verbose:
-_printer.print(
-f"Could not introspect class for {function.__name__}: {e}",
-color="yellow",
-)
+_printer.print(
+f"Could not introspect class for {function.__name__}: {e}",
+color="yellow",
+)

 VariableAssignmentVisitor().visit(code_ast)
 ReturnVisitor().visit(code_ast)

@@ -9,7 -6 @@ from crewai.utilities.printer import Printer

 if TYPE_CHECKING:
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
-from crewai.experimental.agent_executor import AgentExecutor
 from crewai.lite_agent import LiteAgent
 from crewai.llms.base_llm import BaseLLM
 from crewai.utilities.types import LLMMessage
@@ -42,7 +41,7 @@ class LLMCallHookContext:
 Can be modified by returning a new string from after_llm_call hook.
 """

-executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None
+executor: CrewAgentExecutor | LiteAgent | None
 messages: list[LLMMessage]
 agent: Any
 task: Any
@@ -53,7 +52,7 @@ class LLMCallHookContext:

 def __init__(
 self,
-executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None,
+executor: CrewAgentExecutor | LiteAgent | None = None,
 response: str | None = None,
 messages: list[LLMMessage] | None = None,
 llm: BaseLLM | str | Any | None = None,  # TODO: look into
@@ -72,13 +72,13 @@ from crewai.utilities.agent_utils import (
 from crewai.utilities.converter import (
 Converter,
 ConverterError,
-generate_model_description,
 )
 from crewai.utilities.guardrail import process_guardrail
 from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
 from crewai.utilities.i18n import I18N, get_i18n
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
+from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.tool_utils import execute_tool_and_check_finality
 from crewai.utilities.types import LLMMessage
@@ -344,12 +344,11 @@ class LiteAgent(FlowTrackable, BaseModel):
 )

 except Exception as e:
-if self.verbose:
-self._printer.print(
-content="Agent failed to reach a final answer. This is likely a bug - please report it.",
-color="red",
-)
-handle_unknown_error(self._printer, e, verbose=self.verbose)
+self._printer.print(
+content="Agent failed to reach a final answer. This is likely a bug - please report it.",
+color="red",
+)
+handle_unknown_error(self._printer, e)
 # Emit error event
 crewai_event_bus.emit(
 self,
@@ -397,11 +396,10 @@ class LiteAgent(FlowTrackable, BaseModel):
 if isinstance(result, BaseModel):
 formatted_result = result
 except ConverterError as e:
-if self.verbose:
-self._printer.print(
-content=f"Failed to parse output into response format after retries: {e.message}",
-color="yellow",
-)
+self._printer.print(
+content=f"Failed to parse output into response format after retries: {e.message}",
+color="yellow",
+)

 # Calculate token usage metrics
 if isinstance(self.llm, BaseLLM):
@@ -607,7 +605,6 @@ class LiteAgent(FlowTrackable, BaseModel):
 messages=self._messages,
 llm=cast(LLM, self.llm),
 callbacks=self._callbacks,
-verbose=self.verbose,
 )

 enforce_rpm_limit(self.request_within_rpm_limit)
@@ -620,7 +617,6 @@ class LiteAgent(FlowTrackable, BaseModel):
 printer=self._printer,
 from_agent=self,
 executor_context=self,
-verbose=self.verbose,
 )

 except Exception as e:
@@ -650,18 +646,16 @@ class LiteAgent(FlowTrackable, BaseModel):

 self._append_message(formatted_answer.text, role="assistant")
 except OutputParserError as e:  # noqa: PERF203
-if self.verbose:
-self._printer.print(
-content="Failed to parse LLM output. Retrying...",
-color="yellow",
-)
+self._printer.print(
+content="Failed to parse LLM output. Retrying...",
+color="yellow",
+)
 formatted_answer = handle_output_parser_exception(
 e=e,
 messages=self._messages,
 iterations=self._iterations,
 log_error_after=3,
 printer=self._printer,
-verbose=self.verbose,
 )

 except Exception as e:
@@ -676,10 +670,9 @@ class LiteAgent(FlowTrackable, BaseModel):
 llm=cast(LLM, self.llm),
 callbacks=self._callbacks,
 i18n=self.i18n,
-verbose=self.verbose,
 )
 continue
-handle_unknown_error(self._printer, e, verbose=self.verbose)
+handle_unknown_error(self._printer, e)
 raise e

 finally:
@@ -768,10 +768,6 @@ class LLM(BaseLLM):

 # Extract content from the chunk
 chunk_content = None
-response_id = None
-
-if hasattr(chunk,'id'):
-response_id = chunk.id

 # Safely extract content from various chunk formats
 try:
@@ -827,7 +823,6 @@ class LLM(BaseLLM):
 available_functions=available_functions,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id
 )

 if result is not None:
@@ -849,7 +844,6 @@ class LLM(BaseLLM):
 from_task=from_task,
 from_agent=from_agent,
 call_type=LLMCallType.LLM_CALL,
-response_id=response_id
 ),
 )
 # --- 4) Fallback to non-streaming if no content received
@@ -1027,7 +1021,6 @@ class LLM(BaseLLM):
 available_functions: dict[str, Any] | None = None,
 from_task: Task | None = None,
 from_agent: Agent | None = None,
-response_id: str | None = None,
 ) -> Any:
 for tool_call in tool_calls:
 current_tool_accumulator = accumulated_tool_args[tool_call.index]
@@ -1048,7 +1041,6 @@ class LLM(BaseLLM):
 from_task=from_task,
 from_agent=from_agent,
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id
 ),
 )

@@ -1410,13 +1402,11 @@ class LLM(BaseLLM):

 params["stream"] = True
 params["stream_options"] = {"include_usage": True}
-response_id = None

 try:
 async for chunk in await litellm.acompletion(**params):
 chunk_count += 1
 chunk_content = None
-response_id = chunk.id if hasattr(chunk, "id") else None

 try:
 choices = None
@@ -1476,7 +1466,6 @@ class LLM(BaseLLM):
 chunk=chunk_content,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id
 ),
 )

@@ -1514,7 +1503,6 @@ class LLM(BaseLLM):
 available_functions=available_functions,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id,
 )
 if result is not None:
 return result
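These hunks remove the `response_id` that was threaded onto every stream-chunk event so consumers could group chunks by response. A rough sketch of that grouping idea, assuming provider chunks expose `.id` and `.delta.content` (generic names, not this library's API):

```python
def iter_chunks(stream):
    """Group streamed deltas under the id of the response that produced them."""
    response_id = None
    for chunk in stream:
        if hasattr(chunk, "id"):
            response_id = chunk.id
        delta = getattr(chunk, "delta", None)
        content = getattr(delta, "content", None)
        if content:
            yield {"chunk": content, "response_id": response_id}
```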
@@ -404,7 +404,6 @@ class BaseLLM(ABC):
 from_agent: Agent | None = None,
 tool_call: dict[str, Any] | None = None,
 call_type: LLMCallType | None = None,
-response_id: str | None = None,
 ) -> None:
 """Emit stream chunk event.

@@ -414,7 +413,6 @@ class BaseLLM(ABC):
 from_agent: The agent that initiated the call.
 tool_call: Tool call information if this is a tool call chunk.
 call_type: The type of LLM call (LLM_CALL or TOOL_CALL).
-response_id: Unique ID for a particular LLM response, chunks have same response_id.
 """
 if not hasattr(crewai_event_bus, "emit"):
 raise ValueError("crewai_event_bus does not have an emit method") from None
@@ -427,7 +425,6 @@ class BaseLLM(ABC):
 from_task=from_task,
 from_agent=from_agent,
 call_type=call_type,
-response_id=response_id,
 ),
 )

@@ -497,7 +494,7 @@ class BaseLLM(ABC):
 from_agent=from_agent,
 )

-return str(result) if not isinstance(result, str) else result
+return result

 except Exception as e:
 error_msg = f"Error executing function '{function_name}': {e!s}"
@@ -737,25 +734,22 @@ class BaseLLM(ABC):
 task=None,
 crew=None,
 )
-verbose = getattr(from_agent, "verbose", True) if from_agent else True
 printer = Printer()

 try:
 for hook in before_hooks:
 result = hook(hook_context)
 if result is False:
-if verbose:
-printer.print(
-content="LLM call blocked by before_llm_call hook",
-color="yellow",
-)
+printer.print(
+content="LLM call blocked by before_llm_call hook",
+color="yellow",
+)
 return False
 except Exception as e:
-if verbose:
-printer.print(
-content=f"Error in before_llm_call hook: {e}",
-color="yellow",
-)
+printer.print(
+content=f"Error in before_llm_call hook: {e}",
+color="yellow",
+)

 return True

@@ -808,7 +802,6 @@ class BaseLLM(ABC):
 crew=None,
 response=response,
 )
-verbose = getattr(from_agent, "verbose", True) if from_agent else True
 printer = Printer()
 modified_response = response

@@ -819,10 +812,9 @@ class BaseLLM(ABC):
 modified_response = result
 hook_context.response = modified_response
 except Exception as e:
-if verbose:
-printer.print(
-content=f"Error in after_llm_call hook: {e}",
-color="yellow",
-)
+printer.print(
+content=f"Error in after_llm_call hook: {e}",
+color="yellow",
+)

 return modified_response

@@ -3,8 +3,9 @@ from __future__ import annotations
 import json
 import logging
 import os
-from typing import TYPE_CHECKING, Any, Final, Literal, TypeGuard, cast
+from typing import TYPE_CHECKING, Any, Literal, cast

+from anthropic.types import ThinkingBlock
 from pydantic import BaseModel

 from crewai.events.types.llm_events import LLMCallType
@@ -21,9 +22,8 @@ if TYPE_CHECKING:
 from crewai.llms.hooks.base import BaseInterceptor

 try:
-from anthropic import Anthropic, AsyncAnthropic, transform_schema
+from anthropic import Anthropic, AsyncAnthropic
 from anthropic.types import Message, TextBlock, ThinkingBlock, ToolUseBlock
-from anthropic.types.beta import BetaMessage
 import httpx
 except ImportError:
 raise ImportError(
@@ -31,62 +31,7 @@ except ImportError:
 ) from None


-ANTHROPIC_FILES_API_BETA: Final = "files-api-2025-04-14"
-ANTHROPIC_STRUCTURED_OUTPUTS_BETA: Final = "structured-outputs-2025-11-13"
-
-NATIVE_STRUCTURED_OUTPUT_MODELS: Final[
-tuple[
-Literal["claude-sonnet-4-5"],
-Literal["claude-sonnet-4.5"],
-Literal["claude-opus-4-5"],
-Literal["claude-opus-4.5"],
-Literal["claude-opus-4-1"],
-Literal["claude-opus-4.1"],
-Literal["claude-haiku-4-5"],
-Literal["claude-haiku-4.5"],
-]
-] = (
-"claude-sonnet-4-5",
-"claude-sonnet-4.5",
-"claude-opus-4-5",
-"claude-opus-4.5",
-"claude-opus-4-1",
-"claude-opus-4.1",
-"claude-haiku-4-5",
-"claude-haiku-4.5",
-)
-
-
-def _supports_native_structured_outputs(model: str) -> bool:
-"""Check if the model supports native structured outputs.
-
-Native structured outputs are only available for Claude 4.5 models
-(Sonnet 4.5, Opus 4.5, Opus 4.1, Haiku 4.5).
-Other models require the tool-based fallback approach.
-
-Args:
-model: The model name/identifier.
-
-Returns:
-True if the model supports native structured outputs.
-"""
-model_lower = model.lower()
-return any(prefix in model_lower for prefix in NATIVE_STRUCTURED_OUTPUT_MODELS)
-
-
-def _is_pydantic_model_class(obj: Any) -> TypeGuard[type[BaseModel]]:
-"""Check if an object is a Pydantic model class.
-
-This distinguishes between Pydantic model classes that support structured
-outputs (have model_json_schema) and plain dicts like {"type": "json_object"}.
-
-Args:
-obj: The object to check.
-
-Returns:
-True if obj is a Pydantic model class.
-"""
-return isinstance(obj, type) and issubclass(obj, BaseModel)
+ANTHROPIC_FILES_API_BETA = "files-api-2025-04-14"


 def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool:
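The removed helpers above gated the native structured-output path to specific Claude 4.5-family models. A sketch reconstructing them from the removed lines:

```python
from typing import Any

from pydantic import BaseModel

# Prefixes copied from the removed NATIVE_STRUCTURED_OUTPUT_MODELS tuple.
NATIVE_PREFIXES = (
    "claude-sonnet-4-5", "claude-sonnet-4.5",
    "claude-opus-4-5", "claude-opus-4.5",
    "claude-opus-4-1", "claude-opus-4.1",
    "claude-haiku-4-5", "claude-haiku-4.5",
)


def supports_native_structured_outputs(model: str) -> bool:
    # Substring match, so versioned ids like "claude-sonnet-4-5-20250929" qualify too.
    model_lower = model.lower()
    return any(prefix in model_lower for prefix in NATIVE_PREFIXES)


def is_pydantic_model_class(obj: Any) -> bool:
    # Distinguishes model classes (which expose model_json_schema) from
    # plain dicts such as {"type": "json_object"}.
    return isinstance(obj, type) and issubclass(obj, BaseModel)
```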
@@ -139,7 +84,6 @@ class AnthropicCompletion(BaseLLM):
 client_params: dict[str, Any] | None = None,
 interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
 thinking: AnthropicThinkingConfig | None = None,
-response_format: type[BaseModel] | None = None,
 **kwargs: Any,
 ):
 """Initialize Anthropic chat completion client.
@@ -157,8 +101,6 @@ class AnthropicCompletion(BaseLLM):
 stream: Enable streaming responses
 client_params: Additional parameters for the Anthropic client
 interceptor: HTTP interceptor for modifying requests/responses at transport level.
-response_format: Pydantic model for structured output. When provided, responses
-will be validated against this model schema.
 **kwargs: Additional parameters
 """
 super().__init__(
@@ -189,7 +131,6 @@ class AnthropicCompletion(BaseLLM):
 self.stop_sequences = stop_sequences or []
 self.thinking = thinking
 self.previous_thinking_blocks: list[ThinkingBlock] = []
-self.response_format = response_format
 # Model-specific settings
 self.is_claude_3 = "claude-3" in model.lower()
 self.supports_tools = True
@@ -290,8 +231,6 @@ class AnthropicCompletion(BaseLLM):
 formatted_messages, system_message, tools
 )

-effective_response_model = response_model or self.response_format
-
 # Handle streaming vs non-streaming
 if self.stream:
 return self._handle_streaming_completion(
@@ -299,7 +238,7 @@ class AnthropicCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 return self._handle_completion(
@@ -307,7 +246,7 @@ class AnthropicCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 except Exception as e:
@@ -359,15 +298,13 @@ class AnthropicCompletion(BaseLLM):
 formatted_messages, system_message, tools
 )

-effective_response_model = response_model or self.response_format
-
 if self.stream:
 return await self._ahandle_streaming_completion(
 completion_params,
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 return await self._ahandle_completion(
@@ -375,7 +312,7 @@ class AnthropicCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 except Exception as e:
@@ -628,40 +565,22 @@ class AnthropicCompletion(BaseLLM):
 response_model: type[BaseModel] | None = None,
 ) -> str | Any:
 """Handle non-streaming message completion."""
+if response_model:
+structured_tool = {
+"name": "structured_output",
+"description": "Returns structured data according to the schema",
+"input_schema": response_model.model_json_schema(),
+}
+
+params["tools"] = [structured_tool]
+params["tool_choice"] = {"type": "tool", "name": "structured_output"}
+
 uses_file_api = _contains_file_id_reference(params.get("messages", []))
-betas: list[str] = []
-use_native_structured_output = False
-
-if uses_file_api:
-betas.append(ANTHROPIC_FILES_API_BETA)
-
-extra_body: dict[str, Any] | None = None
-if _is_pydantic_model_class(response_model):
-schema = transform_schema(response_model.model_json_schema())
-if _supports_native_structured_outputs(self.model):
-use_native_structured_output = True
-betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
-extra_body = {
-"output_format": {
-"type": "json_schema",
-"schema": schema,
-}
-}
-else:
-structured_tool = {
-"name": "structured_output",
-"description": "Output the structured response",
-"input_schema": schema,
-}
-params["tools"] = [structured_tool]
-params["tool_choice"] = {"type": "tool", "name": "structured_output"}

 try:
-if betas:
-params["betas"] = betas
-response = self.client.beta.messages.create(
-**params, extra_body=extra_body
-)
+if uses_file_api:
+params["betas"] = [ANTHROPIC_FILES_API_BETA]
+response = self.client.beta.messages.create(**params)
 else:
 response = self.client.messages.create(**params)

@@ -674,34 +593,22 @@ class AnthropicCompletion(BaseLLM):
 usage = self._extract_anthropic_token_usage(response)
 self._track_token_usage_internal(usage)

-if _is_pydantic_model_class(response_model) and response.content:
-if use_native_structured_output:
-for block in response.content:
-if isinstance(block, TextBlock):
-structured_json = block.text
-self._emit_call_completed_event(
-response=structured_json,
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=params["messages"],
-)
-return structured_json
-else:
-for block in response.content:
-if (
-isinstance(block, ToolUseBlock)
-and block.name == "structured_output"
-):
-structured_json = json.dumps(block.input)
-self._emit_call_completed_event(
-response=structured_json,
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=params["messages"],
-)
-return structured_json
+if response_model and response.content:
+tool_uses = [
+block for block in response.content if isinstance(block, ToolUseBlock)
+]
+if tool_uses and tool_uses[0].name == "structured_output":
+structured_data = tool_uses[0].input
+structured_json = json.dumps(structured_data)
+self._emit_call_completed_event(
+response=structured_json,
+call_type=LLMCallType.LLM_CALL,
+from_task=from_task,
+from_agent=from_agent,
+messages=params["messages"],
+)
+
+return structured_json

 # Check if Claude wants to use tools
 if response.content:
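Both sides of these hunks share the tool-based fallback for structured output: force a synthetic `structured_output` tool call whose input schema is the Pydantic model's JSON schema, then read the result back from the first matching tool-use block. A sketch under those assumptions:

```python
import json
from typing import Any

from pydantic import BaseModel


def structured_output_tool(response_model: type[BaseModel]) -> dict[str, Any]:
    """Synthetic tool whose input schema is the response model's JSON schema."""
    return {
        "name": "structured_output",
        "description": "Returns structured data according to the schema",
        "input_schema": response_model.model_json_schema(),
    }


def extract_structured_json(tool_uses: list[Any]) -> str | None:
    """Serialize the first structured_output tool call back to a JSON string."""
    for block in tool_uses:
        if getattr(block, "name", None) == "structured_output":
            return json.dumps(block.input)
    return None
```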
@@ -771,31 +678,17 @@ class AnthropicCompletion(BaseLLM):
 from_task: Any | None = None,
 from_agent: Any | None = None,
 response_model: type[BaseModel] | None = None,
-) -> str | Any:
+) -> str:
 """Handle streaming message completion."""
-betas: list[str] = []
-use_native_structured_output = False
+if response_model:
+structured_tool = {
+"name": "structured_output",
+"description": "Returns structured data according to the schema",
+"input_schema": response_model.model_json_schema(),
+}
+
-extra_body: dict[str, Any] | None = None
-if _is_pydantic_model_class(response_model):
-schema = transform_schema(response_model.model_json_schema())
-if _supports_native_structured_outputs(self.model):
-use_native_structured_output = True
-betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
-extra_body = {
-"output_format": {
-"type": "json_schema",
-"schema": schema,
-}
-}
-else:
-structured_tool = {
-"name": "structured_output",
-"description": "Output the structured response",
-"input_schema": schema,
-}
-params["tools"] = [structured_tool]
-params["tool_choice"] = {"type": "tool", "name": "structured_output"}
+params["tools"] = [structured_tool]
+params["tool_choice"] = {"type": "tool", "name": "structured_output"}

 full_response = ""

@@ -803,22 +696,11 @@ class AnthropicCompletion(BaseLLM):
 # (the SDK sets it internally)
 stream_params = {k: v for k, v in params.items() if k != "stream"}

-if betas:
-stream_params["betas"] = betas
-
 current_tool_calls: dict[int, dict[str, Any]] = {}

-stream_context = (
-self.client.beta.messages.stream(**stream_params, extra_body=extra_body)
-if betas
-else self.client.messages.stream(**stream_params)
-)
-with stream_context as stream:
-response_id = None
+# Make streaming API call
+with self.client.messages.stream(**stream_params) as stream:
 for event in stream:
-if hasattr(event, "message") and hasattr(event.message, "id"):
-response_id = event.message.id
-
 if hasattr(event, "delta") and hasattr(event.delta, "text"):
 text_delta = event.delta.text
 full_response += text_delta
@@ -826,7 +708,6 @@ class AnthropicCompletion(BaseLLM):
 chunk=text_delta,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id,
 )

 if event.type == "content_block_start":
@@ -853,7 +734,6 @@ class AnthropicCompletion(BaseLLM):
 "index": block_index,
 },
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id,
 )
 elif event.type == "content_block_delta":
 if event.delta.type == "input_json_delta":
@@ -877,10 +757,9 @@ class AnthropicCompletion(BaseLLM):
 "index": block_index,
 },
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id,
 )

-final_message = stream.get_final_message()
+final_message: Message = stream.get_final_message()

 thinking_blocks: list[ThinkingBlock] = []
 if final_message.content:
@@ -895,30 +774,25 @@ class AnthropicCompletion(BaseLLM):
 usage = self._extract_anthropic_token_usage(final_message)
 self._track_token_usage_internal(usage)

-if _is_pydantic_model_class(response_model):
-if use_native_structured_output:
+if response_model and final_message.content:
+tool_uses = [
+block
+for block in final_message.content
+if isinstance(block, ToolUseBlock)
+]
+if tool_uses and tool_uses[0].name == "structured_output":
+structured_data = tool_uses[0].input
+structured_json = json.dumps(structured_data)
+
 self._emit_call_completed_event(
-response=full_response,
+response=structured_json,
 call_type=LLMCallType.LLM_CALL,
 from_task=from_task,
 from_agent=from_agent,
 messages=params["messages"],
 )
-return full_response
-for block in final_message.content:
-if (
-isinstance(block, ToolUseBlock)
-and block.name == "structured_output"
-):
-structured_json = json.dumps(block.input)
-self._emit_call_completed_event(
-response=structured_json,
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=params["messages"],
-)
-return structured_json

+return structured_json

 if final_message.content:
 tool_uses = [
@@ -928,9 +802,11 @@ class AnthropicCompletion(BaseLLM):
 ]

 if tool_uses:
+# If no available_functions, return tool calls for executor to handle
 if not available_functions:
 return list(tool_uses)
+
 # Handle tool use conversation flow internally
 return self._handle_tool_use_conversation(
 final_message,
 tool_uses,
@@ -940,8 +816,10 @@ class AnthropicCompletion(BaseLLM):
 from_agent,
 )

+# Apply stop words to full response
+full_response = self._apply_stop_words(full_response)

 # Emit completion event and return full response
 self._emit_call_completed_event(
 response=full_response,
 call_type=LLMCallType.LLM_CALL,
@@ -999,7 +877,7 @@ class AnthropicCompletion(BaseLLM):

 def _handle_tool_use_conversation(
 self,
-initial_response: Message | BetaMessage,
+initial_response: Message,
 tool_uses: list[ToolUseBlock],
 params: dict[str, Any],
 available_functions: dict[str, Any],
@@ -1117,40 +995,22 @@ class AnthropicCompletion(BaseLLM):
 response_model: type[BaseModel] | None = None,
 ) -> str | Any:
 """Handle non-streaming async message completion."""
+if response_model:
+structured_tool = {
+"name": "structured_output",
+"description": "Returns structured data according to the schema",
+"input_schema": response_model.model_json_schema(),
+}
+
+params["tools"] = [structured_tool]
+params["tool_choice"] = {"type": "tool", "name": "structured_output"}
+
 uses_file_api = _contains_file_id_reference(params.get("messages", []))
-betas: list[str] = []
-use_native_structured_output = False
-
-if uses_file_api:
-betas.append(ANTHROPIC_FILES_API_BETA)
-
-extra_body: dict[str, Any] | None = None
-if _is_pydantic_model_class(response_model):
-schema = transform_schema(response_model.model_json_schema())
-if _supports_native_structured_outputs(self.model):
-use_native_structured_output = True
-betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
-extra_body = {
-"output_format": {
-"type": "json_schema",
-"schema": schema,
-}
-}
-else:
-structured_tool = {
-"name": "structured_output",
-"description": "Output the structured response",
-"input_schema": schema,
-}
-params["tools"] = [structured_tool]
-params["tool_choice"] = {"type": "tool", "name": "structured_output"}

 try:
-if betas:
-params["betas"] = betas
-response = await self.async_client.beta.messages.create(
-**params, extra_body=extra_body
-)
+if uses_file_api:
+params["betas"] = [ANTHROPIC_FILES_API_BETA]
+response = await self.async_client.beta.messages.create(**params)
 else:
 response = await self.async_client.messages.create(**params)

@@ -1163,34 +1023,23 @@ class AnthropicCompletion(BaseLLM):
 usage = self._extract_anthropic_token_usage(response)
 self._track_token_usage_internal(usage)

-if _is_pydantic_model_class(response_model) and response.content:
-if use_native_structured_output:
-for block in response.content:
-if isinstance(block, TextBlock):
-structured_json = block.text
-self._emit_call_completed_event(
-response=structured_json,
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=params["messages"],
-)
-return structured_json
-else:
-for block in response.content:
-if (
-isinstance(block, ToolUseBlock)
-and block.name == "structured_output"
-):
-structured_json = json.dumps(block.input)
-self._emit_call_completed_event(
-response=structured_json,
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=params["messages"],
-)
-return structured_json
+if response_model and response.content:
+tool_uses = [
+block for block in response.content if isinstance(block, ToolUseBlock)
+]
+if tool_uses and tool_uses[0].name == "structured_output":
+structured_data = tool_uses[0].input
+structured_json = json.dumps(structured_data)
+
+self._emit_call_completed_event(
+response=structured_json,
+call_type=LLMCallType.LLM_CALL,
+from_task=from_task,
+from_agent=from_agent,
+messages=params["messages"],
+)
+
+return structured_json

 if response.content:
 tool_uses = [
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
) -> str:
|
||||
"""Handle async streaming message completion."""
|
||||
betas: list[str] = []
|
||||
use_native_structured_output = False
|
||||
if response_model:
|
||||
structured_tool = {
|
||||
"name": "structured_output",
|
||||
"description": "Returns structured data according to the schema",
|
||||
"input_schema": response_model.model_json_schema(),
|
||||
}
|
||||
|
||||
extra_body: dict[str, Any] | None = None
|
||||
if _is_pydantic_model_class(response_model):
|
||||
schema = transform_schema(response_model.model_json_schema())
|
||||
if _supports_native_structured_outputs(self.model):
|
||||
use_native_structured_output = True
|
||||
betas.append(ANTHROPIC_STRUCTURED_OUTPUTS_BETA)
|
||||
extra_body = {
|
||||
"output_format": {
|
||||
"type": "json_schema",
|
||||
"schema": schema,
|
||||
}
|
||||
}
|
||||
else:
|
||||
structured_tool = {
|
||||
"name": "structured_output",
|
||||
"description": "Output the structured response",
|
||||
"input_schema": schema,
|
||||
}
|
||||
params["tools"] = [structured_tool]
|
||||
params["tool_choice"] = {"type": "tool", "name": "structured_output"}
|
||||
params["tools"] = [structured_tool]
|
||||
params["tool_choice"] = {"type": "tool", "name": "structured_output"}
|
||||
|
||||
full_response = ""
|
||||
|
||||
stream_params = {k: v for k, v in params.items() if k != "stream"}
|
||||
|
||||
if betas:
|
||||
stream_params["betas"] = betas
|
||||
|
||||
current_tool_calls: dict[int, dict[str, Any]] = {}
|
||||
|
||||
stream_context = (
|
||||
self.async_client.beta.messages.stream(
|
||||
**stream_params, extra_body=extra_body
|
||||
)
|
||||
if betas
|
||||
else self.async_client.messages.stream(**stream_params)
|
||||
)
|
||||
async with stream_context as stream:
|
||||
response_id = None
|
||||
async with self.async_client.messages.stream(**stream_params) as stream:
|
||||
async for event in stream:
|
||||
if hasattr(event, "message") and hasattr(event.message, "id"):
|
||||
response_id = event.message.id
|
||||
|
||||
if hasattr(event, "delta") and hasattr(event.delta, "text"):
|
||||
text_delta = event.delta.text
|
||||
full_response += text_delta
|
||||
@@ -1301,7 +1122,6 @@ class AnthropicCompletion(BaseLLM):
|
||||
chunk=text_delta,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
response_id=response_id,
|
||||
)
|
||||
|
||||
if event.type == "content_block_start":
|
||||
@@ -1328,7 +1148,6 @@ class AnthropicCompletion(BaseLLM):
|
||||
"index": block_index,
|
||||
},
|
||||
call_type=LLMCallType.TOOL_CALL,
|
||||
response_id=response_id,
|
||||
)
|
||||
elif event.type == "content_block_delta":
|
||||
if event.delta.type == "input_json_delta":
|
||||
@@ -1352,38 +1171,32 @@ class AnthropicCompletion(BaseLLM):
|
||||
"index": block_index,
|
||||
},
|
||||
call_type=LLMCallType.TOOL_CALL,
|
||||
response_id=response_id,
|
||||
)
|
||||
|
||||
final_message = await stream.get_final_message()
|
||||
final_message: Message = await stream.get_final_message()
|
||||
|
||||
usage = self._extract_anthropic_token_usage(final_message)
|
||||
self._track_token_usage_internal(usage)
|
||||
|
||||
if _is_pydantic_model_class(response_model):
|
||||
if use_native_structured_output:
|
||||
if response_model and final_message.content:
|
||||
tool_uses = [
|
||||
block
|
||||
for block in final_message.content
|
||||
if isinstance(block, ToolUseBlock)
|
||||
]
|
||||
if tool_uses and tool_uses[0].name == "structured_output":
|
||||
structured_data = tool_uses[0].input
|
||||
structured_json = json.dumps(structured_data)
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=full_response,
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return full_response
|
||||
for block in final_message.content:
|
||||
if (
|
||||
isinstance(block, ToolUseBlock)
|
||||
and block.name == "structured_output"
|
||||
):
|
||||
structured_json = json.dumps(block.input)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_json
|
||||
|
||||
return structured_json
|
||||
|
||||
if final_message.content:
|
||||
tool_uses = [
|
||||
@@ -1393,6 +1206,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
]
|
||||
|
||||
if tool_uses:
|
||||
# If no available_functions, return tool calls for executor to handle
|
||||
if not available_functions:
|
||||
return list(tool_uses)
|
||||
|
||||
@@ -1419,7 +1233,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
async def _ahandle_tool_use_conversation(
|
||||
self,
|
||||
initial_response: Message | BetaMessage,
|
||||
initial_response: Message,
|
||||
tool_uses: list[ToolUseBlock],
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any],
|
||||
@@ -1528,9 +1342,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
return int(200000 * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
@staticmethod
|
||||
def _extract_anthropic_token_usage(
|
||||
response: Message | BetaMessage,
|
||||
) -> dict[str, Any]:
|
||||
def _extract_anthropic_token_usage(response: Message) -> dict[str, Any]:
|
||||
"""Extract token usage from Anthropic response."""
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
usage = response.usage
|
||||
|
||||
@@ -92,7 +92,6 @@ class AzureCompletion(BaseLLM):
 stop: list[str] | None = None,
 stream: bool = False,
 interceptor: BaseInterceptor[Any, Any] | None = None,
-response_format: type[BaseModel] | None = None,
 **kwargs: Any,
 ):
 """Initialize Azure AI Inference chat completion client.
@@ -112,9 +111,6 @@ class AzureCompletion(BaseLLM):
 stop: Stop sequences
 stream: Enable streaming responses
 interceptor: HTTP interceptor (not yet supported for Azure).
-response_format: Pydantic model for structured output. Used as default when
-response_model is not passed to call()/acall() methods.
-Only works with OpenAI models deployed on Azure.
 **kwargs: Additional parameters
 """
 if interceptor is not None:
@@ -169,7 +165,6 @@ class AzureCompletion(BaseLLM):
 self.presence_penalty = presence_penalty
 self.max_tokens = max_tokens
 self.stream = stream
-self.response_format = response_format

 self.is_openai_model = any(
 prefix in model.lower() for prefix in ["gpt-", "o1-", "text-"]
@@ -303,7 +298,6 @@ class AzureCompletion(BaseLLM):
 from_task=from_task,
 from_agent=from_agent,
 )
-effective_response_model = response_model or self.response_format

 # Format messages for Azure
 formatted_messages = self._format_messages_for_azure(messages)
@@ -313,7 +307,7 @@ class AzureCompletion(BaseLLM):

 # Prepare completion parameters
 completion_params = self._prepare_completion_params(
-formatted_messages, tools, effective_response_model
+formatted_messages, tools, response_model
 )

 # Handle streaming vs non-streaming
@@ -323,7 +317,7 @@ class AzureCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 return self._handle_completion(
@@ -331,7 +325,7 @@ class AzureCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 except Exception as e:
@@ -370,12 +364,11 @@ class AzureCompletion(BaseLLM):
 from_task=from_task,
 from_agent=from_agent,
 )
-effective_response_model = response_model or self.response_format

 formatted_messages = self._format_messages_for_azure(messages)

 completion_params = self._prepare_completion_params(
-formatted_messages, tools, effective_response_model
+formatted_messages, tools, response_model
 )

 if self.stream:
@@ -384,7 +377,7 @@ class AzureCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 return await self._ahandle_completion(
@@ -392,7 +385,7 @@ class AzureCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
+response_model,
 )

 except Exception as e:
@@ -733,7 +726,6 @@ class AzureCompletion(BaseLLM):
 """
 if update.choices:
 choice = update.choices[0]
-response_id = update.id if hasattr(update, "id") else None
 if choice.delta and choice.delta.content:
 content_delta = choice.delta.content
 full_response += content_delta
@@ -741,7 +733,6 @@ class AzureCompletion(BaseLLM):
 chunk=content_delta,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id,
 )

 if choice.delta and choice.delta.tool_calls:
@@ -776,7 +767,6 @@ class AzureCompletion(BaseLLM):
 "index": idx,
 },
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id,
 )

 return full_response

@@ -16,7 -6 @@ from crewai.utilities.agent_utils import is_context_length_exceeded
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
 LLMContextLengthExceededError,
 )
-from crewai.utilities.pydantic_schema_utils import generate_model_description
 from crewai.utilities.types import LLMMessage


@@ -173,7 +172,6 @@ class BedrockCompletion(BaseLLM):
 additional_model_request_fields: dict[str, Any] | None = None,
 additional_model_response_field_paths: list[str] | None = None,
 interceptor: BaseInterceptor[Any, Any] | None = None,
-response_format: type[BaseModel] | None = None,
 **kwargs: Any,
 ) -> None:
 """Initialize AWS Bedrock completion client.
@@ -194,8 +192,6 @@ class BedrockCompletion(BaseLLM):
 additional_model_request_fields: Model-specific request parameters
 additional_model_response_field_paths: Custom response field paths
 interceptor: HTTP interceptor (not yet supported for Bedrock).
-response_format: Pydantic model for structured output. Used as default when
-response_model is not passed to call()/acall() methods.
 **kwargs: Additional parameters
 """
 if interceptor is not None:
@@ -251,8 +247,7 @@ class BedrockCompletion(BaseLLM):
 self.top_p = top_p
 self.top_k = top_k
 self.stream = stream
-self.stop_sequences = stop_sequences
-self.response_format = response_format
+self.stop_sequences = stop_sequences or []

 # Store advanced features (optional)
 self.guardrail_config = guardrail_config
@@ -272,7 +267,7 @@ class BedrockCompletion(BaseLLM):
 @property
 def stop(self) -> list[str]:
 """Get stop sequences sent to the API."""
-return [] if self.stop_sequences is None else list(self.stop_sequences)
+return list(self.stop_sequences)

 @stop.setter
 def stop(self, value: Sequence[str] | str | None) -> None:
@@ -304,8 +299,6 @@ class BedrockCompletion(BaseLLM):
 response_model: type[BaseModel] | None = None,
 ) -> str | Any:
 """Call AWS Bedrock Converse API."""
-effective_response_model = response_model or self.response_format
-
 try:
 # Emit call started event
 self._emit_call_started_event(
@@ -382,7 +375,6 @@ class BedrockCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
 )

 return self._handle_converse(
@@ -391,7 +383,6 @@ class BedrockCompletion(BaseLLM):
 available_functions,
 from_task,
 from_agent,
-effective_response_model,
 )

 except Exception as e:
@@ -434,8 +425,6 @@ class BedrockCompletion(BaseLLM):
 NotImplementedError: If aiobotocore is not installed.
 LLMContextLengthExceededError: If context window is exceeded.
 """
-effective_response_model = response_model or self.response_format
-
 if not AIOBOTOCORE_AVAILABLE:
 raise NotImplementedError(
 "Async support for AWS Bedrock requires aiobotocore. "
@@ -505,21 +494,11 @@ class BedrockCompletion(BaseLLM):

 if self.stream:
 return await self._ahandle_streaming_converse(
-formatted_messages,
-body,
-available_functions,
-from_task,
-from_agent,
-effective_response_model,
+formatted_messages, body, available_functions, from_task, from_agent
 )

 return await self._ahandle_converse(
-formatted_messages,
-body,
-available_functions,
-from_task,
-from_agent,
-effective_response_model,
+formatted_messages, body, available_functions, from_task, from_agent
 )

 except Exception as e:
@@ -541,33 +520,10 @@ class BedrockCompletion(BaseLLM):
 available_functions: Mapping[str, Any] | None = None,
 from_task: Any | None = None,
 from_agent: Any | None = None,
-response_model: type[BaseModel] | None = None,
-) -> str | Any:
+) -> str:
 """Handle non-streaming converse API call following AWS best practices."""
-if response_model:
-structured_tool: ConverseToolTypeDef = {
-"toolSpec": {
-"name": "structured_output",
-"description": "Returns structured data according to the schema",
-"inputSchema": {
-"json": generate_model_description(response_model)
-.get("json_schema", {})
-.get("schema", {})
-},
-}
-}
-body["toolConfig"] = cast(
-"ToolConfigurationTypeDef",
-cast(
-object,
-{
-"tools": [structured_tool],
-"toolChoice": {"tool": {"name": "structured_output"}},
-},
-),
-)
-
 try:
 # Validate messages format before API call
 if not messages:
 raise ValueError("Messages cannot be empty")

@@ -615,21 +571,6 @@ class BedrockCompletion(BaseLLM):

 # If there are tool uses but no available_functions, return them for the executor to handle
 tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
-
-if response_model and tool_uses:
-for tool_use in tool_uses:
-if tool_use.get("name") == "structured_output":
-structured_data = tool_use.get("input", {})
-result = response_model.model_validate(structured_data)
-self._emit_call_completed_event(
-response=result.model_dump_json(),
-call_type=LLMCallType.LLM_CALL,
-from_task=from_task,
-from_agent=from_agent,
-messages=messages,
-)
-return result

 if tool_uses and not available_functions:
 self._emit_call_completed_event(
 response=tool_uses,
@@ -776,32 +717,8 @@ class BedrockCompletion(BaseLLM):
 available_functions: dict[str, Any] | None = None,
 from_task: Any | None = None,
 from_agent: Any | None = None,
-response_model: type[BaseModel] | None = None,
 ) -> str:
 """Handle streaming converse API call with comprehensive event handling."""
-if response_model:
-structured_tool: ConverseToolTypeDef = {
-"toolSpec": {
-"name": "structured_output",
-"description": "Returns structured data according to the schema",
-"inputSchema": {
-"json": generate_model_description(response_model)
-.get("json_schema", {})
-.get("schema", {})
-},
-}
-}
-body["toolConfig"] = cast(
-"ToolConfigurationTypeDef",
-cast(
-object,
-{
-"tools": [structured_tool],
-"toolChoice": {"tool": {"name": "structured_output"}},
-},
-),
-)
-
 full_response = ""
 current_tool_use: dict[str, Any] | None = None
 tool_use_id: str | None = None
@@ -819,7 +736,6 @@ class BedrockCompletion(BaseLLM):
 )

 stream = response.get("stream")
-response_id = None
 if stream:
 for event in stream:
 if "messageStart" in event:
@@ -851,7 +767,6 @@ class BedrockCompletion(BaseLLM):
 "index": tool_use_index,
 },
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id,
 )
 logging.debug(
 f"Tool use started in stream: {json.dumps(current_tool_use)} (ID: {tool_use_id})"
@@ -867,7 +782,6 @@ class BedrockCompletion(BaseLLM):
 chunk=text_chunk,
 from_task=from_task,
 from_agent=from_agent,
-response_id=response_id,
 )
 elif "toolUse" in delta and current_tool_use:
 tool_input = delta["toolUse"].get("input", "")
@@ -888,7 +802,6 @@ class BedrockCompletion(BaseLLM):
 "index": tool_use_index,
 },
 call_type=LLMCallType.TOOL_CALL,
-response_id=response_id,
 )
 elif "contentBlockStop" in event:
 logging.debug("Content block stopped in stream")
@@ -1012,32 -8 @@ class BedrockCompletion(BaseLLM):
|
||||
available_functions: Mapping[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | Any:
|
||||
) -> str:
|
||||
"""Handle async non-streaming converse API call."""
|
||||
if response_model:
|
||||
structured_tool: ConverseToolTypeDef = {
|
||||
"toolSpec": {
|
||||
"name": "structured_output",
|
||||
"description": "Returns structured data according to the schema",
|
||||
"inputSchema": {
|
||||
"json": generate_model_description(response_model)
|
||||
.get("json_schema", {})
|
||||
.get("schema", {})
|
||||
},
|
||||
}
|
||||
}
|
||||
body["toolConfig"] = cast(
|
||||
"ToolConfigurationTypeDef",
|
||||
cast(
|
||||
object,
|
||||
{
|
||||
"tools": [structured_tool],
|
||||
"toolChoice": {"tool": {"name": "structured_output"}},
|
||||
},
|
||||
),
|
||||
)
|
||||
|
||||
try:
|
||||
if not messages:
|
||||
raise ValueError("Messages cannot be empty")
|
||||
@@ -1083,21 +972,6 @@ class BedrockCompletion(BaseLLM):
|
||||
|
||||
# If there are tool uses but no available_functions, return them for the executor to handle
|
||||
tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
|
||||
|
||||
if response_model and tool_uses:
|
||||
for tool_use in tool_uses:
|
||||
if tool_use.get("name") == "structured_output":
|
||||
structured_data = tool_use.get("input", {})
|
||||
result = response_model.model_validate(structured_data)
|
||||
self._emit_call_completed_event(
|
||||
response=result.model_dump_json(),
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=messages,
|
||||
)
|
||||
return result
|
||||
|
||||
if tool_uses and not available_functions:
|
||||
self._emit_call_completed_event(
|
||||
response=tool_uses,
|
||||
@@ -1228,32 +1102,8 @@ class BedrockCompletion(BaseLLM):
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str:
|
||||
"""Handle async streaming converse API call."""
|
||||
if response_model:
|
||||
structured_tool: ConverseToolTypeDef = {
|
||||
"toolSpec": {
|
||||
"name": "structured_output",
|
||||
"description": "Returns structured data according to the schema",
|
||||
"inputSchema": {
|
||||
"json": generate_model_description(response_model)
|
||||
.get("json_schema", {})
|
||||
.get("schema", {})
|
||||
},
|
||||
}
|
||||
}
|
||||
body["toolConfig"] = cast(
|
||||
"ToolConfigurationTypeDef",
|
||||
cast(
|
||||
object,
|
||||
{
|
||||
"tools": [structured_tool],
|
||||
"toolChoice": {"tool": {"name": "structured_output"}},
|
||||
},
|
||||
),
|
||||
)
|
||||
|
||||
full_response = ""
|
||||
current_tool_use: dict[str, Any] | None = None
|
||||
tool_use_id: str | None = None
|
||||
@@ -1272,7 +1122,6 @@ class BedrockCompletion(BaseLLM):
|
||||
)
|
||||
|
||||
stream = response.get("stream")
|
||||
response_id = None
|
||||
if stream:
|
||||
async for event in stream:
|
||||
if "messageStart" in event:
|
||||
@@ -1304,7 +1153,6 @@ class BedrockCompletion(BaseLLM):
|
||||
"index": tool_use_index,
|
||||
},
|
||||
call_type=LLMCallType.TOOL_CALL,
|
||||
response_id=response_id,
|
||||
)
|
||||
logging.debug(
|
||||
f"Tool use started in stream: {current_tool_use.get('name')} (ID: {tool_use_id})"
|
||||
@@ -1320,7 +1168,6 @@ class BedrockCompletion(BaseLLM):
|
||||
chunk=text_chunk,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
response_id=response_id,
|
||||
)
|
||||
elif "toolUse" in delta and current_tool_use:
|
||||
tool_input = delta["toolUse"].get("input", "")
|
||||
@@ -1341,7 +1188,6 @@ class BedrockCompletion(BaseLLM):
|
||||
"index": tool_use_index,
|
||||
},
|
||||
call_type=LLMCallType.TOOL_CALL,
|
||||
response_id=response_id,
|
||||
)
|
||||
|
||||
elif "contentBlockStop" in event:
|
||||
|
||||
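The Bedrock hunks above all revolve around one pattern: structured output is requested by registering a single synthetic `structured_output` tool and forcing the model to call it via `toolChoice`, then validating the returned `toolUse` input against the Pydantic model. A minimal standalone sketch of that pattern follows; the `Report` model, region, and model ID are illustrative assumptions, not values from the diff.

```python
# Hypothetical sketch of the forced-tool structured-output pattern, using boto3's
# Converse API directly. Requires AWS credentials in the environment.
import boto3
from pydantic import BaseModel

class Report(BaseModel):  # illustrative schema
    title: str
    score: float

tool_config = {
    "tools": [{
        "toolSpec": {
            "name": "structured_output",
            "description": "Returns structured data according to the schema",
            "inputSchema": {"json": Report.model_json_schema()},
        }
    }],
    # Forcing the tool guarantees the reply arrives as a toolUse block.
    "toolChoice": {"tool": {"name": "structured_output"}},
}

client = boto3.client("bedrock-runtime", region_name="us-east-1")  # assumed region
response = client.converse(
    modelId="anthropic.claude-3-5-sonnet-20240620-v1:0",  # assumed model ID
    messages=[{"role": "user", "content": [{"text": "Summarize Q3."}]}],
    toolConfig=tool_config,
)

# Validate the forced tool call's input against the Pydantic schema.
for block in response["output"]["message"]["content"]:
    if "toolUse" in block and block["toolUse"]["name"] == "structured_output":
        report = Report.model_validate(block["toolUse"]["input"])
```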
@@ -15,7 +15,6 @@ from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage


@@ -57,7 +56,6 @@ class GeminiCompletion(BaseLLM):
client_params: dict[str, Any] | None = None,
interceptor: BaseInterceptor[Any, Any] | None = None,
use_vertexai: bool | None = None,
response_format: type[BaseModel] | None = None,
**kwargs: Any,
):
"""Initialize Google Gemini chat completion client.
@@ -88,8 +86,6 @@ class GeminiCompletion(BaseLLM):
- None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var
When using Vertex AI with API key (Express mode), http_options with
api_version="v1" is automatically configured.
response_format: Pydantic model for structured output. Used as default when
response_model is not passed to call()/acall() methods.
**kwargs: Additional parameters
"""
if interceptor is not None:
@@ -125,7 +121,6 @@ class GeminiCompletion(BaseLLM):
self.safety_settings = safety_settings or {}
self.stop_sequences = stop_sequences or []
self.tools: list[dict[str, Any]] | None = None
self.response_format = response_format

# Model-specific settings
version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower())
@@ -297,7 +292,6 @@ class GeminiCompletion(BaseLLM):
from_agent=from_agent,
)
self.tools = tools
effective_response_model = response_model or self.response_format

formatted_content, system_instruction = self._format_messages_for_gemini(
messages
@@ -309,7 +303,7 @@ class GeminiCompletion(BaseLLM):
raise ValueError("LLM call blocked by before_llm_call hook")

config = self._prepare_generation_config(
system_instruction, tools, effective_response_model
system_instruction, tools, response_model
)

if self.stream:
@@ -319,7 +313,7 @@ class GeminiCompletion(BaseLLM):
available_functions,
from_task,
from_agent,
effective_response_model,
response_model,
)

return self._handle_completion(
@@ -328,7 +322,7 @@ class GeminiCompletion(BaseLLM):
available_functions,
from_task,
from_agent,
effective_response_model,
response_model,
)

except APIError as e:
@@ -380,14 +374,13 @@ class GeminiCompletion(BaseLLM):
from_agent=from_agent,
)
self.tools = tools
effective_response_model = response_model or self.response_format

formatted_content, system_instruction = self._format_messages_for_gemini(
messages
)

config = self._prepare_generation_config(
system_instruction, tools, effective_response_model
system_instruction, tools, response_model
)

if self.stream:
@@ -397,7 +390,7 @@ class GeminiCompletion(BaseLLM):
available_functions,
from_task,
from_agent,
effective_response_model,
response_model,
)

return await self._ahandle_completion(
@@ -406,7 +399,7 @@ class GeminiCompletion(BaseLLM):
available_functions,
from_task,
from_agent,
effective_response_model,
response_model,
)

except APIError as e:
@@ -465,10 +458,7 @@ class GeminiCompletion(BaseLLM):

if response_model:
config_params["response_mime_type"] = "application/json"
schema_output = generate_model_description(response_model)
config_params["response_json_schema"] = schema_output.get(
"json_schema", {}
).get("schema", {})
config_params["response_schema"] = response_model.model_json_schema()

# Handle tools for supported models
if tools and self.supports_tools:
@@ -493,7 +483,7 @@ class GeminiCompletion(BaseLLM):
function_declaration = types.FunctionDeclaration(
name=name,
description=description,
parameters_json_schema=parameters if parameters else None,
parameters=parameters if parameters else None,
)

gemini_tool = types.Tool(function_declarations=[function_declaration])
@@ -547,10 +537,11 @@ class GeminiCompletion(BaseLLM):
else:
parts.append(types.Part.from_text(text=str(content) if content else ""))

text_content: str = " ".join(p.text for p in parts if p.text is not None)

if role == "system":
# Extract system instruction - Gemini handles it separately
text_content = " ".join(
p.text for p in parts if hasattr(p, "text") and p.text
)
if system_instruction:
system_instruction += f"\n\n{text_content}"
else:
@@ -564,11 +555,7 @@ class GeminiCompletion(BaseLLM):

response_data: dict[str, Any]
try:
parsed = json.loads(text_content) if text_content else {}
if isinstance(parsed, dict):
response_data = parsed
else:
response_data = {"result": parsed}
response_data = json.loads(text_content) if text_content else {}
except (json.JSONDecodeError, TypeError):
response_data = {"result": text_content}

@@ -579,42 +566,33 @@ class GeminiCompletion(BaseLLM):
types.Content(role="user", parts=[function_response_part])
)
elif role == "assistant" and message.get("tool_calls"):
raw_parts: list[Any] | None = message.get("raw_tool_call_parts")
if raw_parts and all(isinstance(p, types.Part) for p in raw_parts):
tool_parts: list[types.Part] = list(raw_parts)
if text_content:
tool_parts.insert(0, types.Part.from_text(text=text_content))
else:
tool_parts = []
if text_content:
tool_parts.append(types.Part.from_text(text=text_content))
parts: list[types.Part] = []

tool_calls: list[dict[str, Any]] = message.get("tool_calls") or []
for tool_call in tool_calls:
func: dict[str, Any] = tool_call.get("function") or {}
func_name: str = str(func.get("name") or "")
func_args_raw: str | dict[str, Any] = (
func.get("arguments") or {}
)
if text_content:
parts.append(types.Part.from_text(text=text_content))

func_args: dict[str, Any]
if isinstance(func_args_raw, str):
try:
func_args = (
json.loads(func_args_raw) if func_args_raw else {}
)
except (json.JSONDecodeError, TypeError):
func_args = {}
else:
func_args = func_args_raw
tool_calls: list[dict[str, Any]] = message.get("tool_calls") or []
for tool_call in tool_calls:
func: dict[str, Any] = tool_call.get("function") or {}
func_name: str = str(func.get("name") or "")
func_args_raw: str | dict[str, Any] = func.get("arguments") or {}

tool_parts.append(
types.Part.from_function_call(
name=func_name, args=func_args
func_args: dict[str, Any]
if isinstance(func_args_raw, str):
try:
func_args = (
json.loads(func_args_raw) if func_args_raw else {}
)
)
except (json.JSONDecodeError, TypeError):
func_args = {}
else:
func_args = func_args_raw

contents.append(types.Content(role="model", parts=tool_parts))
parts.append(
types.Part.from_function_call(name=func_name, args=func_args)
)

contents.append(types.Content(role="model", parts=parts))
else:
# Convert role for Gemini (assistant -> model)
gemini_role = "model" if role == "assistant" else "user"
@@ -808,7 +786,6 @@ class GeminiCompletion(BaseLLM):
Returns:
Tuple of (updated full_response, updated function_calls, updated usage_data)
"""
response_id = chunk.response_id if hasattr(chunk, "response_id") else None
if chunk.usage_metadata:
usage_data = self._extract_token_usage(chunk)

@@ -818,7 +795,6 @@ class GeminiCompletion(BaseLLM):
chunk=chunk.text,
from_task=from_task,
from_agent=from_agent,
response_id=response_id,
)

if chunk.candidates:
@@ -855,7 +831,6 @@ class GeminiCompletion(BaseLLM):
"index": call_index,
},
call_type=LLMCallType.TOOL_CALL,
response_id=response_id,
)

return full_response, function_calls, usage_data
@@ -990,7 +965,7 @@ class GeminiCompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
) -> str:
"""Handle streaming content generation."""
full_response = ""
function_calls: dict[int, dict[str, Any]] = {}
@@ -1068,7 +1043,7 @@ class GeminiCompletion(BaseLLM):
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
) -> str | Any:
) -> str:
"""Handle async streaming content generation."""
full_response = ""
function_calls: dict[int, dict[str, Any]] = {}
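The `_prepare_generation_config` hunk above toggles between two ways of asking Gemini for JSON: a raw JSON-schema dict versus handing the SDK a schema object via `response_schema`. A minimal sketch of the second style with the google-genai SDK follows; the model name and `Report` schema are assumptions, and `GOOGLE_API_KEY` (or Vertex credentials) must be set.

```python
# Hypothetical sketch of Gemini structured output via google-genai.
from google import genai
from google.genai import types
from pydantic import BaseModel

class Report(BaseModel):  # illustrative schema
    title: str
    score: float

client = genai.Client()  # reads GOOGLE_API_KEY from the environment

config = types.GenerateContentConfig(
    response_mime_type="application/json",
    response_schema=Report,  # the SDK also accepts a plain JSON-schema dict
)
result = client.models.generate_content(
    model="gemini-2.0-flash",  # assumed model name
    contents="Summarize Q3 as a report.",
    config=config,
)
report = Report.model_validate_json(result.text)
```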
@@ -693,14 +693,14 @@ class OpenAICompletion(BaseLLM):
if response_model or self.response_format:
format_model = response_model or self.response_format
if isinstance(format_model, type) and issubclass(format_model, BaseModel):
schema_output = generate_model_description(format_model)
json_schema = schema_output.get("json_schema", {})
schema = format_model.model_json_schema()
schema["additionalProperties"] = False
params["text"] = {
"format": {
"type": "json_schema",
"name": json_schema.get("name", format_model.__name__),
"strict": json_schema.get("strict", True),
"schema": json_schema.get("schema", {}),
"name": format_model.__name__,
"strict": True,
"schema": schema,
}
}
elif isinstance(format_model, dict):
@@ -1047,12 +1047,8 @@ class OpenAICompletion(BaseLLM):
final_response: Response | None = None

stream = self.client.responses.create(**params)
response_id_stream = None

for event in stream:
if event.type == "response.created":
response_id_stream = event.response.id

if event.type == "response.output_text.delta":
delta_text = event.delta or ""
full_response += delta_text
@@ -1060,7 +1056,6 @@ class OpenAICompletion(BaseLLM):
chunk=delta_text,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

elif event.type == "response.function_call_arguments.delta":
@@ -1175,12 +1170,8 @@ class OpenAICompletion(BaseLLM):
final_response: Response | None = None

stream = await self.async_client.responses.create(**params)
response_id_stream = None

async for event in stream:
if event.type == "response.created":
response_id_stream = event.response.id

if event.type == "response.output_text.delta":
delta_text = event.delta or ""
full_response += delta_text
@@ -1188,7 +1179,6 @@ class OpenAICompletion(BaseLLM):
chunk=delta_text,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

elif event.type == "response.function_call_arguments.delta":
@@ -1709,8 +1699,6 @@ class OpenAICompletion(BaseLLM):
**parse_params, response_format=response_model
) as stream:
for chunk in stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None

if chunk.type == "content.delta":
delta_content = chunk.delta
if delta_content:
@@ -1718,7 +1706,6 @@ class OpenAICompletion(BaseLLM):
chunk=delta_content,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

final_completion = stream.get_final_completion()
@@ -1748,10 +1735,6 @@ class OpenAICompletion(BaseLLM):
usage_data = {"total_tokens": 0}

for completion_chunk in completion_stream:
response_id_stream = (
completion_chunk.id if hasattr(completion_chunk, "id") else None
)

if hasattr(completion_chunk, "usage") and completion_chunk.usage:
usage_data = self._extract_openai_token_usage(completion_chunk)
continue
@@ -1768,7 +1751,6 @@ class OpenAICompletion(BaseLLM):
chunk=chunk_delta.content,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

if chunk_delta.tool_calls:
@@ -1807,7 +1789,6 @@ class OpenAICompletion(BaseLLM):
"index": tool_calls[tool_index]["index"],
},
call_type=LLMCallType.TOOL_CALL,
response_id=response_id_stream,
)

self._track_token_usage_internal(usage_data)
@@ -2019,8 +2000,6 @@ class OpenAICompletion(BaseLLM):
accumulated_content = ""
usage_data = {"total_tokens": 0}
async for chunk in completion_stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None

if hasattr(chunk, "usage") and chunk.usage:
usage_data = self._extract_openai_token_usage(chunk)
continue
@@ -2037,7 +2016,6 @@ class OpenAICompletion(BaseLLM):
chunk=delta.content,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

self._track_token_usage_internal(usage_data)
@@ -2073,8 +2051,6 @@ class OpenAICompletion(BaseLLM):
usage_data = {"total_tokens": 0}

async for chunk in stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None

if hasattr(chunk, "usage") and chunk.usage:
usage_data = self._extract_openai_token_usage(chunk)
continue
@@ -2091,7 +2067,6 @@ class OpenAICompletion(BaseLLM):
chunk=chunk_delta.content,
from_task=from_task,
from_agent=from_agent,
response_id=response_id_stream,
)

if chunk_delta.tool_calls:
@@ -2130,7 +2105,6 @@ class OpenAICompletion(BaseLLM):
"index": tool_calls[tool_index]["index"],
},
call_type=LLMCallType.TOOL_CALL,
response_id=response_id_stream,
)

self._track_token_usage_internal(usage_data)
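Both sides of the first OpenAI hunk build the same `text.format` payload for the Responses API: a strict JSON schema derived from a Pydantic model. A minimal sketch of that payload in isolation follows, assuming the official `openai` package and an `OPENAI_API_KEY`; the model name and `Report` schema are illustrative.

```python
# Hypothetical sketch of strict JSON-schema output via the Responses API.
from openai import OpenAI
from pydantic import BaseModel

class Report(BaseModel):  # illustrative schema
    title: str
    score: float

schema = Report.model_json_schema()
schema["additionalProperties"] = False  # strict mode rejects extra keys

client = OpenAI()
response = client.responses.create(
    model="gpt-4o-mini",  # assumed model name
    input="Summarize Q3 as a report.",
    text={
        "format": {
            "type": "json_schema",
            "name": Report.__name__,
            "strict": True,
            "schema": schema,
        }
    },
)
report = Report.model_validate_json(response.output_text)
```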
@@ -2,7 +2,6 @@ import logging
import re
from typing import Any

from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.string_utils import sanitize_tool_name


@@ -78,8 +77,7 @@ def extract_tool_info(tool: dict[str, Any]) -> tuple[str, str, dict[str, Any]]:
# Also check for args_schema (Pydantic format)
if not parameters and "args_schema" in tool:
if hasattr(tool["args_schema"], "model_json_schema"):
schema_output = generate_model_description(tool["args_schema"])
parameters = schema_output.get("json_schema", {}).get("schema", {})
parameters = tool["args_schema"].model_json_schema()

return name, description, parameters
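The `args_schema` fallback above reduces to one idea: a Pydantic model attached to a tool becomes the tool's JSON-schema `parameters` dict. A tiny self-contained sketch, mirroring the `model_json_schema()` side of the hunk; `AddArgs` and the tool dict are illustrative.

```python
# Hypothetical sketch of deriving tool parameters from a Pydantic args_schema.
from typing import Any
from pydantic import BaseModel

class AddArgs(BaseModel):  # illustrative tool argument schema
    a: int
    b: int

tool: dict[str, Any] = {"name": "add", "description": "Add two numbers", "args_schema": AddArgs}

parameters: dict[str, Any] = {}
if not parameters and "args_schema" in tool:
    if hasattr(tool["args_schema"], "model_json_schema"):
        # Pydantic emits a standard JSON schema with properties and required keys.
        parameters = tool["args_schema"].model_json_schema()

print(parameters["properties"])  # {'a': {...}, 'b': {...}}
```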
@@ -12,17 +12,15 @@ from crewai.utilities.paths import db_storage_path
class LTMSQLiteStorage:
"""SQLite storage class for long-term memory data."""

def __init__(self, db_path: str | None = None, verbose: bool = True) -> None:
def __init__(self, db_path: str | None = None) -> None:
"""Initialize the SQLite storage.

Args:
db_path: Optional path to the database file.
verbose: Whether to print error messages.
"""
if db_path is None:
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
self.db_path = db_path
self._verbose = verbose
self._printer: Printer = Printer()
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
self._initialize_db()
@@ -46,11 +44,10 @@ class LTMSQLiteStorage:

conn.commit()
except sqlite3.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
color="red",
)

def save(
self,
@@ -72,11 +69,10 @@ class LTMSQLiteStorage:
)
conn.commit()
except sqlite3.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)

def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
"""Queries the LTM table by task description with error handling."""
@@ -105,11 +101,10 @@ class LTMSQLiteStorage:
]

except sqlite3.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
return None

def reset(self) -> None:
@@ -121,11 +116,10 @@ class LTMSQLiteStorage:
conn.commit()

except sqlite3.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)

async def asave(
self,
@@ -153,11 +147,10 @@ class LTMSQLiteStorage:
)
await conn.commit()
except aiosqlite.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)

async def aload(
self, task_description: str, latest_n: int
@@ -194,11 +187,10 @@ class LTMSQLiteStorage:
for row in rows
]
except aiosqlite.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
return None

async def areset(self) -> None:
@@ -208,8 +200,7 @@ class LTMSQLiteStorage:
await conn.execute("DELETE FROM long_term_memories")
await conn.commit()
except aiosqlite.Error as e:
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)
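Every storage hunk above applies or removes the same verbose gate: error reporting wrapped in `if self._verbose:` so library users can opt out of console noise. A minimal sketch of the gate in isolation; `QuietStorage` is illustrative, not a crewAI class.

```python
# Hypothetical sketch of the verbose-gated error reporting pattern.
import sqlite3

class QuietStorage:
    def __init__(self, db_path: str, verbose: bool = True) -> None:
        self.db_path = db_path
        self._verbose = verbose

    def reset(self) -> None:
        try:
            with sqlite3.connect(self.db_path) as conn:
                conn.execute("DELETE FROM long_term_memories")
                conn.commit()
        except sqlite3.Error as e:
            if self._verbose:  # only report when the caller opted in
                print(f"MEMORY ERROR: {e}")

QuietStorage(":memory:", verbose=False).reset()  # missing table, but silent
```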
@@ -41,7 +41,6 @@ def _default_settings() -> Settings:
persist_directory=DEFAULT_STORAGE_PATH,
allow_reset=True,
is_persistent=True,
anonymized_telemetry=False,
)
@@ -18,6 +18,7 @@ if TYPE_CHECKING:
)
from chromadb.utils.embedding_functions.google_embedding_function import (
GoogleGenerativeAiEmbeddingFunction,
GoogleVertexEmbeddingFunction,
)
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
HuggingFaceEmbeddingFunction,
@@ -51,9 +52,6 @@ if TYPE_CHECKING:
from crewai.rag.embeddings.providers.aws.types import BedrockProviderSpec
from crewai.rag.embeddings.providers.cohere.types import CohereProviderSpec
from crewai.rag.embeddings.providers.custom.types import CustomProviderSpec
from crewai.rag.embeddings.providers.google.genai_vertex_embedding import (
GoogleGenAIVertexEmbeddingFunction,
)
from crewai.rag.embeddings.providers.google.types import (
GenerativeAiProviderSpec,
VertexAIProviderSpec,
@@ -165,7 +163,7 @@ def build_embedder_from_dict(spec: OpenAIProviderSpec) -> OpenAIEmbeddingFunctio
@overload
def build_embedder_from_dict(
spec: VertexAIProviderSpec,
) -> GoogleGenAIVertexEmbeddingFunction: ...
) -> GoogleVertexEmbeddingFunction: ...


@overload
@@ -298,9 +296,7 @@ def build_embedder(spec: OpenAIProviderSpec) -> OpenAIEmbeddingFunction: ...


@overload
def build_embedder(
spec: VertexAIProviderSpec,
) -> GoogleGenAIVertexEmbeddingFunction: ...
def build_embedder(spec: VertexAIProviderSpec) -> GoogleVertexEmbeddingFunction: ...


@overload
@@ -1,8 +1,5 @@
"""Google embedding providers."""

from crewai.rag.embeddings.providers.google.genai_vertex_embedding import (
GoogleGenAIVertexEmbeddingFunction,
)
from crewai.rag.embeddings.providers.google.generative_ai import (
GenerativeAiProvider,
)
@@ -21,7 +18,6 @@ __all__ = [
"GenerativeAiProvider",
"GenerativeAiProviderConfig",
"GenerativeAiProviderSpec",
"GoogleGenAIVertexEmbeddingFunction",
"VertexAIProvider",
"VertexAIProviderConfig",
"VertexAIProviderSpec",
@@ -1,237 +0,0 @@
"""Google Vertex AI embedding function implementation.

This module supports both the new google-genai SDK and the deprecated
vertexai.language_models module for backwards compatibility.

The deprecated vertexai.language_models module will be removed after June 24, 2026.
Migration guide: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk
"""

from typing import Any, ClassVar, cast
import warnings

from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack

from crewai.rag.embeddings.providers.google.types import VertexAIProviderConfig


class GoogleGenAIVertexEmbeddingFunction(EmbeddingFunction[Documents]):
"""Embedding function for Google Vertex AI with dual SDK support.

This class supports both:
- Legacy models (textembedding-gecko*) using the deprecated vertexai.language_models SDK
- New models (gemini-embedding-*, text-embedding-*) using the google-genai SDK

The SDK is automatically selected based on the model name. Legacy models will
emit a deprecation warning.

Supports two authentication modes:
1. Vertex AI backend: Set project_id and location/region (uses Application Default Credentials)
2. API key: Set api_key for direct API access

Example:
# Using legacy model (will emit deprecation warning)
embedder = GoogleGenAIVertexEmbeddingFunction(
project_id="my-project",
region="us-central1",
model_name="textembedding-gecko"
)

# Using new model with google-genai SDK
embedder = GoogleGenAIVertexEmbeddingFunction(
project_id="my-project",
location="us-central1",
model_name="gemini-embedding-001"
)

# Using API key (new SDK only)
embedder = GoogleGenAIVertexEmbeddingFunction(
api_key="your-api-key",
model_name="gemini-embedding-001"
)
"""

# Models that use the legacy vertexai.language_models SDK
LEGACY_MODELS: ClassVar[set[str]] = {
"textembedding-gecko",
"textembedding-gecko@001",
"textembedding-gecko@002",
"textembedding-gecko@003",
"textembedding-gecko@latest",
"textembedding-gecko-multilingual",
"textembedding-gecko-multilingual@001",
"textembedding-gecko-multilingual@latest",
}

# Models that use the new google-genai SDK
GENAI_MODELS: ClassVar[set[str]] = {
"gemini-embedding-001",
"text-embedding-005",
"text-multilingual-embedding-002",
}

def __init__(self, **kwargs: Unpack[VertexAIProviderConfig]) -> None:
"""Initialize Google Vertex AI embedding function.

Args:
**kwargs: Configuration parameters including:
- model_name: Model to use for embeddings (default: "textembedding-gecko")
- api_key: Optional API key for authentication (new SDK only)
- project_id: GCP project ID (for Vertex AI backend)
- location: GCP region (default: "us-central1")
- region: Deprecated alias for location
- task_type: Task type for embeddings (default: "RETRIEVAL_DOCUMENT", new SDK only)
- output_dimensionality: Optional output embedding dimension (new SDK only)
"""
# Handle deprecated 'region' parameter (only if it has a value)
region_value = kwargs.pop("region", None) # type: ignore[typeddict-item]
if region_value is not None:
warnings.warn(
"The 'region' parameter is deprecated, use 'location' instead. "
"See: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk",
DeprecationWarning,
stacklevel=2,
)
if "location" not in kwargs or kwargs.get("location") is None:
kwargs["location"] = region_value # type: ignore[typeddict-unknown-key]

self._config = kwargs
self._model_name = str(kwargs.get("model_name", "textembedding-gecko"))
self._use_legacy = self._is_legacy_model(self._model_name)

if self._use_legacy:
self._init_legacy_client(**kwargs)
else:
self._init_genai_client(**kwargs)

def _is_legacy_model(self, model_name: str) -> bool:
"""Check if the model uses the legacy SDK."""
return model_name in self.LEGACY_MODELS or model_name.startswith(
"textembedding-gecko"
)

def _init_legacy_client(self, **kwargs: Any) -> None:
"""Initialize using the deprecated vertexai.language_models SDK."""
warnings.warn(
f"Model '{self._model_name}' uses the deprecated vertexai.language_models SDK "
"which will be removed after June 24, 2026. Consider migrating to newer models "
"like 'gemini-embedding-001'. "
"See: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk",
DeprecationWarning,
stacklevel=3,
)

try:
import vertexai
from vertexai.language_models import TextEmbeddingModel
except ImportError as e:
raise ImportError(
"vertexai is required for legacy embedding models (textembedding-gecko*). "
"Install it with: pip install google-cloud-aiplatform"
) from e

project_id = kwargs.get("project_id")
location = str(kwargs.get("location", "us-central1"))

if not project_id:
raise ValueError(
"project_id is required for legacy models. "
"For API key authentication, use newer models like 'gemini-embedding-001'."
)

vertexai.init(project=str(project_id), location=location)
self._legacy_model = TextEmbeddingModel.from_pretrained(self._model_name)

def _init_genai_client(self, **kwargs: Any) -> None:
"""Initialize using the new google-genai SDK."""
try:
from google import genai
from google.genai.types import EmbedContentConfig
except ImportError as e:
raise ImportError(
"google-genai is required for Google Gen AI embeddings. "
"Install it with: uv add 'crewai[google-genai]'"
) from e

self._genai = genai
self._EmbedContentConfig = EmbedContentConfig
self._task_type = kwargs.get("task_type", "RETRIEVAL_DOCUMENT")
self._output_dimensionality = kwargs.get("output_dimensionality")

# Initialize client based on authentication mode
api_key = kwargs.get("api_key")
project_id = kwargs.get("project_id")
location: str = str(kwargs.get("location", "us-central1"))

if api_key:
self._client = genai.Client(api_key=api_key)
elif project_id:
self._client = genai.Client(
vertexai=True,
project=str(project_id),
location=location,
)
else:
raise ValueError(
"Either 'api_key' (for API key authentication) or 'project_id' "
"(for Vertex AI backend with ADC) must be provided."
)

@staticmethod
def name() -> str:
"""Return the name of the embedding function for ChromaDB compatibility."""
return "google-vertex"

def __call__(self, input: Documents) -> Embeddings:
"""Generate embeddings for input documents.

Args:
input: List of documents to embed.

Returns:
List of embedding vectors.
"""
if isinstance(input, str):
input = [input]

if self._use_legacy:
return self._call_legacy(input)
return self._call_genai(input)

def _call_legacy(self, input: list[str]) -> Embeddings:
"""Generate embeddings using the legacy SDK."""
import numpy as np

embeddings_list = []
for text in input:
embedding_result = self._legacy_model.get_embeddings([text])
embeddings_list.append(
np.array(embedding_result[0].values, dtype=np.float32)
)

return cast(Embeddings, embeddings_list)

def _call_genai(self, input: list[str]) -> Embeddings:
"""Generate embeddings using the new google-genai SDK."""
# Build config for embed_content
config_kwargs: dict[str, Any] = {
"task_type": self._task_type,
}
if self._output_dimensionality is not None:
config_kwargs["output_dimensionality"] = self._output_dimensionality

config = self._EmbedContentConfig(**config_kwargs)

# Call the embedding API
response = self._client.models.embed_content(
model=self._model_name,
contents=input, # type: ignore[arg-type]
config=config,
)

# Extract embeddings from response
if response.embeddings is None:
raise ValueError("No embeddings returned from the API")
embeddings = [emb.values for emb in response.embeddings]
return cast(Embeddings, embeddings)
@@ -34,47 +34,12 @@ class GenerativeAiProviderSpec(TypedDict):


class VertexAIProviderConfig(TypedDict, total=False):
"""Configuration for Vertex AI provider with dual SDK support.

Supports both legacy models (textembedding-gecko*) using the deprecated
vertexai.language_models SDK and new models using google-genai SDK.

Attributes:
api_key: Google API key (optional if using project_id with ADC). Only for new SDK models.
model_name: Embedding model name (default: "textembedding-gecko").
Legacy models: textembedding-gecko, textembedding-gecko@001, etc.
New models: gemini-embedding-001, text-embedding-005, text-multilingual-embedding-002
project_id: GCP project ID (required for Vertex AI backend and legacy models).
location: GCP region/location (default: "us-central1").
region: Deprecated alias for location (kept for backwards compatibility).
task_type: Task type for embeddings (default: "RETRIEVAL_DOCUMENT"). Only for new SDK models.
output_dimensionality: Output embedding dimension (optional). Only for new SDK models.
"""
"""Configuration for Vertex AI provider."""

api_key: str
model_name: Annotated[
Literal[
# Legacy models (deprecated vertexai.language_models SDK)
"textembedding-gecko",
"textembedding-gecko@001",
"textembedding-gecko@002",
"textembedding-gecko@003",
"textembedding-gecko@latest",
"textembedding-gecko-multilingual",
"textembedding-gecko-multilingual@001",
"textembedding-gecko-multilingual@latest",
# New models (google-genai SDK)
"gemini-embedding-001",
"text-embedding-005",
"text-multilingual-embedding-002",
],
"textembedding-gecko",
]
project_id: str
location: Annotated[str, "us-central1"]
region: Annotated[str, "us-central1"] # Deprecated alias for location
task_type: Annotated[str, "RETRIEVAL_DOCUMENT"]
output_dimensionality: int
model_name: Annotated[str, "textembedding-gecko"]
project_id: Annotated[str, "cloud-large-language-models"]
region: Annotated[str, "us-central1"]


class VertexAIProviderSpec(TypedDict, total=False):
@@ -1,126 +1,46 @@
"""Google Vertex AI embeddings provider.

This module supports both the new google-genai SDK and the deprecated
vertexai.language_models module for backwards compatibility.

The SDK is automatically selected based on the model name:
- Legacy models (textembedding-gecko*) use vertexai.language_models (deprecated)
- New models (gemini-embedding-*, text-embedding-*) use google-genai

Migration guide: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk
"""

from __future__ import annotations
"""Google Vertex AI embeddings provider."""

from chromadb.utils.embedding_functions.google_embedding_function import (
GoogleVertexEmbeddingFunction,
)
from pydantic import AliasChoices, Field

from crewai.rag.core.base_embeddings_provider import BaseEmbeddingsProvider
from crewai.rag.embeddings.providers.google.genai_vertex_embedding import (
GoogleGenAIVertexEmbeddingFunction,
)


class VertexAIProvider(BaseEmbeddingsProvider[GoogleGenAIVertexEmbeddingFunction]):
"""Google Vertex AI embeddings provider with dual SDK support.
class VertexAIProvider(BaseEmbeddingsProvider[GoogleVertexEmbeddingFunction]):
"""Google Vertex AI embeddings provider."""

Supports both legacy models (textembedding-gecko*) using the deprecated
vertexai.language_models SDK and new models (gemini-embedding-*, text-embedding-*)
using the google-genai SDK.

The SDK is automatically selected based on the model name. Legacy models will
emit a deprecation warning.

Authentication modes:
1. Vertex AI backend: Set project_id and location/region (uses Application Default Credentials)
2. API key: Set api_key for direct API access (new SDK models only)

Example:
# Legacy model (backwards compatible, will emit deprecation warning)
provider = VertexAIProvider(
project_id="my-project",
region="us-central1", # or location="us-central1"
model_name="textembedding-gecko"
)

# New model with Vertex AI backend
provider = VertexAIProvider(
project_id="my-project",
location="us-central1",
model_name="gemini-embedding-001"
)

# New model with API key
provider = VertexAIProvider(
api_key="your-api-key",
model_name="gemini-embedding-001"
)
"""

embedding_callable: type[GoogleGenAIVertexEmbeddingFunction] = Field(
default=GoogleGenAIVertexEmbeddingFunction,
description="Google Vertex AI embedding function class",
embedding_callable: type[GoogleVertexEmbeddingFunction] = Field(
default=GoogleVertexEmbeddingFunction,
description="Vertex AI embedding function class",
)
model_name: str = Field(
default="textembedding-gecko",
description=(
"Model name to use for embeddings. Legacy models (textembedding-gecko*) "
"use the deprecated SDK. New models (gemini-embedding-001, text-embedding-005) "
"use the google-genai SDK."
),
description="Model name to use for embeddings",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_VERTEX_MODEL_NAME",
"GOOGLE_VERTEX_MODEL_NAME",
"model",
),
)
api_key: str | None = Field(
default=None,
description="Google API key (optional if using project_id with Application Default Credentials)",
api_key: str = Field(
description="Google API key",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_CLOUD_API_KEY",
"GOOGLE_CLOUD_API_KEY",
"GOOGLE_API_KEY",
"EMBEDDINGS_GOOGLE_CLOUD_API_KEY", "GOOGLE_CLOUD_API_KEY"
),
)
project_id: str | None = Field(
default=None,
description="GCP project ID (required for Vertex AI backend and legacy models)",
project_id: str = Field(
default="cloud-large-language-models",
description="GCP project ID",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_CLOUD_PROJECT",
"GOOGLE_CLOUD_PROJECT",
"EMBEDDINGS_GOOGLE_CLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"
),
)
location: str = Field(
region: str = Field(
default="us-central1",
description="GCP region/location",
description="GCP region",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_CLOUD_LOCATION",
"EMBEDDINGS_GOOGLE_CLOUD_REGION",
"GOOGLE_CLOUD_LOCATION",
"GOOGLE_CLOUD_REGION",
),
)
region: str | None = Field(
default=None,
description="Deprecated: Use 'location' instead. GCP region (kept for backwards compatibility)",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_VERTEX_REGION",
"GOOGLE_VERTEX_REGION",
),
)
task_type: str = Field(
default="RETRIEVAL_DOCUMENT",
description="Task type for embeddings (e.g., RETRIEVAL_DOCUMENT, RETRIEVAL_QUERY). Only used with new SDK models.",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_VERTEX_TASK_TYPE",
"GOOGLE_VERTEX_TASK_TYPE",
),
)
output_dimensionality: int | None = Field(
default=None,
description="Output embedding dimensionality (optional). Only used with new SDK models.",
validation_alias=AliasChoices(
"EMBEDDINGS_GOOGLE_VERTEX_OUTPUT_DIMENSIONALITY",
"GOOGLE_VERTEX_OUTPUT_DIMENSIONALITY",
"EMBEDDINGS_GOOGLE_CLOUD_REGION", "GOOGLE_CLOUD_REGION"
),
)
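All of the provider fields above rely on the same mechanism: a pydantic `Field` whose `validation_alias=AliasChoices(...)` lets several environment-variable-style keys populate one attribute. A tiny sketch of that mechanism in isolation; `DemoProvider` is illustrative, not a crewAI class.

```python
# Hypothetical sketch of multi-alias field population with pydantic v2.
from pydantic import AliasChoices, BaseModel, Field

class DemoProvider(BaseModel):
    project_id: str | None = Field(
        default=None,
        validation_alias=AliasChoices(
            "EMBEDDINGS_GOOGLE_CLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"
        ),
    )

# Either alias key fills the same field at validation time.
p = DemoProvider.model_validate({"GOOGLE_CLOUD_PROJECT": "my-project"})
print(p.project_id)  # my-project
```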
@@ -1,6 +1,6 @@
"""IBM WatsonX embedding function implementation."""

from typing import Any, cast
from typing import cast

from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack
@@ -15,18 +15,14 @@ _printer = Printer()
class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
"""Embedding function for IBM WatsonX models."""

def __init__(
self, *, verbose: bool = True, **kwargs: Unpack[WatsonXProviderConfig]
) -> None:
def __init__(self, **kwargs: Unpack[WatsonXProviderConfig]) -> None:
"""Initialize WatsonX embedding function.

Args:
verbose: Whether to print error messages.
**kwargs: Configuration parameters for WatsonX Embeddings and Credentials.
"""
super().__init__(**kwargs)
self._config = kwargs
self._verbose = verbose

@staticmethod
def name() -> str:
@@ -60,7 +56,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
if isinstance(input, str):
input = [input]

embeddings_config: dict[str, Any] = {
embeddings_config: dict = {
"model_id": self._config["model_id"],
}
if "params" in self._config and self._config["params"] is not None:
@@ -94,7 +90,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
if "credentials" in self._config and self._config["credentials"] is not None:
embeddings_config["credentials"] = self._config["credentials"]
else:
cred_config: dict[str, Any] = {}
cred_config: dict = {}
if "url" in self._config and self._config["url"] is not None:
cred_config["url"] = self._config["url"]
if "api_key" in self._config and self._config["api_key"] is not None:
@@ -163,6 +159,5 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
embeddings = embedding.embed_documents(input)
return cast(Embeddings, embeddings)
except Exception as e:
if self._verbose:
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
raise
@@ -767,11 +767,10 @@ class Task(BaseModel):
if files:
supported_types: list[str] = []
if self.agent.llm and self.agent.llm.supports_multimodal():
provider: str = str(
getattr(self.agent.llm, "provider", None)
or getattr(self.agent.llm, "model", "openai")
provider = getattr(self.agent.llm, "provider", None) or getattr(
self.agent.llm, "model", "openai"
)
api: str | None = getattr(self.agent.llm, "api", None)
api = getattr(self.agent.llm, "api", None)
supported_types = get_supported_content_types(provider, api)

def is_auto_injected(content_type: str) -> bool:
@@ -888,11 +887,10 @@ Follow these guidelines:
try:
crew_chat_messages = json.loads(crew_chat_messages_json)
except json.JSONDecodeError as e:
if self.agent and self.agent.verbose:
_printer.print(
f"An error occurred while parsing crew chat messages: {e}",
color="red",
)
_printer.print(
f"An error occurred while parsing crew chat messages: {e}",
color="red",
)
raise

conversation_history = "\n".join(
@@ -1134,12 +1132,11 @@ Follow these guidelines:
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
if agent and agent.verbose:
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)

# Regenerate output from agent
result = agent.execute_task(
@@ -1232,12 +1229,11 @@ Follow these guidelines:
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
if agent and agent.verbose:
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)

result = await agent.aexecute_task(
task=self,
@@ -384,8 +384,6 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -398,8 +396,6 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -614,8 +610,6 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -628,8 +622,6 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -892,17 +884,15 @@ class ToolUsage:
# Attempt 4: Repair JSON
try:
repaired_input = str(repair_json(tool_input, skip_json_loads=True))
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
arguments = json.loads(repaired_input)
if isinstance(arguments, dict):
return arguments
except Exception as e:
error = f"Failed to repair JSON: {e}"
if self.agent and self.agent.verbose:
self._printer.print(content=error, color="red")
self._printer.print(content=error, color="red")

error_message = (
"Tool input must be a valid dictionary in JSON or Python literal format"
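The "Attempt 4: Repair JSON" step above leans on the json-repair package to salvage malformed tool arguments from an LLM before giving up. A minimal sketch of that fallback on its own, assuming `pip install json-repair`; the truncated input string is illustrative.

```python
# Hypothetical sketch of the JSON repair fallback for malformed tool input.
import json
from json_repair import repair_json

tool_input = '{"query": "revenue", "limit": 5'  # truncated JSON from an LLM
repaired = str(repair_json(tool_input, skip_json_loads=True))
arguments = json.loads(repaired)
assert isinstance(arguments, dict)
print(arguments)  # {'query': 'revenue', 'limit': 5}
```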
@@ -28,7 +28,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
)
|
||||
from crewai.utilities.i18n import I18N
|
||||
from crewai.utilities.printer import ColoredText, Printer
|
||||
from crewai.utilities.pydantic_schema_utils import generate_model_description
|
||||
from crewai.utilities.string_utils import sanitize_tool_name
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
from crewai.utilities.types import LLMMessage
|
||||
@@ -37,7 +36,6 @@ from crewai.utilities.types import LLMMessage
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.experimental.agent_executor import AgentExecutor
|
||||
from crewai.lite_agent import LiteAgent
|
||||
from crewai.llm import LLM
|
||||
from crewai.task import Task
|
||||
@@ -160,8 +158,7 @@ def convert_tools_to_openai_schema(
|
||||
parameters: dict[str, Any] = {}
|
||||
if hasattr(tool, "args_schema") and tool.args_schema is not None:
|
||||
try:
|
||||
schema_output = generate_model_description(tool.args_schema)
|
||||
parameters = schema_output.get("json_schema", {}).get("schema", {})
|
||||
parameters = tool.args_schema.model_json_schema()
|
||||
# Remove title and description from schema root as they're redundant
|
||||
parameters.pop("title", None)
|
||||
parameters.pop("description", None)
|
||||
@@ -210,7 +207,6 @@ def handle_max_iterations_exceeded(
     messages: list[LLMMessage],
     llm: LLM | BaseLLM,
     callbacks: list[TokenCalcHandler],
-    verbose: bool = True,
 ) -> AgentFinish:
     """Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer.

@@ -221,16 +217,14 @@ def handle_max_iterations_exceeded(
         messages: List of messages to send to the LLM.
         llm: The LLM instance to call.
         callbacks: List of callbacks for the LLM call.
-        verbose: Whether to print output.

     Returns:
         AgentFinish with the final answer after exceeding max iterations.
     """
-    if verbose:
-        printer.print(
-            content="Maximum iterations reached. Requesting final answer.",
-            color="yellow",
-        )
+    printer.print(
+        content="Maximum iterations reached. Requesting final answer.",
+        color="yellow",
+    )

     if formatted_answer and hasattr(formatted_answer, "text"):
         assistant_message = (
@@ -248,11 +242,10 @@ def handle_max_iterations_exceeded(
         )

     if answer is None or answer == "":
-        if verbose:
-            printer.print(
-                content="Received None or empty response from LLM call.",
-                color="red",
-            )
+        printer.print(
+            content="Received None or empty response from LLM call.",
+            color="red",
+        )
         raise ValueError("Invalid response from LLM call - None or empty.")

     formatted = format_answer(answer=answer)
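The flow above boils down to one last model call once the iteration budget is spent. A condensed sketch, assuming a generic `llm_call` callable (an illustration, not the library's implementation):

```python
def final_answer_after_max_iterations(messages: list[dict], llm_call) -> str:
    # One last call: ask the model to commit to a final answer.
    messages.append({"role": "user", "content": "Provide your final answer now."})
    answer = llm_call(messages)
    if not answer:
        raise ValueError("Invalid response from LLM call - None or empty.")
    return answer
```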
@@ -325,8 +318,7 @@ def get_llm_response(
     from_task: Task | None = None,
     from_agent: Agent | LiteAgent | None = None,
     response_model: type[BaseModel] | None = None,
-    executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None,
-    verbose: bool = True,
+    executor_context: CrewAgentExecutor | LiteAgent | None = None,
 ) -> str | Any:
     """Call the LLM and return the response, handling any invalid responses.

@@ -352,7 +344,7 @@ def get_llm_response(
     """

     if executor_context is not None:
-        if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
+        if not _setup_before_llm_call_hooks(executor_context, printer):
             raise ValueError("LLM call blocked by before_llm_call hook")
         messages = executor_context.messages

@@ -369,16 +361,13 @@ def get_llm_response(
     except Exception as e:
         raise e
     if not answer:
-        if verbose:
-            printer.print(
-                content="Received None or empty response from LLM call.",
-                color="red",
-            )
+        printer.print(
+            content="Received None or empty response from LLM call.",
+            color="red",
+        )
         raise ValueError("Invalid response from LLM call - None or empty.")

-    return _setup_after_llm_call_hooks(
-        executor_context, answer, printer, verbose=verbose
-    )
+    return _setup_after_llm_call_hooks(executor_context, answer, printer)


 async def aget_llm_response(
@@ -391,8 +380,7 @@ async def aget_llm_response(
     from_task: Task | None = None,
     from_agent: Agent | LiteAgent | None = None,
     response_model: type[BaseModel] | None = None,
-    executor_context: CrewAgentExecutor | AgentExecutor | None = None,
-    verbose: bool = True,
+    executor_context: CrewAgentExecutor | None = None,
 ) -> str | Any:
     """Call the LLM asynchronously and return the response.

@@ -417,7 +405,7 @@ async def aget_llm_response(
         ValueError: If the response is None or empty.
     """
     if executor_context is not None:
-        if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
+        if not _setup_before_llm_call_hooks(executor_context, printer):
             raise ValueError("LLM call blocked by before_llm_call hook")
         messages = executor_context.messages

@@ -434,16 +422,13 @@ async def aget_llm_response(
     except Exception as e:
         raise e
     if not answer:
-        if verbose:
-            printer.print(
-                content="Received None or empty response from LLM call.",
-                color="red",
-            )
+        printer.print(
+            content="Received None or empty response from LLM call.",
+            color="red",
+        )
         raise ValueError("Invalid response from LLM call - None or empty.")

-    return _setup_after_llm_call_hooks(
-        executor_context, answer, printer, verbose=verbose
-    )
+    return _setup_after_llm_call_hooks(executor_context, answer, printer)


 def process_llm_response(
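The overall shape of `get_llm_response` after this change, reduced to a hedged, self-contained sketch; the `Context` type and hook protocol below are illustrative stand-ins for the executor context, not the library's API:

```python
from dataclasses import dataclass, field
from typing import Callable

@dataclass
class Context:
    messages: list = field(default_factory=list)
    before_hooks: list = field(default_factory=list)
    after_hooks: list = field(default_factory=list)

def guarded_llm_call(ctx: Context, llm_call: Callable[[list], str]) -> str:
    # Before-hooks may veto the call by returning False.
    if any(hook(ctx) is False for hook in ctx.before_hooks):
        raise ValueError("LLM call blocked by before_llm_call hook")
    answer = llm_call(ctx.messages)
    if not answer:
        raise ValueError("Invalid response from LLM call - None or empty.")
    # After-hooks may rewrite the answer; a falsy return keeps it unchanged.
    for hook in ctx.after_hooks:
        answer = hook(ctx, answer) or answer
    return answer
```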
@@ -510,19 +495,13 @@ def handle_agent_action_core(
         return formatted_answer


-def handle_unknown_error(
-    printer: Printer, exception: Exception, verbose: bool = True
-) -> None:
+def handle_unknown_error(printer: Printer, exception: Exception) -> None:
     """Handle unknown errors by informing the user.

     Args:
         printer: Printer instance for output
         exception: The exception that occurred
-        verbose: Whether to print output.
     """
-    if not verbose:
-        return
-
     error_message = str(exception)

     if "litellm" in error_message:
@@ -544,7 +523,6 @@ def handle_output_parser_exception(
     iterations: int,
     log_error_after: int = 3,
     printer: Printer | None = None,
-    verbose: bool = True,
 ) -> AgentAction:
     """Handle OutputParserError by updating messages and formatted_answer.

@@ -567,7 +545,7 @@ def handle_output_parser_exception(
         thought="",
     )

-    if verbose and iterations > log_error_after and printer:
+    if iterations > log_error_after and printer:
         printer.print(
             content=f"Error parsing LLM output, agent will retry: {e.error}",
             color="red",
@@ -597,7 +575,6 @@ def handle_context_length(
     llm: LLM | BaseLLM,
     callbacks: list[TokenCalcHandler],
     i18n: I18N,
-    verbose: bool = True,
 ) -> None:
     """Handle context length exceeded by either summarizing or raising an error.

@@ -613,20 +590,16 @@ def handle_context_length(
         SystemExit: If context length is exceeded and user opts not to summarize
     """
     if respect_context_window:
-        if verbose:
-            printer.print(
-                content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
-                color="yellow",
-            )
-        summarize_messages(
-            messages=messages, llm=llm, callbacks=callbacks, i18n=i18n, verbose=verbose
-        )
+        printer.print(
+            content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
+            color="yellow",
+        )
+        summarize_messages(messages=messages, llm=llm, callbacks=callbacks, i18n=i18n)
     else:
-        if verbose:
-            printer.print(
-                content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
-                color="red",
-            )
+        printer.print(
+            content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
+            color="red",
+        )
         raise SystemExit(
             "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
        )
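The branch above either shrinks the history or aborts. A condensed sketch of that contract, with `summarize` as a hypothetical callable standing in for `summarize_messages`:

```python
def on_context_overflow(messages: list, respect_context_window: bool, summarize) -> None:
    if respect_context_window:
        summarize(messages)  # shrink history in place to fit the window
    else:
        raise SystemExit(
            "Context length exceeded and user opted not to summarize. "
            "Consider using smaller text or RAG tools from crewai_tools."
        )
```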
@@ -637,7 +610,6 @@ def summarize_messages(
     llm: LLM | BaseLLM,
     callbacks: list[TokenCalcHandler],
     i18n: I18N,
-    verbose: bool = True,
 ) -> None:
     """Summarize messages to fit within context window.

@@ -669,11 +641,10 @@ def summarize_messages(

     total_groups = len(messages_groups)
     for idx, group in enumerate(messages_groups, 1):
-        if verbose:
-            Printer().print(
-                content=f"Summarizing {idx}/{total_groups}...",
-                color="yellow",
-            )
+        Printer().print(
+            content=f"Summarizing {idx}/{total_groups}...",
+            color="yellow",
+        )

         summarization_messages = [
             format_message_for_llm(
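`summarize_messages` is not shown in full here; the sketch below assumes simple fixed-size character grouping purely to illustrate the per-group progress loop above, and `summarize` is a hypothetical callable:

```python
def summarize_in_groups(text: str, group_size: int, summarize) -> list[str]:
    # Split the oversized text into groups and summarize each in turn,
    # reporting progress the way the loop above does.
    groups = [text[i : i + group_size] for i in range(0, len(text), group_size)]
    summaries = []
    for idx, group in enumerate(groups, 1):
        print(f"Summarizing {idx}/{len(groups)}...")
        summaries.append(summarize(group))
    return summaries
```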
@@ -929,16 +900,13 @@ def extract_tool_call_info(


 def _setup_before_llm_call_hooks(
-    executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
-    printer: Printer,
-    verbose: bool = True,
+    executor_context: CrewAgentExecutor | LiteAgent | None, printer: Printer
 ) -> bool:
     """Setup and invoke before_llm_call hooks for the executor context.

     Args:
         executor_context: The executor context to setup the hooks for.
         printer: Printer instance for error logging.
-        verbose: Whether to print output.

     Returns:
         True if LLM execution should proceed, False if blocked by a hook.
@@ -953,29 +921,26 @@ def _setup_before_llm_call_hooks(
         for hook in executor_context.before_llm_call_hooks:
             result = hook(hook_context)
             if result is False:
-                if verbose:
-                    printer.print(
-                        content="LLM call blocked by before_llm_call hook",
-                        color="yellow",
-                    )
+                printer.print(
+                    content="LLM call blocked by before_llm_call hook",
+                    color="yellow",
+                )
                 return False
     except Exception as e:
-        if verbose:
-            printer.print(
-                content=f"Error in before_llm_call hook: {e}",
-                color="yellow",
-            )
+        printer.print(
+            content=f"Error in before_llm_call hook: {e}",
+            color="yellow",
+        )

     if not isinstance(executor_context.messages, list):
-        if verbose:
-            printer.print(
-                content=(
-                    "Warning: before_llm_call hook replaced messages with non-list. "
-                    "Restoring original messages list. Hooks should modify messages in-place, "
-                    "not replace the list (e.g., use context.messages.append() not context.messages = [])."
-                ),
-                color="yellow",
-            )
+        printer.print(
+            content=(
+                "Warning: before_llm_call hook replaced messages with non-list. "
+                "Restoring original messages list. Hooks should modify messages in-place, "
+                "not replace the list (e.g., use context.messages.append() not context.messages = [])."
+            ),
+            color="yellow",
+        )
         if isinstance(original_messages, list):
             executor_context.messages = original_messages
         else:
@@ -985,10 +950,9 @@ def _setup_before_llm_call_hooks(


 def _setup_after_llm_call_hooks(
-    executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
+    executor_context: CrewAgentExecutor | LiteAgent | None,
     answer: str,
     printer: Printer,
-    verbose: bool = True,
 ) -> str:
     """Setup and invoke after_llm_call hooks for the executor context.

@@ -996,7 +960,6 @@ def _setup_after_llm_call_hooks(
         executor_context: The executor context to setup the hooks for.
         answer: The LLM response string.
         printer: Printer instance for error logging.
-        verbose: Whether to print output.

     Returns:
         The potentially modified response string.
@@ -1014,22 +977,20 @@ def _setup_after_llm_call_hooks(
                 answer = modified_response

     except Exception as e:
-        if verbose:
-            printer.print(
-                content=f"Error in after_llm_call hook: {e}",
-                color="yellow",
-            )
+        printer.print(
+            content=f"Error in after_llm_call hook: {e}",
+            color="yellow",
+        )

     if not isinstance(executor_context.messages, list):
-        if verbose:
-            printer.print(
-                content=(
-                    "Warning: after_llm_call hook replaced messages with non-list. "
-                    "Restoring original messages list. Hooks should modify messages in-place, "
-                    "not replace the list (e.g., use context.messages.append() not context.messages = [])."
-                ),
-                color="yellow",
-            )
+        printer.print(
+            content=(
+                "Warning: after_llm_call hook replaced messages with non-list. "
+                "Restoring original messages list. Hooks should modify messages in-place, "
+                "not replace the list (e.g., use context.messages.append() not context.messages = [])."
+            ),
+            color="yellow",
+        )
         if isinstance(original_messages, list):
             executor_context.messages = original_messages
         else:
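The repeated warning above exists because hooks must mutate `context.messages` in place rather than rebind it. A hedged illustration, with a hypothetical hook context object:

```python
def good_hook(context) -> None:
    # Mutates the existing list in place: the executor keeps seeing it.
    context.messages.append({"role": "system", "content": "Stay concise."})

def bad_hook(context) -> None:
    # Rebinds the attribute to a new object; the executor detects this
    # and restores the original messages list, as the code above shows.
    context.messages = []
```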
@@ -205,11 +205,10 @@ def convert_to_model(
         )

     except Exception as e:
-        if agent and getattr(agent, "verbose", True):
-            Printer().print(
-                content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
-                color="red",
-            )
+        Printer().print(
+            content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
+            color="red",
+        )
         return result


@@ -263,11 +262,10 @@ def handle_partial_json(
     except ValidationError:
         raise
     except Exception as e:
-        if agent and getattr(agent, "verbose", True):
-            Printer().print(
-                content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
-                color="red",
-            )
+        Printer().print(
+            content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
+            color="red",
+        )

     return convert_with_instructions(
         result=result,
@@ -325,11 +323,10 @@ def convert_with_instructions(
     )

     if isinstance(exported_result, ConverterError):
-        if agent and getattr(agent, "verbose", True):
-            Printer().print(
-                content=f"Failed to convert result to model: {exported_result}",
-                color="red",
-            )
+        Printer().print(
+            content=f"Failed to convert result to model: {exported_result}",
+            color="red",
+        )
         return result

     return exported_result
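All three hunks above preserve the converter's degrade-gracefully contract: on failure, hand back the raw result instead of raising. A minimal sketch of that contract under assumed names (`Output` is a hypothetical target model, not from the codebase):

```python
from pydantic import BaseModel, ValidationError

class Output(BaseModel):
    title: str
    score: float

def convert_or_passthrough(result: str):
    try:
        return Output.model_validate_json(result)
    except ValidationError:
        return result  # mirror the "Returning original result." behavior above
```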
@@ -5,29 +5,17 @@ from __future__ import annotations

 import asyncio
 from collections.abc import Coroutine
 import concurrent.futures
 import logging
 from typing import TYPE_CHECKING, TypeVar
 from uuid import UUID

+from aiocache import Cache  # type: ignore[import-untyped]
+from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]
+
+
 if TYPE_CHECKING:
-    from aiocache import Cache
     from crewai_files import FileInput

 logger = logging.getLogger(__name__)

-_file_store: Cache | None = None
-
-try:
-    from aiocache import Cache
-    from aiocache.serializers import PickleSerializer
-
-    _file_store = Cache(Cache.MEMORY, serializer=PickleSerializer())
-except ImportError:
-    logger.debug(
-        "aiocache is not installed. File store features will be disabled. "
-        "Install with: uv add aiocache"
-    )
+_file_store = Cache(Cache.MEMORY, serializer=PickleSerializer())

 T = TypeVar("T")
@@ -71,8 +59,6 @@ async def astore_files(
         files: Dictionary mapping names to file inputs.
         ttl: Time-to-live in seconds.
     """
-    if _file_store is None:
-        return
     await _file_store.set(f"{_CREW_PREFIX}{execution_id}", files, ttl=ttl)


@@ -85,8 +71,6 @@ async def aget_files(execution_id: UUID) -> dict[str, FileInput] | None:
     Returns:
         Dictionary of files or None if not found.
     """
-    if _file_store is None:
-        return None
     result: dict[str, FileInput] | None = await _file_store.get(
         f"{_CREW_PREFIX}{execution_id}"
     )
@@ -99,8 +83,6 @@ async def aclear_files(execution_id: UUID) -> None:
     Args:
         execution_id: Unique identifier for the crew execution.
     """
-    if _file_store is None:
-        return
     await _file_store.delete(f"{_CREW_PREFIX}{execution_id}")


@@ -116,8 +98,6 @@ async def astore_task_files(
         files: Dictionary mapping names to file inputs.
         ttl: Time-to-live in seconds.
     """
-    if _file_store is None:
-        return
     await _file_store.set(f"{_TASK_PREFIX}{task_id}", files, ttl=ttl)


@@ -130,8 +110,6 @@ async def aget_task_files(task_id: UUID) -> dict[str, FileInput] | None:
     Returns:
         Dictionary of files or None if not found.
     """
-    if _file_store is None:
-        return None
     result: dict[str, FileInput] | None = await _file_store.get(
         f"{_TASK_PREFIX}{task_id}"
     )
@@ -144,8 +122,6 @@ async def aclear_task_files(task_id: UUID) -> None:
     Args:
         task_id: Unique identifier for the task.
     """
-    if _file_store is None:
-        return
     await _file_store.delete(f"{_TASK_PREFIX}{task_id}")
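A minimal sketch of the aiocache usage behind this file store: an in-memory, pickle-serialized cache keyed by execution id, with a TTL. The key prefix and payload below are illustrative, not the module's constants:

```python
import asyncio
from uuid import uuid4

from aiocache import Cache
from aiocache.serializers import PickleSerializer

async def main() -> None:
    # Same construction the module uses: memory backend + pickle serializer.
    store = Cache(Cache.MEMORY, serializer=PickleSerializer())
    execution_id = uuid4()
    await store.set(f"crew:{execution_id}", {"report.pdf": b"%PDF-..."}, ttl=3600)
    files = await store.get(f"crew:{execution_id}")
    print(files)
    await store.delete(f"crew:{execution_id}")

asyncio.run(main())
```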
@@ -1,72 +1,14 @@
-"""Dynamic Pydantic model creation from JSON schemas.
-
-This module provides utilities for converting JSON schemas to Pydantic models at runtime.
-The main function is `create_model_from_schema`, which takes a JSON schema and returns
-a dynamically created Pydantic model class.
-
-This is used by the A2A server to honor response schemas sent by clients, allowing
-structured output from agent tasks.
-
-Based on dydantic (https://github.com/zenbase-ai/dydantic).
-"""
+"""Utilities for generating JSON schemas from Pydantic models.
+
+This module provides functions for converting Pydantic models to JSON schemas
+suitable for use with LLMs and tool definitions.
+"""

 from __future__ import annotations

-from collections.abc import Callable
-from copy import deepcopy
-import datetime
-import logging
-from typing import TYPE_CHECKING, Annotated, Any, Literal, Union
-import uuid
+from typing import Any

-from pydantic import (
-    UUID1,
-    UUID3,
-    UUID4,
-    UUID5,
-    AnyUrl,
-    BaseModel,
-    ConfigDict,
-    DirectoryPath,
-    Field,
-    FilePath,
-    FileUrl,
-    HttpUrl,
-    Json,
-    MongoDsn,
-    NewPath,
-    PostgresDsn,
-    SecretBytes,
-    SecretStr,
-    StrictBytes,
-    create_model as create_model_base,
-)
-from pydantic.networks import (  # type: ignore[attr-defined]
-    IPv4Address,
-    IPv6Address,
-    IPvAnyAddress,
-    IPvAnyInterface,
-    IPvAnyNetwork,
-)
-
-
-logger = logging.getLogger(__name__)
-
-if TYPE_CHECKING:
-    from pydantic import EmailStr
-    from pydantic.main import AnyClassMethod
-else:
-    try:
-        from pydantic import EmailStr
-    except ImportError:
-        logger.warning(
-            "EmailStr unavailable, using str fallback",
-            extra={"missing_package": "email_validator"},
-        )
-        EmailStr = str
+from pydantic import BaseModel


 def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]:
@@ -301,319 +243,3 @@ def generate_model_description(model: type[BaseModel]) -> dict[str, Any]:
             "schema": json_schema,
         },
     }
-
-
-FORMAT_TYPE_MAP: dict[str, type[Any]] = {
-    "base64": Annotated[bytes, Field(json_schema_extra={"format": "base64"})],  # type: ignore[dict-item]
-    "binary": StrictBytes,
-    "date": datetime.date,
-    "time": datetime.time,
-    "date-time": datetime.datetime,
-    "duration": datetime.timedelta,
-    "directory-path": DirectoryPath,
-    "email": EmailStr,
-    "file-path": FilePath,
-    "ipv4": IPv4Address,
-    "ipv6": IPv6Address,
-    "ipvanyaddress": IPvAnyAddress,  # type: ignore[dict-item]
-    "ipvanyinterface": IPvAnyInterface,  # type: ignore[dict-item]
-    "ipvanynetwork": IPvAnyNetwork,  # type: ignore[dict-item]
-    "json-string": Json,
-    "multi-host-uri": PostgresDsn | MongoDsn,  # type: ignore[dict-item]
-    "password": SecretStr,
-    "path": NewPath,
-    "uri": AnyUrl,
-    "uuid": uuid.UUID,
-    "uuid1": UUID1,
-    "uuid3": UUID3,
-    "uuid4": UUID4,
-    "uuid5": UUID5,
-}
-
-
-def create_model_from_schema(  # type: ignore[no-any-unimported]
-    json_schema: dict[str, Any],
-    *,
-    root_schema: dict[str, Any] | None = None,
-    __config__: ConfigDict | None = None,
-    __base__: type[BaseModel] | None = None,
-    __module__: str = __name__,
-    __validators__: dict[str, AnyClassMethod] | None = None,
-    __cls_kwargs__: dict[str, Any] | None = None,
-) -> type[BaseModel]:
-    """Create a Pydantic model from a JSON schema.
-
-    This function takes a JSON schema as input and dynamically creates a Pydantic
-    model class based on the schema. It supports various JSON schema features such
-    as nested objects, referenced definitions ($ref), arrays with typed items,
-    union types (anyOf/oneOf), and string formats.
-
-    Args:
-        json_schema: A dictionary representing the JSON schema.
-        root_schema: The root schema containing $defs. If not provided, the
-            current schema is treated as the root schema.
-        __config__: Pydantic configuration for the generated model.
-        __base__: Base class for the generated model. Defaults to BaseModel.
-        __module__: Module name for the generated model class.
-        __validators__: A dictionary of custom validators for the generated model.
-        __cls_kwargs__: Additional keyword arguments for the generated model class.
-
-    Returns:
-        A dynamically created Pydantic model class based on the provided JSON schema.
-
-    Example:
-        >>> schema = {
-        ...     "title": "Person",
-        ...     "type": "object",
-        ...     "properties": {
-        ...         "name": {"type": "string"},
-        ...         "age": {"type": "integer"},
-        ...     },
-        ...     "required": ["name"],
-        ... }
-        >>> Person = create_model_from_schema(schema)
-        >>> person = Person(name="John", age=30)
-        >>> person.name
-        'John'
-    """
-    effective_root = root_schema or json_schema
-
-    if "allOf" in json_schema:
-        json_schema = _merge_all_of_schemas(json_schema["allOf"], effective_root)
-        if "title" not in json_schema and "title" in (root_schema or {}):
-            json_schema["title"] = (root_schema or {}).get("title")
-
-    model_name = json_schema.get("title", "DynamicModel")
-    field_definitions = {
-        name: _json_schema_to_pydantic_field(
-            name, prop, json_schema.get("required", []), effective_root
-        )
-        for name, prop in (json_schema.get("properties", {}) or {}).items()
-    }
-
-    return create_model_base(
-        model_name,
-        __config__=__config__,
-        __base__=__base__,
-        __module__=__module__,
-        __validators__=__validators__,
-        __cls_kwargs__=__cls_kwargs__,
-        **field_definitions,
-    )
-
-
-def _json_schema_to_pydantic_field(
-    name: str,
-    json_schema: dict[str, Any],
-    required: list[str],
-    root_schema: dict[str, Any],
-) -> Any:
-    """Convert a JSON schema property to a Pydantic field definition.
-
-    Args:
-        name: The field name.
-        json_schema: The JSON schema for this field.
-        required: List of required field names.
-        root_schema: The root schema for resolving $ref.
-
-    Returns:
-        A tuple of (type, Field) for use with create_model.
-    """
-    type_ = _json_schema_to_pydantic_type(json_schema, root_schema, name_=name.title())
-    description = json_schema.get("description")
-    examples = json_schema.get("examples")
-    is_required = name in required
-
-    field_params: dict[str, Any] = {}
-    schema_extra: dict[str, Any] = {}
-
-    if description:
-        field_params["description"] = description
-    if examples:
-        schema_extra["examples"] = examples
-
-    default = ... if is_required else None
-
-    if isinstance(type_, type) and issubclass(type_, (int, float)):
-        if "minimum" in json_schema:
-            field_params["ge"] = json_schema["minimum"]
-        if "exclusiveMinimum" in json_schema:
-            field_params["gt"] = json_schema["exclusiveMinimum"]
-        if "maximum" in json_schema:
-            field_params["le"] = json_schema["maximum"]
-        if "exclusiveMaximum" in json_schema:
-            field_params["lt"] = json_schema["exclusiveMaximum"]
-        if "multipleOf" in json_schema:
-            field_params["multiple_of"] = json_schema["multipleOf"]
-
-    format_ = json_schema.get("format")
-    if format_ in FORMAT_TYPE_MAP:
-        pydantic_type = FORMAT_TYPE_MAP[format_]
-
-        if format_ == "password":
-            if json_schema.get("writeOnly"):
-                pydantic_type = SecretBytes
-        elif format_ == "uri":
-            allowed_schemes = json_schema.get("scheme")
-            if allowed_schemes:
-                if len(allowed_schemes) == 1 and allowed_schemes[0] == "http":
-                    pydantic_type = HttpUrl
-                elif len(allowed_schemes) == 1 and allowed_schemes[0] == "file":
-                    pydantic_type = FileUrl
-
-        type_ = pydantic_type
-
-    if isinstance(type_, type) and issubclass(type_, str):
-        if "minLength" in json_schema:
-            field_params["min_length"] = json_schema["minLength"]
-        if "maxLength" in json_schema:
-            field_params["max_length"] = json_schema["maxLength"]
-        if "pattern" in json_schema:
-            field_params["pattern"] = json_schema["pattern"]
-
-    if not is_required:
-        type_ = type_ | None
-
-    if schema_extra:
-        field_params["json_schema_extra"] = schema_extra
-
-    return type_, Field(default, **field_params)
-
-
-def _resolve_ref(ref: str, root_schema: dict[str, Any]) -> dict[str, Any]:
-    """Resolve a $ref to its actual schema.
-
-    Args:
-        ref: The $ref string (e.g., "#/$defs/MyType").
-        root_schema: The root schema containing $defs.
-
-    Returns:
-        The resolved schema dict.
-    """
-    from typing import cast
-
-    ref_path = ref.split("/")
-    if ref.startswith("#/$defs/"):
-        ref_schema: dict[str, Any] = root_schema["$defs"]
-        start_idx = 2
-    else:
-        ref_schema = root_schema
-        start_idx = 1
-    for path in ref_path[start_idx:]:
-        ref_schema = cast(dict[str, Any], ref_schema[path])
-    return ref_schema
-
-
-def _merge_all_of_schemas(
-    schemas: list[dict[str, Any]],
-    root_schema: dict[str, Any],
-) -> dict[str, Any]:
-    """Merge multiple allOf schemas into a single schema.
-
-    Combines properties and required fields from all schemas.
-
-    Args:
-        schemas: List of schemas to merge.
-        root_schema: The root schema for resolving $ref.
-
-    Returns:
-        Merged schema with combined properties and required fields.
-    """
-    merged: dict[str, Any] = {"type": "object", "properties": {}, "required": []}
-
-    for schema in schemas:
-        if "$ref" in schema:
-            schema = _resolve_ref(schema["$ref"], root_schema)
-
-        if "properties" in schema:
-            merged["properties"].update(schema["properties"])
-
-        if "required" in schema:
-            for field in schema["required"]:
-                if field not in merged["required"]:
-                    merged["required"].append(field)
-
-        if "title" in schema and "title" not in merged:
-            merged["title"] = schema["title"]
-
-    return merged
-
-
-def _json_schema_to_pydantic_type(
-    json_schema: dict[str, Any],
-    root_schema: dict[str, Any],
-    *,
-    name_: str | None = None,
-) -> Any:
-    """Convert a JSON schema to a Python/Pydantic type.
-
-    Args:
-        json_schema: The JSON schema to convert.
-        root_schema: The root schema for resolving $ref.
-        name_: Optional name for nested models.
-
-    Returns:
-        A Python type corresponding to the JSON schema.
-    """
-    ref = json_schema.get("$ref")
-    if ref:
-        ref_schema = _resolve_ref(ref, root_schema)
-        return _json_schema_to_pydantic_type(ref_schema, root_schema, name_=name_)
-
-    enum_values = json_schema.get("enum")
-    if enum_values:
-        return Literal[tuple(enum_values)]
-
-    if "const" in json_schema:
-        return Literal[json_schema["const"]]
-
-    any_of_schemas = []
-    if "anyOf" in json_schema or "oneOf" in json_schema:
-        any_of_schemas = json_schema.get("anyOf", []) + json_schema.get("oneOf", [])
-    if any_of_schemas:
-        any_of_types = [
-            _json_schema_to_pydantic_type(schema, root_schema)
-            for schema in any_of_schemas
-        ]
-        return Union[tuple(any_of_types)]  # noqa: UP007
-
-    all_of_schemas = json_schema.get("allOf")
-    if all_of_schemas:
-        if len(all_of_schemas) == 1:
-            return _json_schema_to_pydantic_type(
-                all_of_schemas[0], root_schema, name_=name_
-            )
-        merged = _merge_all_of_schemas(all_of_schemas, root_schema)
-        return _json_schema_to_pydantic_type(merged, root_schema, name_=name_)
-
-    type_ = json_schema.get("type")
-
-    if type_ == "string":
-        return str
-    if type_ == "integer":
-        return int
-    if type_ == "number":
-        return float
-    if type_ == "boolean":
-        return bool
-    if type_ == "array":
-        items_schema = json_schema.get("items")
-        if items_schema:
-            item_type = _json_schema_to_pydantic_type(
-                items_schema, root_schema, name_=name_
-            )
-            return list[item_type]  # type: ignore[valid-type]
-        return list
-    if type_ == "object":
-        properties = json_schema.get("properties")
-        if properties:
-            json_schema_ = json_schema.copy()
-            if json_schema_.get("title") is None:
-                json_schema_["title"] = name_
-            return create_model_from_schema(json_schema_, root_schema=root_schema)
-        return dict
-    if type_ == "null":
-        return None
-    if type_ is None:
-        return Any
-    raise ValueError(f"Unsupported JSON schema type: {type_} from {json_schema}")
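For context, a usage sketch of the removed helper, reproduced from its own docstring example above. This only runs against the dydantic-based module that this hunk deletes, so the import assumes the pre-change branch:

```python
# Assumes the removed module is still importable (pre-change branch only).
from crewai.utilities.pydantic_schema_utils import create_model_from_schema

schema = {
    "title": "Person",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer"},
    },
    "required": ["name"],
}

Person = create_model_from_schema(schema)
person = Person(name="John", age=30)
print(person.name)  # 'John'
print(person.age)   # 30; optional, since "age" is not listed in "required"
```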
@@ -26,5 +26,4 @@ class LLMMessage(TypedDict):
    tool_call_id: NotRequired[str]
    name: NotRequired[str]
    tool_calls: NotRequired[list[dict[str, Any]]]
-   raw_tool_call_parts: NotRequired[list[Any]]
    files: NotRequired[dict[str, FileInput]]
@@ -1004,53 +1004,3 @@ def test_prepare_kickoff_param_files_override_message_files():

     assert "files" in inputs
     assert inputs["files"]["same.png"] is param_file  # param takes precedence
-
-
-def test_lite_agent_verbose_false_suppresses_printer_output():
-    """Test that setting verbose=False suppresses all printer output."""
-    from crewai.agents.parser import AgentFinish
-    from crewai.types.usage_metrics import UsageMetrics
-
-    mock_llm = Mock(spec=LLM)
-    mock_llm.call.return_value = "Final Answer: Hello!"
-    mock_llm.stop = []
-    mock_llm.supports_stop_words.return_value = False
-    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
-        total_tokens=100,
-        prompt_tokens=50,
-        completion_tokens=50,
-        cached_prompt_tokens=0,
-        successful_requests=1,
-    )
-
-    with pytest.warns(DeprecationWarning):
-        agent = LiteAgent(
-            role="Test Agent",
-            goal="Test goal",
-            backstory="Test backstory",
-            llm=mock_llm,
-            verbose=False,
-        )
-
-    result = agent.kickoff("Say hello")
-
-    assert result is not None
-    assert isinstance(result, LiteAgentOutput)
-    # Verify the printer was never called
-    agent._printer.print = Mock()
-    # For a clean verification, patch printer before execution
-    with pytest.warns(DeprecationWarning):
-        agent2 = LiteAgent(
-            role="Test Agent",
-            goal="Test goal",
-            backstory="Test backstory",
-            llm=mock_llm,
-            verbose=False,
-        )
-
-    mock_printer = Mock()
-    agent2._printer = mock_printer
-
-    agent2.kickoff("Say hello")
-
-    mock_printer.print.assert_not_called()
@@ -1,224 +0,0 @@
-interactions:
-- request:
-    body: '{"messages":[{"role":"system","content":"You are Calculator. You are a
-      calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent
-      Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important
-      to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply
-      two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
-    headers:
-      User-Agent:
-      - X-USER-AGENT-XXX
-      accept:
-      - application/json
-      accept-encoding:
-      - ACCEPT-ENCODING-XXX
-      authorization:
-      - AUTHORIZATION-XXX
-      connection:
-      - keep-alive
-      content-length:
-      - '589'
-      content-type:
-      - application/json
-      host:
-      - api.openai.com
-      x-stainless-arch:
-      - X-STAINLESS-ARCH-XXX
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - X-STAINLESS-OS-XXX
-      x-stainless-package-version:
-      - 1.83.0
-      x-stainless-read-timeout:
-      - X-STAINLESS-READ-TIMEOUT-XXX
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.13.3
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    body:
-      string: "{\n \"id\": \"chatcmpl-D2gblVDQeSH6tTrJiUtxgjoVoPuAR\",\n \"object\":
-        \"chat.completion\",\n \"created\": 1769532813,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
-        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
-        \ \"id\": \"call_gO6PtjoOIDVeDWs7Wf680BHh\",\n \"type\":
-        \"function\",\n \"function\": {\n \"name\": \"multiply_numbers\",\n
-        \ \"arguments\": \"{\\\"a\\\":7,\\\"b\\\":6}\"\n }\n
-        \ }\n ],\n \"refusal\": null,\n \"annotations\":
-        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
-        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 100,\n \"completion_tokens\":
-        18,\n \"total_tokens\": 118,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
-        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
-        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
-    headers:
-      CF-RAY:
-      - CF-RAY-XXX
-      Connection:
-      - keep-alive
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 27 Jan 2026 16:53:34 GMT
-      Server:
-      - cloudflare
-      Set-Cookie:
-      - SET-COOKIE-XXX
-      Strict-Transport-Security:
-      - STS-XXX
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - X-CONTENT-TYPE-XXX
-      access-control-expose-headers:
-      - ACCESS-CONTROL-XXX
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - OPENAI-ORG-XXX
-      openai-processing-ms:
-      - '593'
-      openai-project:
-      - OPENAI-PROJECT-XXX
-      openai-version:
-      - '2020-10-01'
-      x-openai-proxy-wasm:
-      - v0.1
-      x-ratelimit-limit-requests:
-      - X-RATELIMIT-LIMIT-REQUESTS-XXX
-      x-ratelimit-limit-tokens:
-      - X-RATELIMIT-LIMIT-TOKENS-XXX
-      x-ratelimit-remaining-requests:
-      - X-RATELIMIT-REMAINING-REQUESTS-XXX
-      x-ratelimit-remaining-tokens:
-      - X-RATELIMIT-REMAINING-TOKENS-XXX
-      x-ratelimit-reset-requests:
-      - X-RATELIMIT-RESET-REQUESTS-XXX
-      x-ratelimit-reset-tokens:
-      - X-RATELIMIT-RESET-TOKENS-XXX
-      x-request-id:
-      - X-REQUEST-ID-XXX
-    status:
-      code: 200
-      message: OK
-- request:
-    body: '{"messages":[{"role":"system","content":"You are Calculator. You are a
-      calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent
-      Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important
-      to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gO6PtjoOIDVeDWs7Wf680BHh","type":"function","function":{"name":"multiply_numbers","arguments":"{\"a\":7,\"b\":6}"}}]},{"role":"tool","tool_call_id":"call_gO6PtjoOIDVeDWs7Wf680BHh","name":"multiply_numbers","content":"42"},{"role":"user","content":"Analyze
-      the tool result. If requirements are met, provide the Final Answer. Otherwise,
-      call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply
-      two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
-    headers:
-      User-Agent:
-      - X-USER-AGENT-XXX
-      accept:
-      - application/json
-      accept-encoding:
-      - ACCEPT-ENCODING-XXX
-      authorization:
-      - AUTHORIZATION-XXX
-      connection:
-      - keep-alive
-      content-length:
-      - '1056'
-      content-type:
-      - application/json
-      cookie:
-      - COOKIE-XXX
-      host:
-      - api.openai.com
-      x-stainless-arch:
-      - X-STAINLESS-ARCH-XXX
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - X-STAINLESS-OS-XXX
-      x-stainless-package-version:
-      - 1.83.0
-      x-stainless-read-timeout:
-      - X-STAINLESS-READ-TIMEOUT-XXX
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.13.3
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    body:
-      string: "{\n \"id\": \"chatcmpl-D2gbm9NaGCXkI3QwW3eOTFSP4L4lh\",\n \"object\":
-        \"chat.completion\",\n \"created\": 1769532814,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
-        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-        \"assistant\",\n \"content\": \"42\",\n \"refusal\": null,\n
-        \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
-        \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 162,\n \"completion_tokens\":
-        2,\n \"total_tokens\": 164,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
-        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
-        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
-    headers:
-      CF-RAY:
-      - CF-RAY-XXX
-      Connection:
-      - keep-alive
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 27 Jan 2026 16:53:34 GMT
-      Server:
-      - cloudflare
-      Strict-Transport-Security:
-      - STS-XXX
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - X-CONTENT-TYPE-XXX
-      access-control-expose-headers:
-      - ACCESS-CONTROL-XXX
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - OPENAI-ORG-XXX
-      openai-processing-ms:
-      - '259'
-      openai-project:
-      - OPENAI-PROJECT-XXX
-      openai-version:
-      - '2020-10-01'
-      x-openai-proxy-wasm:
-      - v0.1
-      x-ratelimit-limit-requests:
-      - X-RATELIMIT-LIMIT-REQUESTS-XXX
-      x-ratelimit-limit-tokens:
-      - X-RATELIMIT-LIMIT-TOKENS-XXX
-      x-ratelimit-remaining-requests:
-      - X-RATELIMIT-REMAINING-REQUESTS-XXX
-      x-ratelimit-remaining-tokens:
-      - X-RATELIMIT-REMAINING-TOKENS-XXX
-      x-ratelimit-reset-requests:
-      - X-RATELIMIT-RESET-REQUESTS-XXX
-      x-ratelimit-reset-tokens:
-      - X-RATELIMIT-RESET-TOKENS-XXX
-      x-request-id:
-      - X-REQUEST-ID-XXX
-    status:
-      code: 200
-      message: OK
-version: 1
@@ -1,351 +0,0 @@
-interactions:
-- request:
-    body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
-      test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
-      Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
-      the expected criteria for your final answer: The result of the operation\nyou
-      MUST return the actual complete content as the final answer, not a summary.\n\nThis
-      is VERY important to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
-      a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
-    headers:
-      User-Agent:
-      - X-USER-AGENT-XXX
-      accept:
-      - application/json
-      accept-encoding:
-      - ACCEPT-ENCODING-XXX
-      authorization:
-      - AUTHORIZATION-XXX
-      connection:
-      - keep-alive
-      content-length:
-      - '773'
-      content-type:
-      - application/json
-      host:
-      - api.openai.com
-      x-stainless-arch:
-      - X-STAINLESS-ARCH-XXX
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - X-STAINLESS-OS-XXX
-      x-stainless-package-version:
-      - 1.83.0
-      x-stainless-read-timeout:
-      - X-STAINLESS-READ-TIMEOUT-XXX
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.13.3
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    body:
-      string: "{\n \"id\": \"chatcmpl-D2giKEOxBDVqJVqVECwcFjbzdQKSA\",\n \"object\":
-        \"chat.completion\",\n \"created\": 1769533220,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
-        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
-        \ \"id\": \"call_3OM1qS0QaWqhiJaHyJbNz1ME\",\n \"type\":
-        \"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n
-        \ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n
-        \ }\n ],\n \"refusal\": null,\n \"annotations\":
-        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
-        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 133,\n \"completion_tokens\":
-        17,\n \"total_tokens\": 150,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
-        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
-        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
-    headers:
-      CF-RAY:
-      - CF-RAY-XXX
-      Connection:
-      - keep-alive
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 27 Jan 2026 17:00:20 GMT
-      Server:
-      - cloudflare
-      Set-Cookie:
-      - SET-COOKIE-XXX
-      Strict-Transport-Security:
-      - STS-XXX
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - X-CONTENT-TYPE-XXX
-      access-control-expose-headers:
-      - ACCESS-CONTROL-XXX
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - OPENAI-ORG-XXX
-      openai-processing-ms:
-      - '484'
-      openai-project:
-      - OPENAI-PROJECT-XXX
-      openai-version:
-      - '2020-10-01'
-      x-openai-proxy-wasm:
-      - v0.1
-      x-ratelimit-limit-requests:
-      - X-RATELIMIT-LIMIT-REQUESTS-XXX
-      x-ratelimit-limit-tokens:
-      - X-RATELIMIT-LIMIT-TOKENS-XXX
-      x-ratelimit-remaining-requests:
-      - X-RATELIMIT-REMAINING-REQUESTS-XXX
-      x-ratelimit-remaining-tokens:
-      - X-RATELIMIT-REMAINING-TOKENS-XXX
-      x-ratelimit-reset-requests:
-      - X-RATELIMIT-RESET-REQUESTS-XXX
-      x-ratelimit-reset-tokens:
-      - X-RATELIMIT-RESET-TOKENS-XXX
-      x-request-id:
-      - X-REQUEST-ID-XXX
-    status:
-      code: 200
-      message: OK
-- request:
-    body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
-      test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
-      Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
-      the expected criteria for your final answer: The result of the operation\nyou
-      MUST return the actual complete content as the final answer, not a summary.\n\nThis
-      is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool
-      execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
-      the tool result. If requirements are met, provide the Final Answer. Otherwise,
-      call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
-      a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
-    headers:
-      User-Agent:
-      - X-USER-AGENT-XXX
-      accept:
-      - application/json
-      accept-encoding:
-      - ACCEPT-ENCODING-XXX
-      authorization:
-      - AUTHORIZATION-XXX
-      connection:
-      - keep-alive
-      content-length:
-      - '1311'
-      content-type:
-      - application/json
-      cookie:
-      - COOKIE-XXX
-      host:
-      - api.openai.com
-      x-stainless-arch:
-      - X-STAINLESS-ARCH-XXX
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - X-STAINLESS-OS-XXX
-      x-stainless-package-version:
-      - 1.83.0
-      x-stainless-read-timeout:
-      - X-STAINLESS-READ-TIMEOUT-XXX
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.13.3
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    body:
-      string: "{\n \"id\": \"chatcmpl-D2giLnD91JxhK0yXninQ7oHYttNDY\",\n \"object\":
-        \"chat.completion\",\n \"created\": 1769533221,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
-        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
-        \ \"id\": \"call_qF1c2e31GgjoSNJx0HBxI3zX\",\n \"type\":
-        \"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n
-        \ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n
-        \ }\n ],\n \"refusal\": null,\n \"annotations\":
-        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
-        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\":
-        17,\n \"total_tokens\": 221,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
-        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
-        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
-    headers:
-      CF-RAY:
-      - CF-RAY-XXX
-      Connection:
-      - keep-alive
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 27 Jan 2026 17:00:21 GMT
-      Server:
-      - cloudflare
-      Strict-Transport-Security:
-      - STS-XXX
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - X-CONTENT-TYPE-XXX
-      access-control-expose-headers:
-      - ACCESS-CONTROL-XXX
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - OPENAI-ORG-XXX
-      openai-processing-ms:
-      - '447'
-      openai-project:
-      - OPENAI-PROJECT-XXX
-      openai-version:
-      - '2020-10-01'
-      x-openai-proxy-wasm:
-      - v0.1
-      x-ratelimit-limit-requests:
-      - X-RATELIMIT-LIMIT-REQUESTS-XXX
-      x-ratelimit-limit-tokens:
-      - X-RATELIMIT-LIMIT-TOKENS-XXX
-      x-ratelimit-remaining-requests:
-      - X-RATELIMIT-REMAINING-REQUESTS-XXX
-      x-ratelimit-remaining-tokens:
-      - X-RATELIMIT-REMAINING-TOKENS-XXX
-      x-ratelimit-reset-requests:
-      - X-RATELIMIT-RESET-REQUESTS-XXX
-      x-ratelimit-reset-tokens:
-      - X-RATELIMIT-RESET-TOKENS-XXX
-      x-request-id:
-      - X-REQUEST-ID-XXX
-    status:
-      code: 200
-      message: OK
-- request:
-    body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
-      test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
-      Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
-      the expected criteria for your final answer: The result of the operation\nyou
-      MUST return the actual complete content as the final answer, not a summary.\n\nThis
-      is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool
-      execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
-      the tool result. If requirements are met, provide the Final Answer. Otherwise,
-      call the next tool. Deliver only the answer without meta-commentary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_qF1c2e31GgjoSNJx0HBxI3zX","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_qF1c2e31GgjoSNJx0HBxI3zX","name":"dangerous_operation","content":"Tool
-      execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
-      the tool result. If requirements are met, provide the Final Answer. Otherwise,
-      call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
-      a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
-    headers:
-      User-Agent:
-      - X-USER-AGENT-XXX
-      accept:
-      - application/json
-      accept-encoding:
-      - ACCEPT-ENCODING-XXX
-      authorization:
-      - AUTHORIZATION-XXX
-      connection:
-      - keep-alive
-      content-length:
-      - '1849'
-      content-type:
-      - application/json
-      cookie:
-      - COOKIE-XXX
-      host:
-      - api.openai.com
-      x-stainless-arch:
-      - X-STAINLESS-ARCH-XXX
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - X-STAINLESS-OS-XXX
-      x-stainless-package-version:
-      - 1.83.0
-      x-stainless-read-timeout:
-      - X-STAINLESS-READ-TIMEOUT-XXX
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.13.3
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    body:
-      string: "{\n \"id\": \"chatcmpl-D2giM1tAvEOCNwDw1qNmNUN5PIg2Y\",\n \"object\":
-        \"chat.completion\",\n \"created\": 1769533222,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
-        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-        \"assistant\",\n \"content\": \"The dangerous_operation tool with action
-        'delete_all' was blocked and did not execute. There is no result from the
-        operation to provide.\",\n \"refusal\": null,\n \"annotations\":
-        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
-        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 275,\n \"completion_tokens\":
-        28,\n \"total_tokens\": 303,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
-        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
-        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
-    headers:
-      CF-RAY:
-      - CF-RAY-XXX
-      Connection:
-      - keep-alive
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 27 Jan 2026 17:00:22 GMT
-      Server:
-      - cloudflare
-      Strict-Transport-Security:
-      - STS-XXX
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - X-CONTENT-TYPE-XXX
-      access-control-expose-headers:
-      - ACCESS-CONTROL-XXX
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - OPENAI-ORG-XXX
-      openai-processing-ms:
-      - '636'
-      openai-project:
-      - OPENAI-PROJECT-XXX
-      openai-version:
-      - '2020-10-01'
-      x-openai-proxy-wasm:
-      - v0.1
-      x-ratelimit-limit-requests:
-      - X-RATELIMIT-LIMIT-REQUESTS-XXX
-      x-ratelimit-limit-tokens:
-      - X-RATELIMIT-LIMIT-TOKENS-XXX
-      x-ratelimit-remaining-requests:
-      - X-RATELIMIT-REMAINING-REQUESTS-XXX
-      x-ratelimit-remaining-tokens:
-      - X-RATELIMIT-REMAINING-TOKENS-XXX
-      x-ratelimit-reset-requests:
-      - X-RATELIMIT-RESET-REQUESTS-XXX
-      x-ratelimit-reset-tokens:
-      - X-RATELIMIT-RESET-TOKENS-XXX
-      x-request-id:
-      - X-REQUEST-ID-XXX
-    status:
-      code: 200
-      message: OK
-version: 1
@@ -1,230 +0,0 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are
      a math assistant that helps with division\nYour personal goal is: Perform division
      calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate
      100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria
      for your final answer: The result of the division\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide
      first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '809'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D2gbkWUn8InDLeD1Cf8w0LxiUQOIS\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
        \ \"id\": \"call_gwIV3i71RNqfpr7KguEciCuV\",\n \"type\":
        \"function\",\n \"function\": {\n \"name\": \"divide_numbers\",\n
        \ \"arguments\": \"{\\\"a\\\":100,\\\"b\\\":4}\"\n }\n
        \ }\n ],\n \"refusal\": null,\n \"annotations\":
        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 140,\n \"completion_tokens\":
        18,\n \"total_tokens\": 158,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 27 Jan 2026 16:53:32 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '435'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are
      a math assistant that helps with division\nYour personal goal is: Perform division
      calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate
      100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria
      for your final answer: The result of the division\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gwIV3i71RNqfpr7KguEciCuV","type":"function","function":{"name":"divide_numbers","arguments":"{\"a\":100,\"b\":4}"}}]},{"role":"tool","tool_call_id":"call_gwIV3i71RNqfpr7KguEciCuV","name":"divide_numbers","content":"25.0"},{"role":"user","content":"Analyze
      the tool result. If requirements are met, provide the Final Answer. Otherwise,
      call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide
      first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1276'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D2gbkHw19D5oEBOhpZP5FR5MvRFgb\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"25.0\",\n \"refusal\": null,\n
        \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
        \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\":
        4,\n \"total_tokens\": 208,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 27 Jan 2026 16:53:33 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '523'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -1,22 +1,7 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant.
      You are a helpful calculator assistant\nYour personal goal is: Help with math
      calculations\n\nYou ONLY have access to the following tools, and should NEVER
      make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments:
      {\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\":
      \"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\":
      \"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\":
      \"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool
      Description: Add two numbers together.\n\nIMPORTANT: Use the following format
      in your response:\n\n```\nThought: you should always think about what to do\nAction:
      the action to take, only one name of [calculate_sum], just the name, exactly
      as it''s written.\nAction Input: the input to the action, just a simple JSON
      object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
      the result of the action\n```\n\nOnce all necessary information is gathered,
      return the following format:\n\n```\nThought: I now know the final answer\nFinal
      Answer: the final answer to the original input question\n```"},{"role":"user","content":"What
      is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
      answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
@@ -29,7 +14,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '1356'
      - '1119'
      content-type:
      - application/json
      host:
@@ -56,18 +41,8 @@ interactions:
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D2gSz7JfTi4NQ2QRTANg8Z2afJI8b\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769532269,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"```\\nThought: I need to use the calculate_sum
        tool to find the sum of 5 and 3\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        295,\n \"completion_tokens\": 41,\n \"total_tokens\": 336,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CiksV15hVLWURKZH4BxQEGjiCFWpz\",\n \"object\": \"chat.completion\",\n \"created\": 1764782667,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I should use the calculate_sum tool to add 5 and 3.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\": 5, \\\"b\\\": 3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 40,\n \"total_tokens\": 274,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
        : \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
@@ -76,7 +51,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
      - Tue, 27 Jan 2026 16:44:30 GMT
      - Wed, 03 Dec 2025 17:24:28 GMT
      Server:
      - cloudflare
      Set-Cookie:
@@ -96,11 +71,13 @@ interactions:
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '827'
      - '681'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '871'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
@@ -121,25 +98,8 @@ interactions:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant.
      You are a helpful calculator assistant\nYour personal goal is: Help with math
      calculations\n\nYou ONLY have access to the following tools, and should NEVER
      make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments:
      {\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\":
      \"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\":
      \"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\":
      \"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool
      Description: Add two numbers together.\n\nIMPORTANT: Use the following format
      in your response:\n\n```\nThought: you should always think about what to do\nAction:
      the action to take, only one name of [calculate_sum], just the name, exactly
      as it''s written.\nAction Input: the input to the action, just a simple JSON
      object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
      the result of the action\n```\n\nOnce all necessary information is gathered,
      return the following format:\n\n```\nThought: I now know the final answer\nFinal
      Answer: the final answer to the original input question\n```"},{"role":"user","content":"What
      is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought:
      I need to use the calculate_sum tool to find the sum of 5 and 3\nAction: calculate_sum\nAction
      Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
      answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I should use the calculate_sum tool to add 5 and 3.\nAction: calculate_sum\nAction Input: {\"a\": 5, \"b\": 3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
@@ -152,7 +112,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '1544'
      - '1298'
      content-type:
      - application/json
      cookie:
@@ -181,18 +141,7 @@ interactions:
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D2gT0RU66XqjAUOXnGmokD1Q8Fman\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769532270,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"```\\nThought: I now know the final
        answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\":
        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 345,\n \"completion_tokens\":
        18,\n \"total_tokens\": 363,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
      string: "{\n \"id\": \"chatcmpl-CiksWrVbyJFurKCm7XPRU1b1pT7qF\",\n \"object\": \"chat.completion\",\n \"created\": 1764782668,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 283,\n \"completion_tokens\": 18,\n \"total_tokens\": 301,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
@@ -201,7 +150,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
      - Tue, 27 Jan 2026 16:44:31 GMT
      - Wed, 03 Dec 2025 17:24:29 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
@@ -219,11 +168,208 @@ interactions:
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '606'
      - '427'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '442'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
      answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1119'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CimX8hwYiUUZijApUDk1yBMzTpBj9\",\n \"object\": \"chat.completion\",\n \"created\": 1764789030,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need to add 5 and 3 using the calculate_sum tool.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 37,\n \"total_tokens\": 271,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
        : \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 03 Dec 2025 19:10:33 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '2329'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '2349'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
      answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I need to add 5 and 3 using the calculate_sum tool.\nAction: calculate_sum\nAction Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1295'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CimXBrY5sdbr2pJnqGlazPTra4dor\",\n \"object\": \"chat.completion\",\n \"created\": 1764789033,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 280,\n \"completion_tokens\": 18,\n \"total_tokens\": 298,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 03 Dec 2025 19:10:35 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '1647'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1694'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
@@ -1,8 +1,6 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Say hello in French"}],"model":"claude-sonnet-4-0","stream":false,"tool_choice":{"type":"tool","name":"structured_output"},"tools":[{"name":"structured_output","description":"Output
      the structured response","input_schema":{"type":"object","description":"Response
      model for greeting test.","title":"GreetingResponse","properties":{"greeting":{"type":"string","title":"Greeting"},"language":{"type":"string","title":"Language"}},"additionalProperties":false,"required":["greeting","language"]}}]}'
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Say hello in French"}],"model":"claude-sonnet-4-0","stream":false,"tool_choice":{"type":"tool","name":"structured_output"},"tools":[{"name":"structured_output","description":"Returns structured data according to the schema","input_schema":{"description":"Response model for greeting test.","properties":{"greeting":{"title":"Greeting","type":"string"},"language":{"title":"Language","type":"string"}},"required":["greeting","language"],"title":"GreetingResponse","type":"object"}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
@@ -15,7 +13,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '551'
      - '539'
      content-type:
      - application/json
      host:
@@ -31,7 +29,7 @@ interactions:
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.76.0
      - 0.75.0
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
@@ -44,7 +42,7 @@ interactions:
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-sonnet-4-20250514","id":"msg_01CKTyVmak15L5oQ36mv4sL9","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_0174BYmn6xiSnUwVhFD8S7EW","name":"structured_output","input":{"greeting":"Bonjour","language":"French"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":436,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":53,"service_tier":"standard"}}'
      string: '{"model":"claude-sonnet-4-20250514","id":"msg_01XjvX2nCho1knuucbwwgCpw","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_019rfPRSDmBb7CyCTdGMv5rK","name":"structured_output","input":{"greeting":"Bonjour","language":"French"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":432,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":53,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
@@ -53,7 +51,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
      - Mon, 26 Jan 2026 14:59:34 GMT
      - Mon, 01 Dec 2025 11:19:38 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
@@ -84,10 +82,12 @@ interactions:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      retry-after:
      - '24'
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '968'
      - '2101'
    status:
      code: 200
      message: OK
@@ -1,319 +0,0 @@
interactions:
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is 10000 + 20000?
      Use the sum_numbers tool to calculate this.\n\nThis is the expected criteria
      for your final answer: The sum of the two numbers\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"}], "role": "user"}], "systemInstruction": {"parts":
      [{"text": "You are Calculator. You are a calculator that adds numbers.\nYour
      personal goal is: Calculate numbers accurately"}], "role": "user"}, "tools":
      [{"functionDeclarations": [{"description": "Add two numbers together and return
      the result", "name": "sum_numbers", "parameters": {"properties": {"a": {"description":
      "The first number to add", "title": "A", "type": "NUMBER"}, "b": {"description":
      "The second number to add", "title": "B", "type": "NUMBER"}}, "required": ["a",
      "b"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '962'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"functionCall\": {\n \"name\": \"sum_numbers\",\n
        \ \"args\": {\n \"a\": 10000,\n \"b\":
        20000\n }\n }\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
        -0.00059548033667462211\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
        127,\n \"candidatesTokenCount\": 7,\n \"totalTokenCount\": 134,\n \"promptTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 127\n
        \ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
        \"TEXT\",\n \"tokenCount\": 7\n }\n ]\n },\n \"modelVersion\":
        \"gemini-2.0-flash-001\",\n \"responseId\": \"bLBzabiACaP3-8YP7s-P6QI\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 17:31:24 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=673
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is 10000 + 20000?
      Use the sum_numbers tool to calculate this.\n\nThis is the expected criteria
      for your final answer: The sum of the two numbers\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"}], "role": "user"}, {"parts": [{"functionCall":
      {"args": {"a": 10000, "b": 20000}, "name": "sum_numbers"}}], "role": "model"},
      {"parts": [{"functionResponse": {"name": "sum_numbers", "response": {"result":
      30000}}}], "role": "user"}, {"parts": [{"text": "Analyze the tool result. If
      requirements are met, provide the Final Answer. Otherwise, call the next tool.
      Deliver only the answer without meta-commentary."}], "role": "user"}], "systemInstruction":
      {"parts": [{"text": "You are Calculator. You are a calculator that adds numbers.\nYour
      personal goal is: Calculate numbers accurately"}], "role": "user"}, "tools":
      [{"functionDeclarations": [{"description": "Add two numbers together and return
      the result", "name": "sum_numbers", "parameters": {"properties": {"a": {"description":
      "The first number to add", "title": "A", "type": "NUMBER"}, "b": {"description":
      "The second number to add", "title": "B", "type": "NUMBER"}}, "required": ["a",
      "b"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '1374'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"text\": \"\"\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\"\n }\n ],\n \"usageMetadata\":
        {\n \"promptTokenCount\": 171,\n \"totalTokenCount\": 171,\n \"promptTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 171\n
        \ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-001\",\n \"responseId\":
        \"bLBzaaKgMc-ajrEPk7bIuQ8\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 17:31:25 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=382
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is 10000 + 20000?
      Use the sum_numbers tool to calculate this.\n\nThis is the expected criteria
      for your final answer: The sum of the two numbers\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"}], "role": "user"}, {"parts": [{"functionCall":
      {"args": {"a": 10000, "b": 20000}, "name": "sum_numbers"}}], "role": "model"},
      {"parts": [{"functionResponse": {"name": "sum_numbers", "response": {"result":
      30000}}}], "role": "user"}, {"parts": [{"text": "Analyze the tool result. If
      requirements are met, provide the Final Answer. Otherwise, call the next tool.
      Deliver only the answer without meta-commentary."}], "role": "user"}, {"parts":
      [{"text": "\nCurrent Task: What is 10000 + 20000? Use the sum_numbers tool to
      calculate this.\n\nThis is the expected criteria for your final answer: The
      sum of the two numbers\nyou MUST return the actual complete content as the final
      answer, not a summary.\n\nThis is VERY important to you, your job depends on
      it!"}], "role": "user"}], "systemInstruction": {"parts": [{"text": "You are
      Calculator. You are a calculator that adds numbers.\nYour personal goal is:
      Calculate numbers accurately\n\nYou are Calculator. You are a calculator that
      adds numbers.\nYour personal goal is: Calculate numbers accurately"}], "role":
      "user"}, "tools": [{"functionDeclarations": [{"description": "Add two numbers
      together and return the result", "name": "sum_numbers", "parameters": {"properties":
      {"a": {"description": "The first number to add", "title": "A", "type": "NUMBER"},
      "b": {"description": "The second number to add", "title": "B", "type": "NUMBER"}},
      "required": ["a", "b"], "type": "OBJECT"}}]}], "generationConfig": {"stopSequences":
      ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '1837'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"text\": \"\"\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\"\n }\n ],\n \"usageMetadata\":
        {\n \"promptTokenCount\": 271,\n \"totalTokenCount\": 271,\n \"promptTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 271\n
        \ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-001\",\n \"responseId\":
        \"bbBzaczHDcW7jrEPgaj1CA\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 17:31:25 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=410
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is 10000 + 20000?
      Use the sum_numbers tool to calculate this.\n\nThis is the expected criteria
      for your final answer: The sum of the two numbers\nyou MUST return the actual
      complete content as the final answer, not a summary.\n\nThis is VERY important
      to you, your job depends on it!"}], "role": "user"}, {"parts": [{"functionCall":
      {"args": {"a": 10000, "b": 20000}, "name": "sum_numbers"}}], "role": "model"},
      {"parts": [{"functionResponse": {"name": "sum_numbers", "response": {"result":
      30000}}}], "role": "user"}, {"parts": [{"text": "Analyze the tool result. If
      requirements are met, provide the Final Answer. Otherwise, call the next tool.
      Deliver only the answer without meta-commentary."}], "role": "user"}, {"parts":
      [{"text": "\nCurrent Task: What is 10000 + 20000? Use the sum_numbers tool to
      calculate this.\n\nThis is the expected criteria for your final answer: The
      sum of the two numbers\nyou MUST return the actual complete content as the final
      answer, not a summary.\n\nThis is VERY important to you, your job depends on
      it!"}], "role": "user"}, {"parts": [{"text": "\nCurrent Task: What is 10000
      + 20000? Use the sum_numbers tool to calculate this.\n\nThis is the expected
      criteria for your final answer: The sum of the two numbers\nyou MUST return
      the actual complete content as the final answer, not a summary.\n\nThis is VERY
      important to you, your job depends on it!"}], "role": "user"}], "systemInstruction":
      {"parts": [{"text": "You are Calculator. You are a calculator that adds numbers.\nYour
      personal goal is: Calculate numbers accurately\n\nYou are Calculator. You are
      a calculator that adds numbers.\nYour personal goal is: Calculate numbers accurately\n\nYou
      are Calculator. You are a calculator that adds numbers.\nYour personal goal
      is: Calculate numbers accurately"}], "role": "user"}, "tools": [{"functionDeclarations":
      [{"description": "Add two numbers together and return the result", "name": "sum_numbers",
      "parameters": {"properties": {"a": {"description": "The first number to add",
      "title": "A", "type": "NUMBER"}, "b": {"description": "The second number to
      add", "title": "B", "type": "NUMBER"}}, "required": ["a", "b"], "type": "OBJECT"}}]}],
      "generationConfig": {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '2300'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"text\": \"\\n{\\\"sum_numbers_response\\\":
        {\\\"result\\\": 30000}}\\n\"\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
        -0.0038021293125654523\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
        371,\n \"candidatesTokenCount\": 19,\n \"totalTokenCount\": 390,\n \"promptTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 371\n
        \ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
        \"TEXT\",\n \"tokenCount\": 19\n }\n ]\n },\n \"modelVersion\":
        \"gemini-2.0-flash-001\",\n \"responseId\": \"bbBzaauxJ_SgjrEP7onK2Ak\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 17:31:26 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=454
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
version: 1
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -590,233 +590,3 @@ class TestToolHooksIntegration:
        # Clean up hooks
        unregister_before_tool_call_hook(before_tool_call_hook)
        unregister_after_tool_call_hook(after_tool_call_hook)


class TestNativeToolCallingHooksIntegration:
    """Integration tests for hooks with native function calling (Agent and Crew)."""

    @pytest.mark.vcr()
    def test_agent_native_tool_hooks_before_and_after(self):
        """Test that Agent with native tool calling executes before/after hooks."""
        import os
        from crewai import Agent
        from crewai.tools import tool

        hook_calls = {"before": [], "after": []}

        @tool("multiply_numbers")
        def multiply_numbers(a: int, b: int) -> int:
            """Multiply two numbers together."""
            return a * b

        def before_hook(context: ToolCallHookContext) -> bool | None:
            hook_calls["before"].append({
                "tool_name": context.tool_name,
                "tool_input": dict(context.tool_input),
                "has_agent": context.agent is not None,
            })
            return None

        def after_hook(context: ToolCallHookContext) -> str | None:
            hook_calls["after"].append({
                "tool_name": context.tool_name,
                "tool_result": context.tool_result,
                "has_agent": context.agent is not None,
            })
            return None

        register_before_tool_call_hook(before_hook)
        register_after_tool_call_hook(after_hook)

        try:
            agent = Agent(
                role="Calculator",
                goal="Perform calculations",
                backstory="You are a calculator assistant",
                tools=[multiply_numbers],
                verbose=True,
            )

            agent.kickoff(
                messages="What is 7 times 6? Use the multiply_numbers tool."
            )

            # Verify before hook was called
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            before_call = hook_calls["before"][0]
            assert before_call["tool_name"] == "multiply_numbers"
            assert "a" in before_call["tool_input"]
            assert "b" in before_call["tool_input"]
            assert before_call["has_agent"] is True

            # Verify after hook was called
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            after_call = hook_calls["after"][0]
            assert after_call["tool_name"] == "multiply_numbers"
            assert "42" in str(after_call["tool_result"])
            assert after_call["has_agent"] is True

        finally:
            unregister_before_tool_call_hook(before_hook)
            unregister_after_tool_call_hook(after_hook)

    @pytest.mark.vcr()
    def test_crew_native_tool_hooks_before_and_after(self):
        """Test that Crew with Agent executes before/after hooks with full context."""
        import os
        from crewai import Agent, Crew, Task
        from crewai.tools import tool


        hook_calls = {"before": [], "after": []}

        @tool("divide_numbers")
        def divide_numbers(a: int, b: int) -> float:
            """Divide first number by second number."""
            return a / b

        def before_hook(context: ToolCallHookContext) -> bool | None:
            hook_calls["before"].append({
                "tool_name": context.tool_name,
                "tool_input": dict(context.tool_input),
                "has_agent": context.agent is not None,
                "has_task": context.task is not None,
                "has_crew": context.crew is not None,
                "agent_role": context.agent.role if context.agent else None,
            })
            return None

        def after_hook(context: ToolCallHookContext) -> str | None:
            hook_calls["after"].append({
                "tool_name": context.tool_name,
                "tool_result": context.tool_result,
                "has_agent": context.agent is not None,
                "has_task": context.task is not None,
                "has_crew": context.crew is not None,
            })
            return None

        register_before_tool_call_hook(before_hook)
        register_after_tool_call_hook(after_hook)

        try:
            agent = Agent(
                role="Math Assistant",
                goal="Perform division calculations accurately",
                backstory="You are a math assistant that helps with division",
                tools=[divide_numbers],
                verbose=True,
            )

            task = Task(
                description="Calculate 100 divided by 4 using the divide_numbers tool.",
                expected_output="The result of the division",
                agent=agent,
            )

            crew = Crew(
                agents=[agent],
                tasks=[task],
                verbose=True,
            )

            crew.kickoff()

            # Verify before hook was called with full context
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            before_call = hook_calls["before"][0]
            assert before_call["tool_name"] == "divide_numbers"
            assert "a" in before_call["tool_input"]
            assert "b" in before_call["tool_input"]
            assert before_call["has_agent"] is True
            assert before_call["has_task"] is True
            assert before_call["has_crew"] is True
            assert before_call["agent_role"] == "Math Assistant"

            # Verify after hook was called with full context
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            after_call = hook_calls["after"][0]
            assert after_call["tool_name"] == "divide_numbers"
            assert "25" in str(after_call["tool_result"])
            assert after_call["has_agent"] is True
            assert after_call["has_task"] is True
            assert after_call["has_crew"] is True

        finally:
            unregister_before_tool_call_hook(before_hook)
            unregister_after_tool_call_hook(after_hook)

    @pytest.mark.vcr()
    def test_before_hook_blocks_tool_execution_in_crew(self):
        """Test that returning False from before hook blocks tool execution."""
        import os
        from crewai import Agent, Crew, Task
        from crewai.tools import tool

        hook_calls = {"before": [], "after": [], "tool_executed": False}

        @tool("dangerous_operation")
        def dangerous_operation(action: str) -> str:
            """Perform a dangerous operation that should be blocked."""
            hook_calls["tool_executed"] = True
            return f"Executed: {action}"

        def blocking_before_hook(context: ToolCallHookContext) -> bool | None:
            hook_calls["before"].append({
                "tool_name": context.tool_name,
                "tool_input": dict(context.tool_input),
            })
            # Block all calls to dangerous_operation
            if context.tool_name == "dangerous_operation":
                return False
            return None

        def after_hook(context: ToolCallHookContext) -> str | None:
            hook_calls["after"].append({
                "tool_name": context.tool_name,
                "tool_result": context.tool_result,
            })
            return None

        register_before_tool_call_hook(blocking_before_hook)
        register_after_tool_call_hook(after_hook)

        try:
            agent = Agent(
                role="Test Agent",
                goal="Try to use the dangerous operation tool",
                backstory="You are a test agent",
                tools=[dangerous_operation],
                verbose=True,
            )

            task = Task(
                description="Use the dangerous_operation tool with action 'delete_all'.",
                expected_output="The result of the operation",
                agent=agent,
            )

            crew = Crew(
                agents=[agent],
                tasks=[task],
                verbose=True,
            )

            crew.kickoff()

            # Verify before hook was called
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            before_call = hook_calls["before"][0]
            assert before_call["tool_name"] == "dangerous_operation"

            # Verify the actual tool function was NOT executed
            assert hook_calls["tool_executed"] is False, "Tool should have been blocked"

            # Verify after hook was still called (with blocked message)
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            after_call = hook_calls["after"][0]
            assert "blocked" in after_call["tool_result"].lower()

        finally:
            unregister_before_tool_call_hook(blocking_before_hook)
            unregister_after_tool_call_hook(after_hook)
@@ -193,3 +193,40 @@ def test_dimension_mismatch_error_handling(mock_get_client: MagicMock) -> None:

    with pytest.raises(ValueError, match="Embedding dimension mismatch"):
        storage.save(["test document"])


@patch("crewai.knowledge.storage.knowledge_storage.get_rag_client")
def test_save_empty_documents_list(mock_get_client: MagicMock) -> None:
    """Test that save() handles empty documents list gracefully.

    Calling save() with an empty documents list should be a no-op and not
    propagate low-level storage exceptions from ChromaDB.
    """
    mock_client = MagicMock()
    mock_get_client.return_value = mock_client

    storage = KnowledgeStorage(collection_name="empty_docs_test")

    storage.save([])

    mock_client.get_or_create_collection.assert_not_called()
    mock_client.add_documents.assert_not_called()


@pytest.mark.asyncio
@patch("crewai.knowledge.storage.knowledge_storage.get_rag_client")
async def test_asave_empty_documents_list(mock_get_client: MagicMock) -> None:
    """Test that asave() handles empty documents list gracefully.

    Calling asave() with an empty documents list should be a no-op and not
    propagate low-level storage exceptions from ChromaDB.
    """
    mock_client = MagicMock()
    mock_get_client.return_value = mock_client

    storage = KnowledgeStorage(collection_name="empty_docs_async_test")

    await storage.asave([])

    mock_client.aget_or_create_collection.assert_not_called()
    mock_client.aadd_documents.assert_not_called()
@@ -635,54 +635,6 @@ def test_gemini_token_usage_tracking():
    assert usage.total_tokens > 0


@pytest.mark.vcr()
def test_gemini_tool_returning_float():
    """
    Test that Gemini properly handles tools that return non-dict values like floats.

    This is an end-to-end test that verifies the agent can use a tool that returns
    a float (which gets wrapped in {"result": value} for Gemini's FunctionResponse).
    """
    from pydantic import BaseModel, Field
    from typing import Type
    from crewai.tools import BaseTool

    class SumNumbersToolInput(BaseModel):
        a: float = Field(..., description="The first number to add")
        b: float = Field(..., description="The second number to add")

    class SumNumbersTool(BaseTool):
        name: str = "sum_numbers"
        description: str = "Add two numbers together and return the result"
        args_schema: Type[BaseModel] = SumNumbersToolInput

        def _run(self, a: float, b: float) -> float:
            return a + b

    sum_tool = SumNumbersTool()

    agent = Agent(
        role="Calculator",
        goal="Calculate numbers accurately",
        backstory="You are a calculator that adds numbers.",
        llm=LLM(model="google/gemini-2.0-flash-001"),
        tools=[sum_tool],
        verbose=True,
    )

    task = Task(
        description="What is 10000 + 20000? Use the sum_numbers tool to calculate this.",
        expected_output="The sum of the two numbers",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], verbose=True)
    result = crew.kickoff()

    # The result should contain 30000 (the sum)
    assert "30000" in result.raw

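The docstring above refers to wrapping non-dict tool results for Gemini's FunctionResponse. A minimal sketch of that normalization step (the helper name is illustrative, not CrewAI's actual internal):

from typing import Any


def to_function_response_payload(result: Any) -> dict[str, Any]:
    """Gemini's FunctionResponse expects a JSON object, so a plain value
    like the float returned by sum_numbers is wrapped under "result"."""
    if isinstance(result, dict):
        return result
    return {"result": result}


# to_function_response_payload(30000.0) -> {"result": 30000.0}
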
def test_gemini_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    llm = LLM(model="google/gemini-2.0-flash-001")

@@ -511,13 +511,10 @@ def test_openai_streaming_with_response_model():
    mock_chunk1 = MagicMock()
    mock_chunk1.type = "content.delta"
    mock_chunk1.delta = '{"answer": "test", '
    mock_chunk1.id = "response-1"

    # Second chunk
    mock_chunk2 = MagicMock()
    mock_chunk2.type = "content.delta"
    mock_chunk2.delta = '"confidence": 0.95}'
    mock_chunk2.id = "response-2"

    # Create mock final completion with parsed result
    mock_parsed = TestResponse(answer="test", confidence=0.95)

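For context on what these mocks simulate: the streamed content.delta chunks concatenate into the JSON that the response model finally parses. A minimal sketch under the same chunk shape (type/delta/id attributes exactly as mocked above):

parts: list[str] = []
for chunk in (mock_chunk1, mock_chunk2):
    if chunk.type == "content.delta":
        parts.append(chunk.delta)

raw_json = "".join(parts)
# raw_json == '{"answer": "test", "confidence": 0.95}'
# which is what mock_parsed = TestResponse(answer="test", confidence=0.95) represents
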
@@ -272,100 +272,3 @@ class TestEmbeddingFactory:
        mock_build_from_provider.assert_called_once_with(mock_provider)
        assert result == mock_embedding_function
        mock_import.assert_not_called()

    @patch("crewai.rag.embeddings.factory.import_and_validate_definition")
    def test_build_embedder_google_vertex_with_genai_model(self, mock_import):
        """Test routing to Google Vertex provider with new genai model."""
        mock_provider_class = MagicMock()
        mock_provider_instance = MagicMock()
        mock_embedding_function = MagicMock()

        mock_import.return_value = mock_provider_class
        mock_provider_class.return_value = mock_provider_instance
        mock_provider_instance.embedding_callable.return_value = mock_embedding_function

        config = {
            "provider": "google-vertex",
            "config": {
                "api_key": "test-google-api-key",
                "model_name": "gemini-embedding-001",
            },
        }

        build_embedder(config)

        mock_import.assert_called_once_with(
            "crewai.rag.embeddings.providers.google.vertex.VertexAIProvider"
        )
        mock_provider_class.assert_called_once()

        call_kwargs = mock_provider_class.call_args.kwargs
        assert call_kwargs["api_key"] == "test-google-api-key"
        assert call_kwargs["model_name"] == "gemini-embedding-001"

    @patch("crewai.rag.embeddings.factory.import_and_validate_definition")
    def test_build_embedder_google_vertex_with_legacy_model(self, mock_import):
        """Test routing to Google Vertex provider with legacy textembedding-gecko model."""
        mock_provider_class = MagicMock()
        mock_provider_instance = MagicMock()
        mock_embedding_function = MagicMock()

        mock_import.return_value = mock_provider_class
        mock_provider_class.return_value = mock_provider_instance
        mock_provider_instance.embedding_callable.return_value = mock_embedding_function

        config = {
            "provider": "google-vertex",
            "config": {
                "project_id": "my-gcp-project",
                "region": "us-central1",
                "model_name": "textembedding-gecko",
            },
        }

        build_embedder(config)

        mock_import.assert_called_once_with(
            "crewai.rag.embeddings.providers.google.vertex.VertexAIProvider"
        )
        mock_provider_class.assert_called_once()

        call_kwargs = mock_provider_class.call_args.kwargs
        assert call_kwargs["project_id"] == "my-gcp-project"
        assert call_kwargs["region"] == "us-central1"
        assert call_kwargs["model_name"] == "textembedding-gecko"

    @patch("crewai.rag.embeddings.factory.import_and_validate_definition")
    def test_build_embedder_google_vertex_with_location(self, mock_import):
        """Test routing to Google Vertex provider with location parameter."""
        mock_provider_class = MagicMock()
        mock_provider_instance = MagicMock()
        mock_embedding_function = MagicMock()

        mock_import.return_value = mock_provider_class
        mock_provider_class.return_value = mock_provider_instance
        mock_provider_instance.embedding_callable.return_value = mock_embedding_function

        config = {
            "provider": "google-vertex",
            "config": {
                "project_id": "my-gcp-project",
                "location": "europe-west1",
                "model_name": "gemini-embedding-001",
                "task_type": "RETRIEVAL_DOCUMENT",
                "output_dimensionality": 768,
            },
        }

        build_embedder(config)

        mock_import.assert_called_once_with(
            "crewai.rag.embeddings.providers.google.vertex.VertexAIProvider"
        )

        call_kwargs = mock_provider_class.call_args.kwargs
        assert call_kwargs["project_id"] == "my-gcp-project"
        assert call_kwargs["location"] == "europe-west1"
        assert call_kwargs["model_name"] == "gemini-embedding-001"
        assert call_kwargs["task_type"] == "RETRIEVAL_DOCUMENT"
        assert call_kwargs["output_dimensionality"] == 768

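All three tests drive the same public entry point. As a usage sketch, the config dict shape above can be passed straight to build_embedder to obtain the embedding callable; the import path is inferred from the patch target in the tests, and the project/location/model values are placeholders:

from crewai.rag.embeddings.factory import build_embedder

# Same dict shape the three tests above exercise; values are placeholders,
# not defaults.
embedding_fn = build_embedder(
    {
        "provider": "google-vertex",
        "config": {
            "project_id": "my-gcp-project",
            "location": "us-central1",
            "model_name": "gemini-embedding-001",
        },
    }
)
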
@@ -1,176 +0,0 @@
"""Integration tests for Google Vertex embeddings with Crew memory.

These tests make real API calls and use VCR to record/replay responses.
"""

import os
import threading
from collections import defaultdict
from unittest.mock import patch

import pytest

from crewai import Agent, Crew, Task
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
    MemorySaveCompletedEvent,
    MemorySaveStartedEvent,
)


@pytest.fixture(autouse=True)
def setup_vertex_ai_env():
    """Set up environment for Vertex AI tests.

    Sets GOOGLE_GENAI_USE_VERTEXAI=true to ensure the SDK uses the Vertex AI
    backend (aiplatform.googleapis.com) which matches the VCR cassettes.
    Also mocks GOOGLE_API_KEY if not already set.
    """
    env_updates = {"GOOGLE_GENAI_USE_VERTEXAI": "true"}

    # Add a mock API key if none exists
    if "GOOGLE_API_KEY" not in os.environ and "GEMINI_API_KEY" not in os.environ:
        env_updates["GOOGLE_API_KEY"] = "test-key"

    with patch.dict(os.environ, env_updates):
        yield


@pytest.fixture
def google_vertex_embedder_config():
    """Fixture providing Google Vertex embedder configuration."""
    return {
        "provider": "google-vertex",
        "config": {
            "api_key": os.getenv("GOOGLE_API_KEY", "test-key"),
            "model_name": "gemini-embedding-001",
        },
    }


@pytest.fixture
def simple_agent():
    """Fixture providing a simple test agent."""
    return Agent(
        role="Research Assistant",
        goal="Help with research tasks",
        backstory="You are a helpful research assistant.",
        verbose=False,
    )


@pytest.fixture
def simple_task(simple_agent):
    """Fixture providing a simple test task."""
    return Task(
        description="Summarize the key points about artificial intelligence in one sentence.",
        expected_output="A one sentence summary about AI.",
        agent=simple_agent,
    )


@pytest.mark.vcr()
@pytest.mark.timeout(120)  # Longer timeout for VCR recording
def test_crew_memory_with_google_vertex_embedder(
    google_vertex_embedder_config, simple_agent, simple_task
) -> None:
    """Test that Crew with memory=True works with google-vertex embedder and memory is used."""
    # Track memory events
    events: dict[str, list] = defaultdict(list)
    condition = threading.Condition()

    @crewai_event_bus.on(MemorySaveStartedEvent)
    def on_save_started(source, event):
        with condition:
            events["MemorySaveStartedEvent"].append(event)
            condition.notify()

    @crewai_event_bus.on(MemorySaveCompletedEvent)
    def on_save_completed(source, event):
        with condition:
            events["MemorySaveCompletedEvent"].append(event)
            condition.notify()

    crew = Crew(
        agents=[simple_agent],
        tasks=[simple_task],
        memory=True,
        embedder=google_vertex_embedder_config,
        verbose=False,
    )

    result = crew.kickoff()

    assert result is not None
    assert result.raw is not None
    assert len(result.raw) > 0

    with condition:
        success = condition.wait_for(
            lambda: len(events["MemorySaveCompletedEvent"]) >= 1,
            timeout=10,
        )

    assert success, "Timeout waiting for memory save events - memory may not be working"
    assert len(events["MemorySaveStartedEvent"]) >= 1, "No memory save started events"
    assert len(events["MemorySaveCompletedEvent"]) >= 1, "No memory save completed events"


@pytest.mark.vcr()
@pytest.mark.timeout(120)
def test_crew_memory_with_google_vertex_project_id(simple_agent, simple_task) -> None:
    """Test Crew memory with Google Vertex using project_id authentication."""
    project_id = os.getenv("GOOGLE_CLOUD_PROJECT")
    if not project_id:
        pytest.skip("GOOGLE_CLOUD_PROJECT environment variable not set")

    # Track memory events
    events: dict[str, list] = defaultdict(list)
    condition = threading.Condition()

    @crewai_event_bus.on(MemorySaveStartedEvent)
    def on_save_started(source, event):
        with condition:
            events["MemorySaveStartedEvent"].append(event)
            condition.notify()

    @crewai_event_bus.on(MemorySaveCompletedEvent)
    def on_save_completed(source, event):
        with condition:
            events["MemorySaveCompletedEvent"].append(event)
            condition.notify()

    embedder_config = {
        "provider": "google-vertex",
        "config": {
            "project_id": project_id,
            "location": "us-central1",
            "model_name": "gemini-embedding-001",
        },
    }

    crew = Crew(
        agents=[simple_agent],
        tasks=[simple_task],
        memory=True,
        embedder=embedder_config,
        verbose=False,
    )

    result = crew.kickoff()

    # Verify basic result
    assert result is not None
    assert result.raw is not None

    # Wait for memory save events
    with condition:
        success = condition.wait_for(
            lambda: len(events["MemorySaveCompletedEvent"]) >= 1,
            timeout=10,
        )

    # Verify memory was actually used
    assert success, "Timeout waiting for memory save events - memory may not be working"
    assert len(events["MemorySaveStartedEvent"]) >= 1, "No memory save started events"
    assert len(events["MemorySaveCompletedEvent"]) >= 1, "No memory save completed events"

@@ -2585,7 +2585,6 @@ def test_warning_long_term_memory_without_entity_memory():
        goal="You research about math.",
        backstory="You're an expert in research and you love to learn new things.",
        allow_delegation=False,
        verbose=True,
    )

    task1 = Task(

@@ -984,8 +984,8 @@ def test_streaming_fallback_to_non_streaming():
    def mock_call(messages, tools=None, callbacks=None, available_functions=None):
        nonlocal fallback_called
        # Emit a couple of chunks to simulate partial streaming
        crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 1", response_id="Id"))
        crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 2", response_id="Id"))
        crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 1"))
        crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 2"))

        # Mark that fallback would be called
        fallback_called = True

@@ -1041,7 +1041,7 @@ def test_streaming_empty_response_handling():
    def mock_call(messages, tools=None, callbacks=None, available_functions=None):
        # Emit a few empty chunks
        for _ in range(3):
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="", response_id="id"))
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=""))

        # Return the default message for empty responses
        return "I apologize, but I couldn't generate a proper response. Please try again or rephrase your request."

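Both hunks make the same mechanical change: the response_id keyword argument is dropped from the emitted chunk events. On the consuming side the subscription pattern is unchanged either way; a minimal listener sketch using the same event-bus decorator seen in the memory tests above (the LLMStreamChunkEvent import path is an assumption):

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import LLMStreamChunkEvent  # import path assumed

received: list[str] = []


@crewai_event_bus.on(LLMStreamChunkEvent)
def collect_chunk(source, event):
    # event.chunk exists in both versions; response_id only where still emitted
    received.append(event.chunk)
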
@@ -1,3 +1,3 @@
"""CrewAI development tools."""

__version__ = "1.9.1"
__version__ = "1.8.1"

uv.lock (generated, 8 changes)
@@ -310,7 +310,7 @@ wheels = [

[[package]]
name = "anthropic"
version = "0.73.0"
version = "0.71.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "anyio" },

@@ -322,9 +322,9 @@ dependencies = [
    { name = "sniffio" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f0/07/f550112c3f5299d02f06580577f602e8a112b1988ad7c98ac1a8f7292d7e/anthropic-0.73.0.tar.gz", hash = "sha256:30f0d7d86390165f86af6ca7c3041f8720bb2e1b0e12a44525c8edfdbd2c5239", size = 425168, upload-time = "2025-11-14T18:47:52.635Z" }
sdist = { url = "https://files.pythonhosted.org/packages/05/4b/19620875841f692fdc35eb58bf0201c8ad8c47b8443fecbf1b225312175b/anthropic-0.71.1.tar.gz", hash = "sha256:a77d156d3e7d318b84681b59823b2dee48a8ac508a3e54e49f0ab0d074e4b0da", size = 493294, upload-time = "2025-10-28T17:28:42.213Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/15/b1/5d4d3f649e151e58dc938cf19c4d0cd19fca9a986879f30fea08a7b17138/anthropic-0.73.0-py3-none-any.whl", hash = "sha256:0d56cd8b3ca3fea9c9b5162868bdfd053fbc189b8b56d4290bd2d427b56db769", size = 367839, upload-time = "2025-11-14T18:47:51.195Z" },
    { url = "https://files.pythonhosted.org/packages/4b/68/b2f988b13325f9ac9921b1e87f0b7994468014e1b5bd3bdbd2472f5baf45/anthropic-0.71.1-py3-none-any.whl", hash = "sha256:6ca6c579f0899a445faeeed9c0eb97aa4bdb751196262f9ccc96edfc0bb12679", size = 355020, upload-time = "2025-10-28T17:28:40.653Z" },
]

[[package]]

@@ -1276,7 +1276,7 @@ requires-dist = [
    { name = "aiobotocore", marker = "extra == 'aws'", specifier = "~=2.25.2" },
    { name = "aiocache", extras = ["memcached", "redis"], marker = "extra == 'a2a'", specifier = "~=0.12.3" },
    { name = "aiosqlite", specifier = "~=0.21.0" },
    { name = "anthropic", marker = "extra == 'anthropic'", specifier = "~=0.73.0" },
    { name = "anthropic", marker = "extra == 'anthropic'", specifier = "~=0.71.0" },
    { name = "appdirs", specifier = "~=1.4.4" },
    { name = "azure-ai-inference", marker = "extra == 'azure-ai-inference'", specifier = "~=1.0.0b9" },
    { name = "boto3", marker = "extra == 'aws'", specifier = "~=1.40.38" },