mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-23 15:18:14 +00:00

Compare commits
3 Commits
gl/feat/na ... devin/1769
| Author | SHA1 | Date |
|---|---|---|
| | 5644211c2a | |
| | ee79a612d0 | |
| | 21ebc01e9d | |
@@ -19,7 +19,7 @@ repos:
         language: system
         pass_filenames: true
         types: [python]
-        exclude: ^(lib/crewai/src/crewai/cli/templates/|lib/crewai/tests/|lib/crewai-tools/tests/|lib/crewai-files/tests/)
+        exclude: ^(lib/crewai/src/crewai/cli/templates/|lib/crewai/tests/|lib/crewai-tools/tests/)
   - repo: https://github.com/astral-sh/uv-pre-commit
     rev: 0.9.3
     hooks:
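As a quick sanity check on the tightened `exclude` regex, a hedged standalone sketch (the sample paths are hypothetical):

```python
import re

# The new exclude pattern from the hook config above (crewai-files paths dropped).
exclude = re.compile(
    r"^(lib/crewai/src/crewai/cli/templates/|lib/crewai/tests/|lib/crewai-tools/tests/)"
)

# Hypothetical paths, for illustration only:
assert exclude.match("lib/crewai/tests/test_agent.py")
assert exclude.match("lib/crewai-tools/tests/test_tool.py")
assert not exclude.match("lib/crewai/src/crewai/agent.py")
# crewai-files paths no longer match, consistent with the package's removal:
assert not exclude.match("lib/crewai-files/tests/test_files.py")
```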
49 conftest.py
@@ -1,6 +1,5 @@
 """Pytest configuration for crewAI workspace."""

-import base64
 from collections.abc import Generator
 import gzip
 import os
@@ -11,7 +10,6 @@ from typing import Any
 from dotenv import load_dotenv
 import pytest
 from vcr.request import Request  # type: ignore[import-untyped]
-import vcr.stubs.httpx_stubs as httpx_stubs  # type: ignore[import-untyped]


 env_test_path = Path(__file__).parent / ".env.test"
@@ -19,25 +17,6 @@ load_dotenv(env_test_path, override=True)
 load_dotenv(override=True)


-def _patched_make_vcr_request(httpx_request: Any, **kwargs: Any) -> Any:
-    """Patched version of VCR's _make_vcr_request that handles binary content.
-
-    The original implementation fails on binary request bodies (like file uploads)
-    because it assumes all content can be decoded as UTF-8.
-    """
-    raw_body = httpx_request.read()
-    try:
-        body = raw_body.decode("utf-8")
-    except UnicodeDecodeError:
-        body = base64.b64encode(raw_body).decode("ascii")
-    uri = str(httpx_request.url)
-    headers = dict(httpx_request.headers)
-    return Request(httpx_request.method, uri, body, headers)
-
-
-httpx_stubs._make_vcr_request = _patched_make_vcr_request
-
-
 @pytest.fixture(autouse=True, scope="function")
 def cleanup_event_handlers() -> Generator[None, Any, None]:
     """Clean up event bus handlers after each test to prevent test pollution."""
@@ -180,23 +159,12 @@ def _filter_request_headers(request: Request) -> Request:  # type: ignore[no-any
     return request


-def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any] | None:
-    """Filter sensitive headers from response before recording.
-
-    Returns None to skip recording responses with empty bodies. This handles
-    duplicate recordings caused by OpenAI's stainless client using
-    with_raw_response which triggers httpx to re-read the consumed stream.
-    """
-    body = response.get("body", {}).get("string", "")
-    headers = response.get("headers", {})
-    content_length = headers.get("content-length", headers.get("Content-Length", []))
-
-    if body == "" or body == b"" or content_length == ["0"]:
-        return None
+def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
+    """Filter sensitive headers from response before recording."""

     for encoding_header in ["Content-Encoding", "content-encoding"]:
-        if encoding_header in headers:
-            encoding = headers.pop(encoding_header)
+        if encoding_header in response["headers"]:
+            encoding = response["headers"].pop(encoding_header)
         if encoding and encoding[0] == "gzip":
             body = response.get("body", {}).get("string", b"")
             if isinstance(body, bytes) and body.startswith(b"\x1f\x8b"):
@@ -204,8 +172,8 @@ def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any] | None:

     for header_name, replacement in HEADERS_TO_FILTER.items():
         for variant in [header_name, header_name.upper(), header_name.title()]:
-            if variant in headers:
-                headers[variant] = [replacement]
+            if variant in response["headers"]:
+                response["headers"][variant] = [replacement]
     return response


@@ -220,10 +188,7 @@ def vcr_cassette_dir(request: Any) -> str:
     test_file = Path(request.fspath)

     for parent in test_file.parents:
-        if (
-            parent.name in ("crewai", "crewai-tools", "crewai-files")
-            and parent.parent.name == "lib"
-        ):
+        if parent.name in ("crewai", "crewai-tools") and parent.parent.name == "lib":
             package_root = parent
             break
     else:
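The `_patched_make_vcr_request` removed above exists because, per its own docstring, VCR's stock implementation assumes request bodies decode as UTF-8 and fails on binary uploads. The decode-with-base64-fallback is the core of the workaround; a minimal standalone sketch (the sample payloads are invented):

```python
import base64


def encode_body_for_recording(raw_body: bytes) -> str:
    """Keep text bodies readable, store binary bodies losslessly as base64."""
    try:
        return raw_body.decode("utf-8")
    except UnicodeDecodeError:
        return base64.b64encode(raw_body).decode("ascii")


print(encode_body_for_recording(b'{"model": "gpt-4o"}'))  # stays plain JSON text
print(encode_body_for_recording(b"\x89PNG\r\n\x1a\n"))    # becomes base64
```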
43 lib/crewai-files/README.md
@@ -1,43 +0,0 @@
# crewai-files

File handling utilities for CrewAI multimodal inputs.

## Supported File Types

- `ImageFile` - PNG, JPEG, GIF, WebP
- `PDFFile` - PDF documents
- `TextFile` - Plain text files
- `AudioFile` - MP3, WAV, FLAC, OGG, M4A
- `VideoFile` - MP4, WebM, MOV, AVI

## Usage

```python
from crewai_files import File, ImageFile, PDFFile

# Auto-detect file type
file = File(source="document.pdf")  # Resolves to PDFFile

# Or use specific types
image = ImageFile(source="chart.png")
pdf = PDFFile(source="report.pdf")
```

### Passing Files to Crews

```python
crew.kickoff(inputs={
    "files": {"chart": ImageFile(source="chart.png")}
})
```

### Passing Files to Tasks

```python
task = Task(
    description="Analyze the chart",
    expected_output="Analysis",
    agent=agent,
    input_files=[ImageFile(source="chart.png")],
)
```
25 lib/crewai-files/pyproject.toml
@@ -1,25 +0,0 @@
[project]
name = "crewai-files"
dynamic = ["version"]
description = "File handling utilities for CrewAI multimodal inputs"
readme = "README.md"
authors = [
    { name = "Greyson LaLonde", email = "greyson@crewai.com" }
]
requires-python = ">=3.10, <3.14"
dependencies = [
    "Pillow~=10.4.0",
    "pypdf~=4.0.0",
    "python-magic>=0.4.27",
    "aiocache~=0.12.3",
    "aiofiles~=24.1.0",
    "tinytag~=1.10.0",
    "av~=13.0.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.version]
path = "src/crewai_files/__init__.py"
153 lib/crewai-files/src/crewai_files/__init__.py
@@ -1,153 +0,0 @@
"""File handling utilities for crewAI tasks."""

from crewai_files.cache.cleanup import (
    cleanup_expired_files,
    cleanup_provider_files,
    cleanup_uploaded_files,
)
from crewai_files.cache.upload_cache import (
    CachedUpload,
    UploadCache,
    get_upload_cache,
    reset_upload_cache,
)
from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFile,
    ResolvedFileType,
    UrlReference,
)
from crewai_files.core.sources import (
    FileBytes,
    FilePath,
    FileSource,
    FileSourceInput,
    FileStream,
    FileUrl,
    RawFileInput,
)
from crewai_files.core.types import (
    AudioExtension,
    AudioFile,
    AudioMimeType,
    BaseFile,
    File,
    FileInput,
    FileMode,
    ImageExtension,
    ImageFile,
    ImageMimeType,
    PDFContentType,
    PDFExtension,
    PDFFile,
    TextContentType,
    TextExtension,
    TextFile,
    VideoExtension,
    VideoFile,
    VideoMimeType,
)
from crewai_files.formatting import (
    aformat_multimodal_content,
    format_multimodal_content,
)
from crewai_files.processing import (
    ANTHROPIC_CONSTRAINTS,
    BEDROCK_CONSTRAINTS,
    GEMINI_CONSTRAINTS,
    OPENAI_CONSTRAINTS,
    AudioConstraints,
    FileHandling,
    FileProcessingError,
    FileProcessor,
    FileTooLargeError,
    FileValidationError,
    ImageConstraints,
    PDFConstraints,
    ProcessingDependencyError,
    ProviderConstraints,
    UnsupportedFileTypeError,
    VideoConstraints,
    get_constraints_for_provider,
)
from crewai_files.resolution.resolver import (
    FileResolver,
    FileResolverConfig,
    create_resolver,
)
from crewai_files.resolution.utils import normalize_input_files, wrap_file_source
from crewai_files.uploaders import FileUploader, UploadResult, get_uploader


__all__ = [
    "ANTHROPIC_CONSTRAINTS",
    "BEDROCK_CONSTRAINTS",
    "GEMINI_CONSTRAINTS",
    "OPENAI_CONSTRAINTS",
    "AudioConstraints",
    "AudioExtension",
    "AudioFile",
    "AudioMimeType",
    "BaseFile",
    "CachedUpload",
    "File",
    "FileBytes",
    "FileHandling",
    "FileInput",
    "FileMode",
    "FilePath",
    "FileProcessingError",
    "FileProcessor",
    "FileReference",
    "FileResolver",
    "FileResolverConfig",
    "FileSource",
    "FileSourceInput",
    "FileStream",
    "FileTooLargeError",
    "FileUploader",
    "FileUrl",
    "FileValidationError",
    "ImageConstraints",
    "ImageExtension",
    "ImageFile",
    "ImageMimeType",
    "InlineBase64",
    "InlineBytes",
    "PDFConstraints",
    "PDFContentType",
    "PDFExtension",
    "PDFFile",
    "ProcessingDependencyError",
    "ProviderConstraints",
    "RawFileInput",
    "ResolvedFile",
    "ResolvedFileType",
    "TextContentType",
    "TextExtension",
    "TextFile",
    "UnsupportedFileTypeError",
    "UploadCache",
    "UploadResult",
    "UrlReference",
    "VideoConstraints",
    "VideoExtension",
    "VideoFile",
    "VideoMimeType",
    "aformat_multimodal_content",
    "cleanup_expired_files",
    "cleanup_provider_files",
    "cleanup_uploaded_files",
    "create_resolver",
    "format_multimodal_content",
    "get_constraints_for_provider",
    "get_upload_cache",
    "get_uploader",
    "normalize_input_files",
    "reset_upload_cache",
    "wrap_file_source",
]

__version__ = "1.8.1"
14 lib/crewai-files/src/crewai_files/cache/__init__.py
@@ -1,14 +0,0 @@
"""Upload caching and cleanup."""

from crewai_files.cache.cleanup import cleanup_uploaded_files
from crewai_files.cache.metrics import FileOperationMetrics, measure_operation
from crewai_files.cache.upload_cache import UploadCache, get_upload_cache


__all__ = [
    "FileOperationMetrics",
    "UploadCache",
    "cleanup_uploaded_files",
    "get_upload_cache",
    "measure_operation",
]
374 lib/crewai-files/src/crewai_files/cache/cleanup.py vendored
@@ -1,374 +0,0 @@
"""Cleanup utilities for uploaded files."""

from __future__ import annotations

import asyncio
import logging
from typing import TYPE_CHECKING

from crewai_files.cache.upload_cache import CachedUpload, UploadCache
from crewai_files.uploaders import get_uploader
from crewai_files.uploaders.factory import ProviderType


if TYPE_CHECKING:
    from crewai_files.uploaders.base import FileUploader

logger = logging.getLogger(__name__)


def _safe_delete(
    uploader: FileUploader,
    file_id: str,
    provider: str,
) -> bool:
    """Safely delete a file, logging any errors.

    Args:
        uploader: The file uploader to use.
        file_id: The file ID to delete.
        provider: Provider name for logging.

    Returns:
        True if deleted successfully, False otherwise.
    """
    try:
        if uploader.delete(file_id):
            logger.debug(f"Deleted {file_id} from {provider}")
            return True
        logger.warning(f"Failed to delete {file_id} from {provider}")
        return False
    except Exception as e:
        logger.warning(f"Error deleting {file_id} from {provider}: {e}")
        return False


def cleanup_uploaded_files(
    cache: UploadCache,
    *,
    delete_from_provider: bool = True,
    providers: list[ProviderType] | None = None,
) -> int:
    """Clean up uploaded files from the cache and optionally from providers.

    Args:
        cache: The upload cache to clean up.
        delete_from_provider: If True, delete files from the provider as well.
        providers: Optional list of providers to clean up. If None, cleans all.

    Returns:
        Number of files cleaned up.
    """
    cleaned = 0

    provider_uploads: dict[ProviderType, list[CachedUpload]] = {}

    for provider in _get_providers_from_cache(cache):
        if providers is not None and provider not in providers:
            continue
        provider_uploads[provider] = cache.get_all_for_provider(provider)

    if delete_from_provider:
        for provider, uploads in provider_uploads.items():
            uploader = get_uploader(provider)
            if uploader is None:
                logger.warning(
                    f"No uploader available for {provider}, skipping cleanup"
                )
                continue

            for upload in uploads:
                if _safe_delete(uploader, upload.file_id, provider):
                    cleaned += 1

    cache.clear()

    logger.info(f"Cleaned up {cleaned} uploaded files")
    return cleaned


def cleanup_expired_files(
    cache: UploadCache,
    *,
    delete_from_provider: bool = False,
) -> int:
    """Clean up expired files from the cache.

    Args:
        cache: The upload cache to clean up.
        delete_from_provider: If True, attempt to delete from provider as well.
            Note: Expired files may already be deleted by the provider.

    Returns:
        Number of expired entries removed from cache.
    """
    expired_entries: list[CachedUpload] = []

    if delete_from_provider:
        for provider in _get_providers_from_cache(cache):
            expired_entries.extend(
                upload
                for upload in cache.get_all_for_provider(provider)
                if upload.is_expired()
            )

    removed = cache.clear_expired()

    if delete_from_provider:
        for upload in expired_entries:
            uploader = get_uploader(upload.provider)
            if uploader is not None:
                try:
                    uploader.delete(upload.file_id)
                except Exception as e:
                    logger.debug(f"Could not delete expired file {upload.file_id}: {e}")

    return removed


def cleanup_provider_files(
    provider: ProviderType,
    *,
    cache: UploadCache | None = None,
    delete_all_from_provider: bool = False,
) -> int:
    """Clean up all files for a specific provider.

    Args:
        provider: Provider name to clean up.
        cache: Optional upload cache to clear entries from.
        delete_all_from_provider: If True, delete all files from the provider,
            not just cached ones.

    Returns:
        Number of files deleted.
    """
    deleted = 0
    uploader = get_uploader(provider)

    if uploader is None:
        logger.warning(f"No uploader available for {provider}")
        return 0

    if delete_all_from_provider:
        try:
            files = uploader.list_files()
            for file_info in files:
                file_id = file_info.get("id") or file_info.get("name")
                if file_id and uploader.delete(file_id):
                    deleted += 1
        except Exception as e:
            logger.warning(f"Error listing/deleting files from {provider}: {e}")
    elif cache is not None:
        uploads = cache.get_all_for_provider(provider)
        for upload in uploads:
            if _safe_delete(uploader, upload.file_id, provider):
                deleted += 1
                cache.remove_by_file_id(upload.file_id, provider)

    logger.info(f"Deleted {deleted} files from {provider}")
    return deleted


def _get_providers_from_cache(cache: UploadCache) -> set[ProviderType]:
    """Get unique provider names from cache entries.

    Args:
        cache: The upload cache.

    Returns:
        Set of provider names.
    """
    return cache.get_providers()


async def _asafe_delete(
    uploader: FileUploader,
    file_id: str,
    provider: str,
) -> bool:
    """Async safely delete a file, logging any errors.

    Args:
        uploader: The file uploader to use.
        file_id: The file ID to delete.
        provider: Provider name for logging.

    Returns:
        True if deleted successfully, False otherwise.
    """
    try:
        if await uploader.adelete(file_id):
            logger.debug(f"Deleted {file_id} from {provider}")
            return True
        logger.warning(f"Failed to delete {file_id} from {provider}")
        return False
    except Exception as e:
        logger.warning(f"Error deleting {file_id} from {provider}: {e}")
        return False


async def acleanup_uploaded_files(
    cache: UploadCache,
    *,
    delete_from_provider: bool = True,
    providers: list[ProviderType] | None = None,
    max_concurrency: int = 10,
) -> int:
    """Async clean up uploaded files from the cache and optionally from providers.

    Args:
        cache: The upload cache to clean up.
        delete_from_provider: If True, delete files from the provider as well.
        providers: Optional list of providers to clean up. If None, cleans all.
        max_concurrency: Maximum number of concurrent delete operations.

    Returns:
        Number of files cleaned up.
    """
    cleaned = 0

    provider_uploads: dict[ProviderType, list[CachedUpload]] = {}

    for provider in _get_providers_from_cache(cache):
        if providers is not None and provider not in providers:
            continue
        provider_uploads[provider] = await cache.aget_all_for_provider(provider)

    if delete_from_provider:
        semaphore = asyncio.Semaphore(max_concurrency)

        async def delete_one(file_uploader: FileUploader, cached: CachedUpload) -> bool:
            """Delete a single file with semaphore limiting."""
            async with semaphore:
                return await _asafe_delete(
                    file_uploader, cached.file_id, cached.provider
                )

        tasks: list[asyncio.Task[bool]] = []
        for provider, uploads in provider_uploads.items():
            uploader = get_uploader(provider)
            if uploader is None:
                logger.warning(
                    f"No uploader available for {provider}, skipping cleanup"
                )
                continue

            tasks.extend(
                asyncio.create_task(delete_one(uploader, cached)) for cached in uploads
            )

        results = await asyncio.gather(*tasks, return_exceptions=True)
        cleaned = sum(1 for r in results if r is True)

    await cache.aclear()

    logger.info(f"Cleaned up {cleaned} uploaded files")
    return cleaned


async def acleanup_expired_files(
    cache: UploadCache,
    *,
    delete_from_provider: bool = False,
    max_concurrency: int = 10,
) -> int:
    """Async clean up expired files from the cache.

    Args:
        cache: The upload cache to clean up.
        delete_from_provider: If True, attempt to delete from provider as well.
        max_concurrency: Maximum number of concurrent delete operations.

    Returns:
        Number of expired entries removed from cache.
    """
    expired_entries: list[CachedUpload] = []

    if delete_from_provider:
        for provider in _get_providers_from_cache(cache):
            uploads = await cache.aget_all_for_provider(provider)
            expired_entries.extend(upload for upload in uploads if upload.is_expired())

    removed = await cache.aclear_expired()

    if delete_from_provider and expired_entries:
        semaphore = asyncio.Semaphore(max_concurrency)

        async def delete_expired(cached: CachedUpload) -> None:
            """Delete an expired file with semaphore limiting."""
            async with semaphore:
                file_uploader = get_uploader(cached.provider)
                if file_uploader is not None:
                    try:
                        await file_uploader.adelete(cached.file_id)
                    except Exception as e:
                        logger.debug(
                            f"Could not delete expired file {cached.file_id}: {e}"
                        )

        await asyncio.gather(
            *[delete_expired(cached) for cached in expired_entries],
            return_exceptions=True,
        )

    return removed


async def acleanup_provider_files(
    provider: ProviderType,
    *,
    cache: UploadCache | None = None,
    delete_all_from_provider: bool = False,
    max_concurrency: int = 10,
) -> int:
    """Async clean up all files for a specific provider.

    Args:
        provider: Provider name to clean up.
        cache: Optional upload cache to clear entries from.
        delete_all_from_provider: If True, delete all files from the provider.
        max_concurrency: Maximum number of concurrent delete operations.

    Returns:
        Number of files deleted.
    """
    deleted = 0
    uploader = get_uploader(provider)

    if uploader is None:
        logger.warning(f"No uploader available for {provider}")
        return 0

    semaphore = asyncio.Semaphore(max_concurrency)

    async def delete_single(target_file_id: str) -> bool:
        """Delete a single file with semaphore limiting."""
        async with semaphore:
            return await uploader.adelete(target_file_id)

    if delete_all_from_provider:
        try:
            files = uploader.list_files()
            tasks = []
            for file_info in files:
                fid = file_info.get("id") or file_info.get("name")
                if fid:
                    tasks.append(delete_single(fid))
            results = await asyncio.gather(*tasks, return_exceptions=True)
            deleted = sum(1 for r in results if r is True)
        except Exception as e:
            logger.warning(f"Error listing/deleting files from {provider}: {e}")
    elif cache is not None:
        uploads = await cache.aget_all_for_provider(provider)
        tasks = []
        for upload in uploads:
            tasks.append(delete_single(upload.file_id))
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for upload, result in zip(uploads, results, strict=False):
            if result is True:
                deleted += 1
                await cache.aremove_by_file_id(upload.file_id, provider)

    logger.info(f"Deleted {deleted} files from {provider}")
    return deleted
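All three async cleanup helpers share one pattern: each delete runs inside a semaphore so at most `max_concurrency` are in flight, and `asyncio.gather(..., return_exceptions=True)` keeps a single failure from cancelling the batch, with successes counted as `r is True`. A standalone sketch of the pattern, assuming a stubbed delete:

```python
import asyncio


async def bounded_delete_count(file_ids: list[str], max_concurrency: int = 10) -> int:
    """Delete file_ids with bounded fan-out; return the number of successes."""
    semaphore = asyncio.Semaphore(max_concurrency)

    async def delete_one(file_id: str) -> bool:
        # Stand-in for uploader.adelete(file_id); always "succeeds" here.
        async with semaphore:
            await asyncio.sleep(0.01)
            return True

    # return_exceptions=True turns raised errors into result values, so the
    # `r is True` count treats them as failed deletes instead of crashing.
    results = await asyncio.gather(
        *(delete_one(fid) for fid in file_ids), return_exceptions=True
    )
    return sum(1 for r in results if r is True)


print(asyncio.run(bounded_delete_count([f"file-{n}" for n in range(25)])))  # 25
```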
184 lib/crewai-files/src/crewai_files/cache/metrics.py vendored
@@ -1,184 +0,0 @@
"""Performance metrics and structured logging for file operations."""

from __future__ import annotations

from collections.abc import Generator
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime, timezone
import logging
import time
from typing import Any


logger = logging.getLogger(__name__)


@dataclass
class FileOperationMetrics:
    """Metrics for a file operation.

    Attributes:
        operation: Name of the operation (e.g., "upload", "resolve", "process").
        filename: Name of the file being operated on.
        provider: Provider name if applicable.
        duration_ms: Duration of the operation in milliseconds.
        size_bytes: Size of the file in bytes.
        success: Whether the operation succeeded.
        error: Error message if operation failed.
        timestamp: When the operation occurred.
        metadata: Additional operation-specific metadata.
    """

    operation: str
    filename: str | None = None
    provider: str | None = None
    duration_ms: float = 0.0
    size_bytes: int | None = None
    success: bool = True
    error: str | None = None
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Convert metrics to dictionary for logging.

        Returns:
            Dictionary representation of metrics.
        """
        result: dict[str, Any] = {
            "operation": self.operation,
            "duration_ms": round(self.duration_ms, 2),
            "success": self.success,
            "timestamp": self.timestamp.isoformat(),
        }

        if self.filename:
            result["file_name"] = self.filename
        if self.provider:
            result["provider"] = self.provider
        if self.size_bytes is not None:
            result["size_bytes"] = self.size_bytes
        if self.error:
            result["error"] = self.error
        if self.metadata:
            result.update(self.metadata)

        return result


@contextmanager
def measure_operation(
    operation: str,
    *,
    filename: str | None = None,
    provider: str | None = None,
    size_bytes: int | None = None,
    log_level: int = logging.DEBUG,
    **extra_metadata: Any,
) -> Generator[FileOperationMetrics, None, None]:
    """Context manager to measure and log operation performance.

    Args:
        operation: Name of the operation.
        filename: Optional filename being operated on.
        provider: Optional provider name.
        size_bytes: Optional file size in bytes.
        log_level: Log level for the result message.
        **extra_metadata: Additional metadata to include.

    Yields:
        FileOperationMetrics object that will be populated with results.

    Example:
        with measure_operation("upload", filename="test.pdf", provider="openai") as metrics:
            result = upload_file(file)
            metrics.metadata["file_id"] = result.file_id
    """
    metrics = FileOperationMetrics(
        operation=operation,
        filename=filename,
        provider=provider,
        size_bytes=size_bytes,
        metadata=dict(extra_metadata),
    )

    start_time = time.perf_counter()

    try:
        yield metrics
        metrics.success = True
    except Exception as e:
        metrics.success = False
        metrics.error = str(e)
        raise
    finally:
        metrics.duration_ms = (time.perf_counter() - start_time) * 1000

        log_message = f"{operation}"
        if filename:
            log_message += f" [{filename}]"
        if provider:
            log_message += f" ({provider})"

        if metrics.success:
            log_message += f" completed in {metrics.duration_ms:.2f}ms"
        else:
            log_message += f" failed after {metrics.duration_ms:.2f}ms: {metrics.error}"

        logger.log(log_level, log_message, extra=metrics.to_dict())


def log_file_operation(
    operation: str,
    *,
    filename: str | None = None,
    provider: str | None = None,
    size_bytes: int | None = None,
    duration_ms: float | None = None,
    success: bool = True,
    error: str | None = None,
    level: int = logging.INFO,
    **extra: Any,
) -> None:
    """Log a file operation with structured data.

    Args:
        operation: Name of the operation.
        filename: Optional filename being operated on.
        provider: Optional provider name.
        size_bytes: Optional file size in bytes.
        duration_ms: Optional duration in milliseconds.
        success: Whether the operation succeeded.
        error: Optional error message.
        level: Log level to use.
        **extra: Additional metadata to include.
    """
    metrics = FileOperationMetrics(
        operation=operation,
        filename=filename,
        provider=provider,
        size_bytes=size_bytes,
        duration_ms=duration_ms or 0.0,
        success=success,
        error=error,
        metadata=dict(extra),
    )

    message = f"{operation}"
    if filename:
        message += f" [{filename}]"
    if provider:
        message += f" ({provider})"

    if success:
        if duration_ms:
            message += f" completed in {duration_ms:.2f}ms"
        else:
            message += " completed"
    else:
        message += " failed"
        if error:
            message += f": {error}"

    logger.log(level, message, extra=metrics.to_dict())
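A runnable variant of the docstring's usage example, assuming only the module above (the filename and `checksum` metadata key are invented):

```python
import logging

logging.basicConfig(level=logging.DEBUG)

with measure_operation("resolve", filename="report.pdf") as metrics:
    data = b"%PDF-1.7 ..."  # stand-in for real work
    metrics.metadata["checksum"] = hash(data)

# On exit the context manager logs roughly:
#   resolve [report.pdf] completed in 0.01ms
# with the structured fields from metrics.to_dict() attached via `extra`.
```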
553 lib/crewai-files/src/crewai_files/cache/upload_cache.py
@@ -1,553 +0,0 @@
"""Cache for tracking uploaded files using aiocache."""

from __future__ import annotations

import asyncio
import atexit
import builtins
from collections.abc import Iterator
from dataclasses import dataclass
from datetime import datetime, timezone
import hashlib
import logging
from typing import TYPE_CHECKING, Any

from aiocache import Cache  # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]

from crewai_files.core.constants import DEFAULT_MAX_CACHE_ENTRIES, DEFAULT_TTL_SECONDS
from crewai_files.uploaders.factory import ProviderType


if TYPE_CHECKING:
    from crewai_files.core.types import FileInput

logger = logging.getLogger(__name__)


@dataclass
class CachedUpload:
    """Represents a cached file upload.

    Attributes:
        file_id: Provider-specific file identifier.
        provider: Name of the provider.
        file_uri: Optional URI for accessing the file.
        content_type: MIME type of the uploaded file.
        uploaded_at: When the file was uploaded.
        expires_at: When the upload expires (if applicable).
    """

    file_id: str
    provider: ProviderType
    file_uri: str | None
    content_type: str
    uploaded_at: datetime
    expires_at: datetime | None = None

    def is_expired(self) -> bool:
        """Check if this cached upload has expired."""
        if self.expires_at is None:
            return False
        return datetime.now(timezone.utc) >= self.expires_at


def _make_key(file_hash: str, provider: str) -> str:
    """Create a cache key from file hash and provider."""
    return f"upload:{provider}:{file_hash}"


def _compute_file_hash_streaming(chunks: Iterator[bytes]) -> str:
    """Compute SHA-256 hash from streaming chunks.

    Args:
        chunks: Iterator of byte chunks.

    Returns:
        Hexadecimal hash string.
    """
    hasher = hashlib.sha256()
    for chunk in chunks:
        hasher.update(chunk)
    return hasher.hexdigest()


def _compute_file_hash(file: FileInput) -> str:
    """Compute SHA-256 hash of file content.

    Uses streaming for FilePath sources to avoid loading large files into memory.
    """
    from crewai_files.core.sources import FilePath

    source = file._file_source
    if isinstance(source, FilePath):
        return _compute_file_hash_streaming(source.read_chunks(chunk_size=1024 * 1024))
    content = file.read()
    return hashlib.sha256(content).hexdigest()


class UploadCache:
    """Async cache for tracking uploaded files using aiocache.

    Supports in-memory caching by default, with optional Redis backend
    for distributed setups.

    Attributes:
        ttl: Default time-to-live in seconds for cached entries.
        namespace: Cache namespace for isolation.
    """

    def __init__(
        self,
        ttl: int = DEFAULT_TTL_SECONDS,
        namespace: str = "crewai_uploads",
        cache_type: str = "memory",
        max_entries: int | None = DEFAULT_MAX_CACHE_ENTRIES,
        **cache_kwargs: Any,
    ) -> None:
        """Initialize the upload cache.

        Args:
            ttl: Default TTL in seconds.
            namespace: Cache namespace.
            cache_type: Backend type ("memory" or "redis").
            max_entries: Maximum cache entries (None for unlimited).
            **cache_kwargs: Additional args for cache backend.
        """
        self.ttl = ttl
        self.namespace = namespace
        self.max_entries = max_entries
        self._provider_keys: dict[ProviderType, set[str]] = {}
        self._key_access_order: list[str] = []

        if cache_type == "redis":
            self._cache = Cache(
                Cache.REDIS,
                serializer=PickleSerializer(),
                namespace=namespace,
                **cache_kwargs,
            )
        else:
            self._cache = Cache(
                serializer=PickleSerializer(),
                namespace=namespace,
            )

    def _track_key(self, provider: ProviderType, key: str) -> None:
        """Track a key for a provider (for cleanup) and access order."""
        if provider not in self._provider_keys:
            self._provider_keys[provider] = set()
        self._provider_keys[provider].add(key)
        if key in self._key_access_order:
            self._key_access_order.remove(key)
        self._key_access_order.append(key)

    def _untrack_key(self, provider: ProviderType, key: str) -> None:
        """Remove key tracking for a provider."""
        if provider in self._provider_keys:
            self._provider_keys[provider].discard(key)
        if key in self._key_access_order:
            self._key_access_order.remove(key)

    async def _evict_if_needed(self) -> int:
        """Evict oldest entries if limit exceeded.

        Returns:
            Number of entries evicted.
        """
        if self.max_entries is None:
            return 0

        current_count = len(self)
        if current_count < self.max_entries:
            return 0

        to_evict = max(1, self.max_entries // 10)
        return await self._evict_oldest(to_evict)

    async def _evict_oldest(self, count: int) -> int:
        """Evict the oldest entries from the cache.

        Args:
            count: Number of entries to evict.

        Returns:
            Number of entries actually evicted.
        """
        evicted = 0
        keys_to_evict = self._key_access_order[:count]

        for key in keys_to_evict:
            await self._cache.delete(key)
            self._key_access_order.remove(key)
            for provider_keys in self._provider_keys.values():
                provider_keys.discard(key)
            evicted += 1

        if evicted > 0:
            logger.debug(f"Evicted {evicted} oldest cache entries")

        return evicted

    async def aget(
        self, file: FileInput, provider: ProviderType
    ) -> CachedUpload | None:
        """Get a cached upload for a file.

        Args:
            file: The file to look up.
            provider: The provider name.

        Returns:
            Cached upload if found and not expired, None otherwise.
        """
        file_hash = _compute_file_hash(file)
        return await self.aget_by_hash(file_hash, provider)

    async def aget_by_hash(
        self, file_hash: str, provider: ProviderType
    ) -> CachedUpload | None:
        """Get a cached upload by file hash.

        Args:
            file_hash: Hash of the file content.
            provider: The provider name.

        Returns:
            Cached upload if found and not expired, None otherwise.
        """
        key = _make_key(file_hash, provider)
        result = await self._cache.get(key)

        if result is None:
            return None
        if isinstance(result, CachedUpload):
            if result.is_expired():
                await self._cache.delete(key)
                self._untrack_key(provider, key)
                return None
            return result
        return None

    async def aset(
        self,
        file: FileInput,
        provider: ProviderType,
        file_id: str,
        file_uri: str | None = None,
        expires_at: datetime | None = None,
    ) -> CachedUpload:
        """Cache an uploaded file.

        Args:
            file: The file that was uploaded.
            provider: The provider name.
            file_id: Provider-specific file identifier.
            file_uri: Optional URI for accessing the file.
            expires_at: When the upload expires.

        Returns:
            The created cache entry.
        """
        file_hash = _compute_file_hash(file)
        return await self.aset_by_hash(
            file_hash=file_hash,
            content_type=file.content_type,
            provider=provider,
            file_id=file_id,
            file_uri=file_uri,
            expires_at=expires_at,
        )

    async def aset_by_hash(
        self,
        file_hash: str,
        content_type: str,
        provider: ProviderType,
        file_id: str,
        file_uri: str | None = None,
        expires_at: datetime | None = None,
    ) -> CachedUpload:
        """Cache an uploaded file by hash.

        Args:
            file_hash: Hash of the file content.
            content_type: MIME type of the file.
            provider: The provider name.
            file_id: Provider-specific file identifier.
            file_uri: Optional URI for accessing the file.
            expires_at: When the upload expires.

        Returns:
            The created cache entry.
        """
        await self._evict_if_needed()

        key = _make_key(file_hash, provider)
        now = datetime.now(timezone.utc)

        cached = CachedUpload(
            file_id=file_id,
            provider=provider,
            file_uri=file_uri,
            content_type=content_type,
            uploaded_at=now,
            expires_at=expires_at,
        )

        ttl = self.ttl
        if expires_at is not None:
            ttl = max(0, int((expires_at - now).total_seconds()))

        await self._cache.set(key, cached, ttl=ttl)
        self._track_key(provider, key)
        logger.debug(f"Cached upload: {file_id} for provider {provider}")
        return cached

    async def aremove(self, file: FileInput, provider: ProviderType) -> bool:
        """Remove a cached upload.

        Args:
            file: The file to remove.
            provider: The provider name.

        Returns:
            True if entry was removed, False if not found.
        """
        file_hash = _compute_file_hash(file)
        key = _make_key(file_hash, provider)

        result = await self._cache.delete(key)
        removed = bool(result > 0 if isinstance(result, int) else result)
        if removed:
            self._untrack_key(provider, key)
        return removed

    async def aremove_by_file_id(self, file_id: str, provider: ProviderType) -> bool:
        """Remove a cached upload by file ID.

        Args:
            file_id: The file ID to remove.
            provider: The provider name.

        Returns:
            True if entry was removed, False if not found.
        """
        if provider not in self._provider_keys:
            return False

        for key in list(self._provider_keys[provider]):
            cached = await self._cache.get(key)
            if isinstance(cached, CachedUpload) and cached.file_id == file_id:
                await self._cache.delete(key)
                self._untrack_key(provider, key)
                return True
        return False

    async def aclear_expired(self) -> int:
        """Remove all expired entries from the cache.

        Returns:
            Number of entries removed.
        """
        removed = 0

        for provider, keys in list(self._provider_keys.items()):
            for key in list(keys):
                cached = await self._cache.get(key)
                if cached is None or (
                    isinstance(cached, CachedUpload) and cached.is_expired()
                ):
                    await self._cache.delete(key)
                    self._untrack_key(provider, key)
                    removed += 1

        if removed > 0:
            logger.debug(f"Cleared {removed} expired cache entries")
        return removed

    async def aclear(self) -> int:
        """Clear all entries from the cache.

        Returns:
            Number of entries cleared.
        """
        count = sum(len(keys) for keys in self._provider_keys.values())
        await self._cache.clear(namespace=self.namespace)
        self._provider_keys.clear()

        if count > 0:
            logger.debug(f"Cleared {count} cache entries")
        return count

    async def aget_all_for_provider(self, provider: ProviderType) -> list[CachedUpload]:
        """Get all cached uploads for a provider.

        Args:
            provider: The provider name.

        Returns:
            List of cached uploads for the provider.
        """
        if provider not in self._provider_keys:
            return []

        results: list[CachedUpload] = []
        for key in list(self._provider_keys[provider]):
            cached = await self._cache.get(key)
            if isinstance(cached, CachedUpload) and not cached.is_expired():
                results.append(cached)
        return results

    @staticmethod
    def _run_sync(coro: Any) -> Any:
        """Run an async coroutine from sync context without blocking event loop."""
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None

        if loop is not None and loop.is_running():
            future = asyncio.run_coroutine_threadsafe(coro, loop)
            return future.result(timeout=30)
        return asyncio.run(coro)

    def get(self, file: FileInput, provider: ProviderType) -> CachedUpload | None:
        """Sync wrapper for aget."""
        result: CachedUpload | None = self._run_sync(self.aget(file, provider))
        return result

    def get_by_hash(
        self, file_hash: str, provider: ProviderType
    ) -> CachedUpload | None:
        """Sync wrapper for aget_by_hash."""
        result: CachedUpload | None = self._run_sync(
            self.aget_by_hash(file_hash, provider)
        )
        return result

    def set(
        self,
        file: FileInput,
        provider: ProviderType,
        file_id: str,
        file_uri: str | None = None,
        expires_at: datetime | None = None,
    ) -> CachedUpload:
        """Sync wrapper for aset."""
        result: CachedUpload = self._run_sync(
            self.aset(file, provider, file_id, file_uri, expires_at)
        )
        return result

    def set_by_hash(
        self,
        file_hash: str,
        content_type: str,
        provider: ProviderType,
        file_id: str,
        file_uri: str | None = None,
        expires_at: datetime | None = None,
    ) -> CachedUpload:
        """Sync wrapper for aset_by_hash."""
        result: CachedUpload = self._run_sync(
            self.aset_by_hash(
                file_hash, content_type, provider, file_id, file_uri, expires_at
            )
        )
        return result

    def remove(self, file: FileInput, provider: ProviderType) -> bool:
        """Sync wrapper for aremove."""
        result: bool = self._run_sync(self.aremove(file, provider))
        return result

    def remove_by_file_id(self, file_id: str, provider: ProviderType) -> bool:
        """Sync wrapper for aremove_by_file_id."""
        result: bool = self._run_sync(self.aremove_by_file_id(file_id, provider))
        return result

    def clear_expired(self) -> int:
        """Sync wrapper for aclear_expired."""
        result: int = self._run_sync(self.aclear_expired())
        return result

    def clear(self) -> int:
        """Sync wrapper for aclear."""
        result: int = self._run_sync(self.aclear())
        return result

    def get_all_for_provider(self, provider: ProviderType) -> list[CachedUpload]:
        """Sync wrapper for aget_all_for_provider."""
        result: list[CachedUpload] = self._run_sync(
            self.aget_all_for_provider(provider)
        )
        return result

    def __len__(self) -> int:
        """Return the number of cached entries."""
        return sum(len(keys) for keys in self._provider_keys.values())

    def get_providers(self) -> builtins.set[ProviderType]:
        """Get all provider names that have cached entries.

        Returns:
            Set of provider names.
        """
        return builtins.set(self._provider_keys.keys())


_default_cache: UploadCache | None = None


def get_upload_cache(
    ttl: int = DEFAULT_TTL_SECONDS,
    namespace: str = "crewai_uploads",
    cache_type: str = "memory",
    **cache_kwargs: Any,
) -> UploadCache:
    """Get or create the default upload cache.

    Args:
        ttl: Default TTL in seconds.
        namespace: Cache namespace.
        cache_type: Backend type ("memory" or "redis").
        **cache_kwargs: Additional args for cache backend.

    Returns:
        The upload cache instance.
    """
    global _default_cache
    if _default_cache is None:
        _default_cache = UploadCache(
            ttl=ttl,
            namespace=namespace,
            cache_type=cache_type,
            **cache_kwargs,
        )
    return _default_cache


def reset_upload_cache() -> None:
    """Reset the default upload cache (useful for testing)."""
    global _default_cache
    if _default_cache is not None:
        _default_cache.clear()
    _default_cache = None


def _cleanup_on_exit() -> None:
    """Clean up uploaded files on process exit."""
    global _default_cache
    if _default_cache is None or len(_default_cache) == 0:
        return

    from crewai_files.cache.cleanup import cleanup_uploaded_files

    try:
        cleanup_uploaded_files(_default_cache)
    except Exception as e:
        logger.debug(f"Error during exit cleanup: {e}")


atexit.register(_cleanup_on_exit)
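The cache is content-addressed: `_make_key` combines a SHA-256 of the file bytes with the provider name, so re-uploading identical content to the same provider lands on the same entry while another provider gets its own. A small sketch of just the keying scheme (sample payloads invented):

```python
import hashlib


def upload_key(content: bytes, provider: str) -> str:
    """Content-addressed key, mirroring the upload:{provider}:{hash} scheme."""
    file_hash = hashlib.sha256(content).hexdigest()
    return f"upload:{provider}:{file_hash}"


a = upload_key(b"same bytes", "openai")
b = upload_key(b"same bytes", "openai")
c = upload_key(b"same bytes", "gemini")
assert a == b  # identical content + provider -> one cache entry
assert a != c  # same content on another provider is a distinct upload
```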
92 lib/crewai-files/src/crewai_files/core/__init__.py
@@ -1,92 +0,0 @@
"""Core file types and sources."""

from crewai_files.core.constants import (
    BACKOFF_BASE_DELAY,
    BACKOFF_JITTER_FACTOR,
    BACKOFF_MAX_DELAY,
    DEFAULT_MAX_CACHE_ENTRIES,
    DEFAULT_MAX_FILE_SIZE_BYTES,
    DEFAULT_TTL_SECONDS,
    DEFAULT_UPLOAD_CHUNK_SIZE,
    FILES_API_MAX_SIZE,
    GEMINI_FILE_TTL,
    MAGIC_BUFFER_SIZE,
    MAX_CONCURRENCY,
    MULTIPART_CHUNKSIZE,
    MULTIPART_THRESHOLD,
    UPLOAD_MAX_RETRIES,
    UPLOAD_RETRY_DELAY_BASE,
)
from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFile,
    UrlReference,
)
from crewai_files.core.sources import (
    AsyncFileStream,
    FileBytes,
    FilePath,
    FileSource,
    FileStream,
    FileUrl,
)
from crewai_files.core.types import (
    AudioFile,
    AudioMimeType,
    BaseFile,
    CoercedFileSource,
    File,
    FileInput,
    FileMode,
    ImageFile,
    ImageMimeType,
    PDFFile,
    TextFile,
    VideoFile,
    VideoMimeType,
)


__all__ = [
    "BACKOFF_BASE_DELAY",
    "BACKOFF_JITTER_FACTOR",
    "BACKOFF_MAX_DELAY",
    "DEFAULT_MAX_CACHE_ENTRIES",
    "DEFAULT_MAX_FILE_SIZE_BYTES",
    "DEFAULT_TTL_SECONDS",
    "DEFAULT_UPLOAD_CHUNK_SIZE",
    "FILES_API_MAX_SIZE",
    "GEMINI_FILE_TTL",
    "MAGIC_BUFFER_SIZE",
    "MAX_CONCURRENCY",
    "MULTIPART_CHUNKSIZE",
    "MULTIPART_THRESHOLD",
    "UPLOAD_MAX_RETRIES",
    "UPLOAD_RETRY_DELAY_BASE",
    "AsyncFileStream",
    "AudioFile",
    "AudioMimeType",
    "BaseFile",
    "CoercedFileSource",
    "File",
    "FileBytes",
    "FileInput",
    "FileMode",
    "FilePath",
    "FileReference",
    "FileSource",
    "FileStream",
    "FileUrl",
    "ImageFile",
    "ImageMimeType",
    "InlineBase64",
    "InlineBytes",
    "PDFFile",
    "ResolvedFile",
    "TextFile",
    "UrlReference",
    "VideoFile",
    "VideoMimeType",
]
26 lib/crewai-files/src/crewai_files/core/constants.py
@@ -1,26 +0,0 @@
"""Constants for file handling utilities."""

from datetime import timedelta
from typing import Final, Literal


DEFAULT_MAX_FILE_SIZE_BYTES: Final[Literal[524_288_000]] = 524_288_000
MAGIC_BUFFER_SIZE: Final[Literal[2048]] = 2048

UPLOAD_MAX_RETRIES: Final[Literal[3]] = 3
UPLOAD_RETRY_DELAY_BASE: Final[Literal[2]] = 2

DEFAULT_TTL_SECONDS: Final[Literal[86_400]] = 86_400
DEFAULT_MAX_CACHE_ENTRIES: Final[Literal[1000]] = 1000

GEMINI_FILE_TTL: Final[timedelta] = timedelta(hours=48)
BACKOFF_BASE_DELAY: Final[float] = 1.0
BACKOFF_MAX_DELAY: Final[float] = 30.0
BACKOFF_JITTER_FACTOR: Final[float] = 0.1

FILES_API_MAX_SIZE: Final[Literal[536_870_912]] = 536_870_912
DEFAULT_UPLOAD_CHUNK_SIZE: Final[Literal[67_108_864]] = 67_108_864

MULTIPART_THRESHOLD: Final[Literal[8_388_608]] = 8_388_608
MULTIPART_CHUNKSIZE: Final[Literal[8_388_608]] = 8_388_608
MAX_CONCURRENCY: Final[Literal[10]] = 10
84 lib/crewai-files/src/crewai_files/core/resolved.py
@@ -1,84 +0,0 @@
"""Resolved file types representing different delivery methods for file content."""

from abc import ABC
from dataclasses import dataclass
from datetime import datetime


@dataclass(frozen=True)
class ResolvedFile(ABC):
    """Base class for resolved file representations.

    A ResolvedFile represents the final form of a file ready for delivery
    to an LLM provider, whether inline or via reference.

    Attributes:
        content_type: MIME type of the file content.
    """

    content_type: str


@dataclass(frozen=True)
class InlineBase64(ResolvedFile):
    """File content encoded as base64 string.

    Used by most providers for inline file content in messages.

    Attributes:
        content_type: MIME type of the file content.
        data: Base64-encoded file content.
    """

    data: str


@dataclass(frozen=True)
class InlineBytes(ResolvedFile):
    """File content as raw bytes.

    Used by providers like Bedrock that accept raw bytes instead of base64.

    Attributes:
        content_type: MIME type of the file content.
        data: Raw file bytes.
    """

    data: bytes


@dataclass(frozen=True)
class FileReference(ResolvedFile):
    """Reference to an uploaded file.

    Used when files are uploaded via provider File APIs.

    Attributes:
        content_type: MIME type of the file content.
        file_id: Provider-specific file identifier.
        provider: Name of the provider the file was uploaded to.
        expires_at: When the uploaded file expires (if applicable).
        file_uri: Optional URI for accessing the file (used by Gemini).
    """

    file_id: str
    provider: str
    expires_at: datetime | None = None
    file_uri: str | None = None


@dataclass(frozen=True)
class UrlReference(ResolvedFile):
    """Reference to a file accessible via URL.

    Used by providers that support fetching files from URLs.

    Attributes:
        content_type: MIME type of the file content.
        url: URL where the file can be accessed.
    """

    url: str


ResolvedFileType = InlineBase64 | InlineBytes | FileReference | UrlReference
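Because `ResolvedFileType` is a closed union of frozen dataclasses, downstream formatting code can dispatch exhaustively with `match`. A hedged sketch; the dict shapes below are invented for illustration, not any provider's real wire format:

```python
def to_message_part(resolved: ResolvedFileType) -> dict:
    """Map each resolved form to an illustrative message part."""
    match resolved:
        case InlineBase64(content_type=ct, data=data):
            return {"type": "inline", "media_type": ct, "base64": data}
        case InlineBytes(content_type=ct, data=data):
            return {"type": "bytes", "media_type": ct, "size": len(data)}
        case FileReference(file_id=fid, provider=prov):
            return {"type": "file_ref", "id": fid, "provider": prov}
        case UrlReference(content_type=ct, url=url):
            return {"type": "url", "media_type": ct, "url": url}


# "file-abc" is a made-up identifier for the example.
print(to_message_part(FileReference(content_type="application/pdf",
                                    file_id="file-abc", provider="openai")))
```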
@@ -1,529 +0,0 @@
|
||||
"""Base file class for handling file inputs in tasks."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import AsyncIterator, Iterator
|
||||
import inspect
|
||||
import mimetypes
|
||||
from pathlib import Path
|
||||
from typing import Annotated, Any, BinaryIO, Protocol, cast, runtime_checkable
|
||||
|
||||
import aiofiles
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
GetCoreSchemaHandler,
|
||||
PrivateAttr,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic_core import CoreSchema, core_schema
|
||||
from typing_extensions import TypeIs
|
||||
|
||||
from crewai_files.core.constants import DEFAULT_MAX_FILE_SIZE_BYTES, MAGIC_BUFFER_SIZE
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class AsyncReadable(Protocol):
|
||||
"""Protocol for async readable streams."""
|
||||
|
||||
async def read(self, size: int = -1) -> bytes:
|
||||
"""Read up to size bytes from the stream."""
|
||||
...
|
||||
|
||||
|
||||
class _AsyncReadableValidator:
|
||||
"""Pydantic validator for AsyncReadable types."""
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: Any, _handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
return core_schema.no_info_plain_validator_function(
|
||||
cls._validate,
|
||||
serialization=core_schema.plain_serializer_function_ser_schema(
|
||||
lambda x: None, info_arg=False
|
||||
),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _validate(value: Any) -> AsyncReadable:
|
||||
if isinstance(value, AsyncReadable):
|
||||
return value
|
||||
raise ValueError("Expected an async readable object with async read() method")
|
||||
|
||||
|
||||
ValidatedAsyncReadable = Annotated[AsyncReadable, _AsyncReadableValidator()]
|
||||
|
||||
|
||||
def _fallback_content_type(filename: str | None) -> str:
|
||||
"""Get content type from filename extension or return default."""
|
||||
if filename:
|
||||
mime_type, _ = mimetypes.guess_type(filename)
|
||||
if mime_type:
|
||||
return mime_type
|
||||
return "application/octet-stream"
|
||||
|
||||
|
||||
def generate_filename(content_type: str) -> str:
|
||||
"""Generate a UUID-based filename with extension from content type.
|
||||
|
||||
Args:
|
||||
content_type: MIME type to derive extension from.
|
||||
|
||||
Returns:
|
||||
Filename in format "{uuid}{ext}" where ext includes the dot.
|
||||
"""
|
||||
import uuid
|
||||
|
||||
ext = mimetypes.guess_extension(content_type) or ""
|
||||
return f"{uuid.uuid4()}{ext}"
|
||||
|
||||
|
||||
def detect_content_type(data: bytes, filename: str | None = None) -> str:
|
||||
"""Detect MIME type from file content.
|
||||
|
||||
Uses python-magic if available for accurate content-based detection,
|
||||
falls back to mimetypes module using filename extension.
|
||||
|
||||
Args:
|
||||
data: Raw bytes to analyze (only first 2048 bytes are used).
|
||||
filename: Optional filename for extension-based fallback.
|
||||
|
||||
Returns:
|
||||
The detected MIME type.
|
||||
"""
|
||||
try:
|
||||
import magic
|
||||
|
||||
result: str = magic.from_buffer(data[:MAGIC_BUFFER_SIZE], mime=True)
|
||||
return result
|
||||
except ImportError:
|
||||
return _fallback_content_type(filename)
|
||||
|
||||
|
||||
def detect_content_type_from_path(path: Path, filename: str | None = None) -> str:
|
||||
"""Detect MIME type from file path.
|
||||
|
||||
Uses python-magic's from_file() for accurate detection without reading
|
||||
the entire file into memory.
|
||||
|
||||
Args:
|
||||
path: Path to the file.
|
||||
filename: Optional filename for extension-based fallback.
|
||||
|
||||
Returns:
|
||||
The detected MIME type.
|
||||
"""
|
||||
try:
|
||||
import magic
|
||||
|
||||
result: str = magic.from_file(str(path), mime=True)
|
||||
return result
|
||||
except ImportError:
|
||||
return _fallback_content_type(filename or path.name)
|
||||
|
||||
|
||||
class _BinaryIOValidator:
    """Pydantic validator for BinaryIO types."""

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: GetCoreSchemaHandler
    ) -> CoreSchema:
        return core_schema.no_info_plain_validator_function(
            cls._validate,
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda x: None, info_arg=False
            ),
        )

    @staticmethod
    def _validate(value: Any) -> BinaryIO:
        if hasattr(value, "read") and hasattr(value, "seek"):
            return cast(BinaryIO, value)
        raise ValueError("Expected a binary file-like object with read() and seek()")


ValidatedBinaryIO = Annotated[BinaryIO, _BinaryIOValidator()]


class FilePath(BaseModel):
    """File loaded from a filesystem path."""

    path: Path = Field(description="Path to the file on the filesystem.")
    max_size_bytes: int = Field(
        default=DEFAULT_MAX_FILE_SIZE_BYTES,
        exclude=True,
        description="Maximum file size in bytes.",
    )
    _content: bytes | None = PrivateAttr(default=None)
    _content_type: str = PrivateAttr()

    @model_validator(mode="after")
    def _validate_file_exists(self) -> FilePath:
        """Validate that the file exists, is secure, and within size limits."""
        from crewai_files.processing.exceptions import FileTooLargeError

        path_str = str(self.path)
        if ".." in path_str:
            raise ValueError(f"Path traversal not allowed: {self.path}")

        if self.path.is_symlink():
            resolved = self.path.resolve()
            cwd = Path.cwd().resolve()
            if not str(resolved).startswith(str(cwd)):
                raise ValueError(f"Symlink escapes allowed directory: {self.path}")

        if not self.path.exists():
            raise ValueError(f"File not found: {self.path}")
        if not self.path.is_file():
            raise ValueError(f"Path is not a file: {self.path}")

        actual_size = self.path.stat().st_size
        if actual_size > self.max_size_bytes:
            raise FileTooLargeError(
                f"File exceeds max size ({actual_size} > {self.max_size_bytes})",
                file_name=str(self.path),
                actual_size=actual_size,
                max_size=self.max_size_bytes,
            )

        self._content_type = detect_content_type_from_path(self.path, self.path.name)
        return self

    @property
    def filename(self) -> str:
        """Get the filename from the path."""
        return self.path.name

    @property
    def content_type(self) -> str:
        """Get the content type."""
        return self._content_type

    def read(self) -> bytes:
        """Read the file content from disk."""
        if self._content is None:
            self._content = self.path.read_bytes()
        return self._content

    async def aread(self) -> bytes:
        """Async read the file content from disk."""
        if self._content is None:
            async with aiofiles.open(self.path, "rb") as f:
                self._content = await f.read()
        return self._content

    def read_chunks(self, chunk_size: int = 65536) -> Iterator[bytes]:
        """Stream file content in chunks without loading entirely into memory.

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of file content.
        """
        with open(self.path, "rb") as f:
            while chunk := f.read(chunk_size):
                yield chunk

    async def aread_chunks(self, chunk_size: int = 65536) -> AsyncIterator[bytes]:
        """Async streaming for non-blocking I/O.

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of file content.
        """
        async with aiofiles.open(self.path, "rb") as f:
            while chunk := await f.read(chunk_size):
                yield chunk

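# Editor's note: an illustrative sketch, not part of the original module;
# "report.pdf" is a hypothetical path. FilePath validates existence and size at
# construction, reads lazily with caching, and can stream without loading the
# whole file.
def _demo_filepath_streaming() -> None:
    doc = FilePath(path=Path("report.pdf"), max_size_bytes=10 * 1024 * 1024)
    print(doc.filename, doc.content_type)  # magic-detected MIME type
    for chunk in doc.read_chunks(chunk_size=1 << 16):
        _ = chunk  # 64 KiB at a time; the file is never fully in memory
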
class FileBytes(BaseModel):
    """File created from raw bytes content."""

    data: bytes = Field(description="Raw bytes content of the file.")
    filename: str | None = Field(default=None, description="Optional filename.")
    _content_type: str = PrivateAttr()

    @model_validator(mode="after")
    def _detect_content_type(self) -> FileBytes:
        """Detect and cache content type from data."""
        self._content_type = detect_content_type(self.data, self.filename)
        return self

    @property
    def content_type(self) -> str:
        """Get the content type."""
        return self._content_type

    def read(self) -> bytes:
        """Return the bytes content."""
        return self.data

    async def aread(self) -> bytes:
        """Async return the bytes content (immediate, already in memory)."""
        return self.data

    def read_chunks(self, chunk_size: int = 65536) -> Iterator[bytes]:
        """Stream bytes content in chunks.

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of bytes content.
        """
        for i in range(0, len(self.data), chunk_size):
            yield self.data[i : i + chunk_size]

    async def aread_chunks(self, chunk_size: int = 65536) -> AsyncIterator[bytes]:
        """Async streaming (immediate yield since already in memory).

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of bytes content.
        """
        for chunk in self.read_chunks(chunk_size):
            yield chunk


class FileStream(BaseModel):
    """File loaded from a file-like stream."""

    stream: ValidatedBinaryIO = Field(description="Binary file stream.")
    filename: str | None = Field(default=None, description="Optional filename.")
    _content: bytes | None = PrivateAttr(default=None)
    _content_type: str = PrivateAttr()

    @model_validator(mode="after")
    def _initialize(self) -> FileStream:
        """Extract filename and detect content type."""
        if self.filename is None:
            name = getattr(self.stream, "name", None)
            if name is not None:
                self.filename = Path(name).name

        position = self.stream.tell()
        self.stream.seek(0)
        header = self.stream.read(MAGIC_BUFFER_SIZE)
        self.stream.seek(position)
        self._content_type = detect_content_type(header, self.filename)
        return self

    @property
    def content_type(self) -> str:
        """Get the content type."""
        return self._content_type

    def read(self) -> bytes:
        """Read the stream content. Content is cached after first read."""
        if self._content is None:
            position = self.stream.tell()
            self.stream.seek(0)
            self._content = self.stream.read()
            self.stream.seek(position)
        return self._content

    def close(self) -> None:
        """Close the underlying stream."""
        self.stream.close()

    def __enter__(self) -> FileStream:
        """Enter context manager."""
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Exit context manager and close stream."""
        self.close()

    def read_chunks(self, chunk_size: int = 65536) -> Iterator[bytes]:
        """Stream from underlying stream in chunks.

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of stream content.
        """
        position = self.stream.tell()
        self.stream.seek(0)
        try:
            while chunk := self.stream.read(chunk_size):
                yield chunk
        finally:
            self.stream.seek(position)

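# Editor's note: an illustrative sketch, not part of the original module.
# FileStream peeks at the header for MIME detection and always restores the
# caller's read position, so wrapping a stream is non-destructive.
def _demo_filestream() -> None:
    import io

    stream = io.BytesIO(b"%PDF-1.7 example bytes")
    with FileStream(stream=stream, filename="inline.pdf") as f:
        _ = f.read()                 # cached after the first call
        assert f.stream.tell() == 0  # position restored after reading
    # leaving the with-block closed the underlying stream
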
class AsyncFileStream(BaseModel):
    """File loaded from an async stream.

    Use for async file handles like aiofiles objects or aiohttp response bodies.
    This is an async-only type - use aread() instead of read().

    Attributes:
        stream: Async file-like object with async read() method.
        filename: Optional filename for the stream.
    """

    stream: ValidatedAsyncReadable = Field(
        description="Async file stream with async read() method."
    )
    filename: str | None = Field(default=None, description="Optional filename.")
    _content: bytes | None = PrivateAttr(default=None)
    _content_type: str | None = PrivateAttr(default=None)

    @property
    def content_type(self) -> str:
        """Get the content type from stream content (cached). Requires aread() first."""
        if self._content is None:
            raise RuntimeError("Call aread() first to load content")
        if self._content_type is None:
            self._content_type = detect_content_type(self._content, self.filename)
        return self._content_type

    async def aread(self) -> bytes:
        """Async read the stream content. Content is cached after first read."""
        if self._content is None:
            self._content = await self.stream.read()
        return self._content

    async def aclose(self) -> None:
        """Async close the underlying stream."""
        if hasattr(self.stream, "close"):
            result = self.stream.close()
            if inspect.isawaitable(result):
                await result

    async def __aenter__(self) -> AsyncFileStream:
        """Async enter context manager."""
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Async exit context manager and close stream."""
        await self.aclose()

    async def aread_chunks(self, chunk_size: int = 65536) -> AsyncIterator[bytes]:
        """Async stream content in chunks.

        Args:
            chunk_size: Size of each chunk in bytes.

        Yields:
            Chunks of stream content.
        """
        while chunk := await self.stream.read(chunk_size):
            yield chunk

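# Editor's note: an illustrative sketch, not part of the original module, and
# "clip.wav" is a hypothetical file. AsyncFileStream defers all I/O to aread();
# content_type only becomes available once the bytes are cached.
async def _demo_async_stream() -> None:
    async with aiofiles.open("clip.wav", "rb") as handle:
        wrapped = AsyncFileStream(stream=handle, filename="clip.wav")
        data = await wrapped.aread()
        print(wrapped.content_type, len(data))  # detection runs on cached bytes
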
class FileUrl(BaseModel):
    """File referenced by URL.

    For providers that support URL references, the URL is passed directly.
    For providers that don't, content is fetched on demand.

    Attributes:
        url: URL where the file can be accessed.
        filename: Optional filename (extracted from URL if not provided).
    """

    url: str = Field(description="URL where the file can be accessed.")
    filename: str | None = Field(default=None, description="Optional filename.")
    _content_type: str | None = PrivateAttr(default=None)
    _content: bytes | None = PrivateAttr(default=None)

    @model_validator(mode="after")
    def _validate_url(self) -> FileUrl:
        """Validate URL format."""
        if not self.url.startswith(("http://", "https://")):
            raise ValueError(f"Invalid URL scheme: {self.url}")
        return self

    @property
    def content_type(self) -> str:
        """Get the content type, guessing from URL extension if not set."""
        if self._content_type is None:
            self._content_type = self._guess_content_type()
        return self._content_type

    def _guess_content_type(self) -> str:
        """Guess content type from URL extension."""
        from urllib.parse import urlparse

        parsed = urlparse(self.url)
        path = parsed.path
        guessed, _ = mimetypes.guess_type(path)
        return guessed or "application/octet-stream"

    def read(self) -> bytes:
        """Fetch content from URL (for providers that don't support URL references)."""
        if self._content is None:
            import httpx

            response = httpx.get(self.url, follow_redirects=True)
            response.raise_for_status()
            self._content = response.content
            if "content-type" in response.headers:
                self._content_type = response.headers["content-type"].split(";")[0]
        return self._content

    async def aread(self) -> bytes:
        """Async fetch content from URL."""
        if self._content is None:
            import httpx

            async with httpx.AsyncClient() as client:
                response = await client.get(self.url, follow_redirects=True)
                response.raise_for_status()
                self._content = response.content
            if "content-type" in response.headers:
                self._content_type = response.headers["content-type"].split(";")[0]
        return self._content


FileSource = FilePath | FileBytes | FileStream | AsyncFileStream | FileUrl

def is_file_source(v: object) -> TypeIs[FileSource]:
    """Type guard to narrow input to FileSource."""
    # AsyncFileStream is included so the check matches every member of the
    # FileSource union above.
    return isinstance(v, (FilePath, FileBytes, FileStream, AsyncFileStream, FileUrl))


def _normalize_source(value: Any) -> FileSource:
    """Convert raw input to appropriate source type."""
    if isinstance(value, (FilePath, FileBytes, FileStream, AsyncFileStream, FileUrl)):
        return value
    if isinstance(value, str):
        if value.startswith(("http://", "https://")):
            return FileUrl(url=value)
        return FilePath(path=Path(value))
    if isinstance(value, Path):
        return FilePath(path=value)
    if isinstance(value, bytes):
        return FileBytes(data=value)
    if isinstance(value, AsyncReadable):
        return AsyncFileStream(stream=value)
    if hasattr(value, "read") and hasattr(value, "seek"):
        return FileStream(stream=value)
    raise ValueError(f"Cannot convert {type(value).__name__} to file source")


RawFileInput = str | Path | bytes
FileSourceInput = Annotated[
    RawFileInput | FileSource, BeforeValidator(_normalize_source)
]
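# Editor's note: an illustrative sketch, not part of the original module. The
# BeforeValidator wiring above lets one model field accept raw strings, paths,
# bytes, or streams; only the branches with no filesystem access are shown.
def _demo_normalize_source() -> None:
    assert isinstance(_normalize_source("https://example.com/a.png"), FileUrl)
    assert isinstance(_normalize_source(b"raw bytes"), FileBytes)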
@@ -1,282 +0,0 @@
"""Content-type specific file classes."""

from __future__ import annotations

from abc import ABC
from io import IOBase
from pathlib import Path
from typing import Annotated, Any, BinaryIO, Literal

from pydantic import BaseModel, Field, GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
from typing_extensions import Self

from crewai_files.core.sources import (
    AsyncFileStream,
    FileBytes,
    FilePath,
    FileSource,
    FileStream,
    FileUrl,
    is_file_source,
)


FileSourceInput = str | Path | bytes | IOBase | FileSource


class _FileSourceCoercer:
    """Pydantic-compatible type that coerces various inputs to FileSource."""

    @classmethod
    def _coerce(cls, v: Any) -> FileSource:
        """Convert raw input to appropriate FileSource type."""
        if isinstance(v, (FilePath, FileBytes, FileStream, FileUrl)):
            return v
        if isinstance(v, str):
            if v.startswith(("http://", "https://")):
                return FileUrl(url=v)
            return FilePath(path=Path(v))
        if isinstance(v, Path):
            return FilePath(path=v)
        if isinstance(v, bytes):
            return FileBytes(data=v)
        if isinstance(v, (IOBase, BinaryIO)):
            return FileStream(stream=v)
        raise ValueError(f"Cannot convert {type(v).__name__} to file source")

    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        """Generate Pydantic core schema for FileSource coercion."""
        return core_schema.no_info_plain_validator_function(
            cls._coerce,
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda v: v,
                info_arg=False,
                return_schema=core_schema.any_schema(),
            ),
        )


CoercedFileSource = Annotated[FileSourceInput, _FileSourceCoercer]

FileMode = Literal["strict", "auto", "warn", "chunk"]


ImageExtension = Literal[
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".webp",
    ".bmp",
    ".tiff",
    ".tif",
    ".svg",
    ".heic",
    ".heif",
]
ImageMimeType = Literal[
    "image/png",
    "image/jpeg",
    "image/gif",
    "image/webp",
    "image/bmp",
    "image/tiff",
    "image/svg+xml",
    "image/heic",
    "image/heif",
]

PDFExtension = Literal[".pdf"]
PDFContentType = Literal["application/pdf"]

TextExtension = Literal[
    ".txt",
    ".md",
    ".rst",
    ".csv",
    ".json",
    ".xml",
    ".yaml",
    ".yml",
    ".html",
    ".htm",
    ".log",
    ".ini",
    ".cfg",
    ".conf",
]
TextContentType = Literal[
    "text/plain",
    "text/markdown",
    "text/csv",
    "application/json",
    "application/xml",
    "text/xml",
    "application/x-yaml",
    "text/yaml",
    "text/html",
]

AudioExtension = Literal[
    ".mp3", ".wav", ".ogg", ".flac", ".aac", ".m4a", ".wma", ".aiff", ".opus"
]
AudioMimeType = Literal[
    "audio/mp3",
    "audio/mpeg",
    "audio/wav",
    "audio/x-wav",
    "audio/ogg",
    "audio/flac",
    "audio/aac",
    "audio/m4a",
    "audio/mp4",
    "audio/x-ms-wma",
    "audio/aiff",
    "audio/opus",
]

VideoExtension = Literal[
    ".mp4", ".avi", ".mkv", ".mov", ".webm", ".flv", ".wmv", ".m4v", ".mpeg", ".mpg"
]
VideoMimeType = Literal[
    "video/mp4",
    "video/mpeg",
    "video/webm",
    "video/quicktime",
    "video/x-msvideo",
    "video/x-matroska",
    "video/x-flv",
    "video/x-ms-wmv",
]

class BaseFile(ABC, BaseModel):
    """Abstract base class for typed file wrappers.

    Provides common functionality for all file types including:
    - File source management
    - Content reading
    - Dict unpacking support (`**` syntax)
    - Per-file handling mode

    Can be unpacked with ** syntax: `{**ImageFile(source="./chart.png")}`
    which unpacks to: `{"chart": <ImageFile instance>}` using filename stem as key.

    Attributes:
        source: The underlying file source (path, bytes, or stream).
        mode: How to handle this file if it exceeds provider limits.
    """

    source: CoercedFileSource = Field(description="The underlying file source.")
    mode: FileMode = Field(
        default="auto",
        description="How to handle if file exceeds limits: strict, auto, warn, chunk.",
    )

    @property
    def _file_source(self) -> FileSource:
        """Get source with narrowed type (always FileSource after validation)."""
        if is_file_source(self.source):
            return self.source
        raise TypeError("source must be a FileSource after validation")

    @property
    def filename(self) -> str | None:
        """Get the filename from the source."""
        return self._file_source.filename

    @property
    def content_type(self) -> str:
        """Get the content type from the source."""
        return self._file_source.content_type

    def read(self) -> bytes:
        """Read the file content as bytes."""
        return self._file_source.read()  # type: ignore[union-attr]

    async def aread(self) -> bytes:
        """Async read the file content as bytes.

        Raises:
            TypeError: If the underlying source doesn't support async read.
        """
        source = self._file_source
        if isinstance(source, (FilePath, FileBytes, AsyncFileStream, FileUrl)):
            return await source.aread()
        raise TypeError(f"{type(source).__name__} does not support async read")

    def read_text(self, encoding: str = "utf-8") -> str:
        """Read the file content as string."""
        return self.read().decode(encoding)

    @property
    def _unpack_key(self) -> str:
        """Get the key to use when unpacking (filename stem)."""
        filename = self._file_source.filename
        if filename:
            return Path(filename).stem
        return "file"

    def keys(self) -> list[str]:
        """Return keys for dict unpacking."""
        return [self._unpack_key]

    def __getitem__(self, key: str) -> Self:
        """Return self for dict unpacking."""
        if key == self._unpack_key:
            return self
        raise KeyError(key)


class ImageFile(BaseFile):
    """File representing an image.

    Supports common image formats: PNG, JPEG, GIF, WebP, BMP, TIFF, SVG.
    """


class PDFFile(BaseFile):
    """File representing a PDF document."""


class TextFile(BaseFile):
    """File representing a text document.

    Supports common text formats: TXT, MD, RST, CSV, JSON, XML, YAML, HTML.
    """


class AudioFile(BaseFile):
    """File representing an audio file.

    Supports common audio formats: MP3, WAV, OGG, FLAC, AAC, M4A, WMA.
    """


class VideoFile(BaseFile):
    """File representing a video file.

    Supports common video formats: MP4, AVI, MKV, MOV, WebM, FLV, WMV.
    """


class File(BaseFile):
    """Generic file that auto-detects the appropriate type.

    Use this when you don't want to specify the exact file type.
    The content type is automatically detected from the file contents.

    Example:
        >>> pdf_file = File(source="./document.pdf")
        >>> image_file = File(source="./image.png")
        >>> bytes_file = File(source=b"file content")
    """


FileInput = AudioFile | File | ImageFile | PDFFile | TextFile | VideoFile
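# Editor's note: an illustrative sketch, not part of the original module. The
# keys()/__getitem__ pair on BaseFile is what makes ** unpacking work: Python
# calls keys() and then __getitem__ for each key when building the dict.
def _demo_unpacking() -> None:
    chart = File(source=b"\x89PNG\r\n\x1a\n")  # unnamed bytes fall back to key "file"
    files = {**chart}
    assert list(files) == ["file"] and files["file"] is chart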
@@ -1,14 +0,0 @@
"""High-level formatting API for multimodal content."""

from crewai_files.formatting.api import (
    aformat_multimodal_content,
    format_multimodal_content,
)
from crewai_files.formatting.openai import OpenAIResponsesFormatter


__all__ = [
    "OpenAIResponsesFormatter",
    "aformat_multimodal_content",
    "format_multimodal_content",
]
@@ -1,98 +0,0 @@
"""Anthropic content block formatter."""

from __future__ import annotations

import base64
from typing import Any

from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFileType,
    UrlReference,
)
from crewai_files.core.types import FileInput


class AnthropicFormatter:
    """Formats resolved files into Anthropic content blocks."""

    def format_block(
        self,
        file: FileInput,
        resolved: ResolvedFileType,
    ) -> dict[str, Any] | None:
        """Format a resolved file into an Anthropic content block.

        Args:
            file: Original file input with metadata.
            resolved: Resolved file.

        Returns:
            Content block dict or None if not supported.
        """
        content_type = file.content_type
        block_type = self._get_block_type(content_type)
        if block_type is None:
            return None

        if isinstance(resolved, FileReference):
            return {
                "type": block_type,
                "source": {
                    "type": "file",
                    "file_id": resolved.file_id,
                },
                "cache_control": {"type": "ephemeral"},
            }

        if isinstance(resolved, UrlReference):
            return {
                "type": block_type,
                "source": {
                    "type": "url",
                    "url": resolved.url,
                },
                "cache_control": {"type": "ephemeral"},
            }

        if isinstance(resolved, InlineBase64):
            return {
                "type": block_type,
                "source": {
                    "type": "base64",
                    "media_type": resolved.content_type,
                    "data": resolved.data,
                },
                "cache_control": {"type": "ephemeral"},
            }

        if isinstance(resolved, InlineBytes):
            return {
                "type": block_type,
                "source": {
                    "type": "base64",
                    "media_type": resolved.content_type,
                    "data": base64.b64encode(resolved.data).decode("ascii"),
                },
                "cache_control": {"type": "ephemeral"},
            }

        raise TypeError(f"Unexpected resolved type: {type(resolved).__name__}")

    @staticmethod
    def _get_block_type(content_type: str) -> str | None:
        """Get Anthropic block type for content type.

        Args:
            content_type: MIME type.

        Returns:
            Block type string or None if not supported.
        """
        if content_type.startswith("image/"):
            return "image"
        if content_type == "application/pdf":
            return "document"
        return None
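# Editor's note: an illustrative sketch, not part of the original module,
# showing the block shape produced for a PNG resolved to inline bytes:
#
#     AnthropicFormatter().format_block(image_file, InlineBytes(...))
#     # -> {"type": "image",
#     #     "source": {"type": "base64", "media_type": "image/png", "data": "<base64>"},
#     #     "cache_control": {"type": "ephemeral"}}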
@@ -1,328 +0,0 @@
"""High-level API for formatting multimodal content."""

from __future__ import annotations

import os
from typing import Any

from crewai_files.cache.upload_cache import get_upload_cache
from crewai_files.core.types import FileInput
from crewai_files.formatting.anthropic import AnthropicFormatter
from crewai_files.formatting.bedrock import BedrockFormatter
from crewai_files.formatting.gemini import GeminiFormatter
from crewai_files.formatting.openai import OpenAIFormatter, OpenAIResponsesFormatter
from crewai_files.processing.constraints import get_constraints_for_provider
from crewai_files.processing.processor import FileProcessor
from crewai_files.resolution.resolver import FileResolver, FileResolverConfig
from crewai_files.uploaders.factory import ProviderType


def _normalize_provider(provider: str | None) -> ProviderType:
    """Normalize provider string to ProviderType.

    Args:
        provider: Raw provider string.

    Returns:
        Normalized provider type.

    Raises:
        ValueError: If provider is None or empty.
    """
    if not provider:
        raise ValueError("provider is required")

    provider_lower = provider.lower()

    if "gemini" in provider_lower:
        return "gemini"
    if "google" in provider_lower:
        return "google"
    if "anthropic" in provider_lower:
        return "anthropic"
    if "claude" in provider_lower:
        return "claude"
    if "bedrock" in provider_lower:
        return "bedrock"
    if "aws" in provider_lower:
        return "aws"
    if "azure" in provider_lower:
        return "azure"
    if "gpt" in provider_lower:
        return "gpt"

    return "openai"

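# Editor's note: an illustrative sketch, not part of the original module. The
# matching above is substring-based, so fully qualified model strings also
# normalize; anything unrecognized falls through to "openai".
def _demo_normalize_provider() -> None:
    assert _normalize_provider("gemini/gemini-2.0-flash") == "gemini"
    assert _normalize_provider("gpt-4o") == "gpt"
    assert _normalize_provider("some-unknown") == "openai"
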
def format_multimodal_content(
    files: dict[str, FileInput],
    provider: str | None = None,
    api: str | None = None,
    prefer_upload: bool | None = None,
) -> list[dict[str, Any]]:
    """Format files as provider-specific multimodal content blocks.

    This is the main high-level API for converting files to content blocks
    suitable for sending to LLM providers. It handles:
    - File processing according to provider constraints
    - Resolution (upload vs inline) based on provider capabilities
    - Formatting into provider-specific content block structures

    Args:
        files: Dictionary mapping file names to FileInput objects.
        provider: Provider name (e.g., "openai", "anthropic", "bedrock", "gemini").
        api: API variant (e.g., "responses" for OpenAI Responses API).
        prefer_upload: Whether to prefer uploading files instead of inlining.
            If None, uses provider-specific defaults.

    Returns:
        List of content blocks in the provider's expected format.

    Example:
        >>> from crewai_files import format_multimodal_content, ImageFile
        >>> files = {"photo": ImageFile(source="image.jpg")}
        >>> blocks = format_multimodal_content(files, "openai")
        >>> # For OpenAI Responses API:
        >>> blocks = format_multimodal_content(files, "openai", api="responses")
        >>> # With file upload:
        >>> blocks = format_multimodal_content(
        ...     files, "openai", api="responses", prefer_upload=True
        ... )
    """
    if not files:
        return []

    provider_type = _normalize_provider(provider)

    processor = FileProcessor(constraints=provider_type)
    processed_files = processor.process_files(files)

    if not processed_files:
        return []

    constraints = get_constraints_for_provider(provider_type)
    supported_types = _get_supported_types(constraints)
    supported_files = _filter_supported_files(processed_files, supported_types)

    if not supported_files:
        return []

    config = _get_resolver_config(provider_type, prefer_upload)
    upload_cache = get_upload_cache()
    resolver = FileResolver(config=config, upload_cache=upload_cache)

    formatter = _get_formatter(provider_type, api)
    content_blocks: list[dict[str, Any]] = []

    for name, file_input in supported_files.items():
        resolved = resolver.resolve(file_input, provider_type)
        block = _format_block(formatter, file_input, resolved, name)
        if block is not None:
            content_blocks.append(block)

    return content_blocks


async def aformat_multimodal_content(
    files: dict[str, FileInput],
    provider: str | None = None,
    api: str | None = None,
    prefer_upload: bool | None = None,
) -> list[dict[str, Any]]:
    """Async format files as provider-specific multimodal content blocks.

    Async version of format_multimodal_content with parallel file resolution.

    Args:
        files: Dictionary mapping file names to FileInput objects.
        provider: Provider name (e.g., "openai", "anthropic", "bedrock", "gemini").
        api: API variant (e.g., "responses" for OpenAI Responses API).
        prefer_upload: Whether to prefer uploading files instead of inlining.
            If None, uses provider-specific defaults.

    Returns:
        List of content blocks in the provider's expected format.
    """
    if not files:
        return []

    provider_type = _normalize_provider(provider)

    processor = FileProcessor(constraints=provider_type)
    processed_files = await processor.aprocess_files(files)

    if not processed_files:
        return []

    constraints = get_constraints_for_provider(provider_type)
    supported_types = _get_supported_types(constraints)
    supported_files = _filter_supported_files(processed_files, supported_types)

    if not supported_files:
        return []

    config = _get_resolver_config(provider_type, prefer_upload)
    upload_cache = get_upload_cache()
    resolver = FileResolver(config=config, upload_cache=upload_cache)

    resolved_files = await resolver.aresolve_files(supported_files, provider_type)

    formatter = _get_formatter(provider_type, api)
    content_blocks: list[dict[str, Any]] = []

    for name, resolved in resolved_files.items():
        file_input = supported_files[name]
        block = _format_block(formatter, file_input, resolved, name)
        if block is not None:
            content_blocks.append(block)

    return content_blocks


def _get_supported_types(
    constraints: Any | None,
) -> list[str]:
    """Get list of supported MIME type prefixes from constraints.

    Args:
        constraints: Provider constraints.

    Returns:
        List of MIME type prefixes (e.g., ["image/", "application/pdf"]).
    """
    if constraints is None:
        return []

    supported: list[str] = []
    if constraints.image is not None:
        supported.append("image/")
    if constraints.pdf is not None:
        supported.append("application/pdf")
    if constraints.audio is not None:
        supported.append("audio/")
    if constraints.video is not None:
        supported.append("video/")
    if constraints.text is not None:
        supported.append("text/")
        supported.append("application/json")
        supported.append("application/xml")
        supported.append("application/x-yaml")
    return supported


def _filter_supported_files(
    files: dict[str, FileInput],
    supported_types: list[str],
) -> dict[str, FileInput]:
    """Filter files to those with supported content types.

    Args:
        files: All files.
        supported_types: MIME type prefixes to allow.

    Returns:
        Filtered dictionary of supported files.
    """
    return {
        name: f
        for name, f in files.items()
        if any(f.content_type.startswith(t) for t in supported_types)
    }


def _get_resolver_config(
    provider_lower: str,
    prefer_upload_override: bool | None = None,
) -> FileResolverConfig:
    """Get resolver config for provider.

    Args:
        provider_lower: Lowercase provider name.
        prefer_upload_override: Override for prefer_upload setting.
            If None, uses provider-specific defaults.

    Returns:
        Configured FileResolverConfig.
    """
    if "bedrock" in provider_lower:
        s3_bucket = os.environ.get("CREWAI_BEDROCK_S3_BUCKET")
        prefer_upload = (
            prefer_upload_override
            if prefer_upload_override is not None
            else bool(s3_bucket)
        )
        return FileResolverConfig(
            prefer_upload=prefer_upload, use_bytes_for_bedrock=True
        )

    prefer_upload = (
        prefer_upload_override if prefer_upload_override is not None else False
    )
    return FileResolverConfig(prefer_upload=prefer_upload)


def _get_formatter(
    provider_lower: str,
    api: str | None = None,
) -> (
    OpenAIFormatter
    | OpenAIResponsesFormatter
    | AnthropicFormatter
    | BedrockFormatter
    | GeminiFormatter
):
    """Get formatter for provider.

    Args:
        provider_lower: Lowercase provider name.
        api: API variant (e.g., "responses" for OpenAI Responses API).

    Returns:
        Provider-specific formatter instance.
    """
    if "anthropic" in provider_lower or "claude" in provider_lower:
        return AnthropicFormatter()

    if "bedrock" in provider_lower or "aws" in provider_lower:
        s3_bucket_owner = os.environ.get("CREWAI_BEDROCK_S3_BUCKET_OWNER")
        return BedrockFormatter(s3_bucket_owner=s3_bucket_owner)

    if "gemini" in provider_lower or "google" in provider_lower:
        return GeminiFormatter()

    if api == "responses":
        return OpenAIResponsesFormatter()

    return OpenAIFormatter()


def _format_block(
    formatter: OpenAIFormatter
    | OpenAIResponsesFormatter
    | AnthropicFormatter
    | BedrockFormatter
    | GeminiFormatter,
    file_input: FileInput,
    resolved: Any,
    name: str,
) -> dict[str, Any] | None:
    """Format a single file block using the appropriate formatter.

    Args:
        formatter: Provider formatter.
        file_input: Original file input.
        resolved: Resolved file.
        name: File name.

    Returns:
        Content block dict or None.
    """
    if isinstance(formatter, BedrockFormatter):
        return formatter.format_block(file_input, resolved, name=name)
    if isinstance(formatter, AnthropicFormatter):
        return formatter.format_block(file_input, resolved)
    if isinstance(formatter, OpenAIResponsesFormatter):
        return formatter.format_block(resolved, file_input.content_type)
    if isinstance(formatter, (OpenAIFormatter, GeminiFormatter)):
        return formatter.format_block(resolved)
    raise TypeError(f"Unknown formatter type: {type(formatter).__name__}")
@@ -1,200 +0,0 @@
"""Bedrock content block formatter."""

from __future__ import annotations

import base64
from typing import Any

from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFileType,
    UrlReference,
)
from crewai_files.core.types import FileInput


_DOCUMENT_FORMATS: dict[str, str] = {
    "application/pdf": "pdf",
    "text/csv": "csv",
    "text/plain": "txt",
    "text/markdown": "md",
    "text/html": "html",
    "application/msword": "doc",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
    "application/vnd.ms-excel": "xls",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
}

_VIDEO_FORMATS: dict[str, str] = {
    "video/mp4": "mp4",
    "video/quicktime": "mov",
    "video/x-matroska": "mkv",
    "video/webm": "webm",
    "video/x-flv": "flv",
    "video/mpeg": "mpeg",
    "video/3gpp": "three_gp",
}


class BedrockFormatter:
    """Formats resolved files into Bedrock Converse API content blocks."""

    def __init__(self, s3_bucket_owner: str | None = None) -> None:
        """Initialize formatter.

        Args:
            s3_bucket_owner: Optional S3 bucket owner for file references.
        """
        self.s3_bucket_owner = s3_bucket_owner

    def format_block(
        self,
        file: FileInput,
        resolved: ResolvedFileType,
        name: str | None = None,
    ) -> dict[str, Any] | None:
        """Format a resolved file into a Bedrock content block.

        Args:
            file: Original file input with metadata.
            resolved: Resolved file.
            name: File name (required for document blocks).

        Returns:
            Content block dict or None if not supported.
        """
        content_type = file.content_type

        if isinstance(resolved, FileReference):
            if not resolved.file_uri:
                raise ValueError("Bedrock requires file_uri for FileReference (S3 URI)")
            return self._format_s3_block(content_type, resolved.file_uri, name)

        if isinstance(resolved, InlineBytes):
            return self._format_bytes_block(content_type, resolved.data, name)

        if isinstance(resolved, InlineBase64):
            file_bytes = base64.b64decode(resolved.data)
            return self._format_bytes_block(content_type, file_bytes, name)

        if isinstance(resolved, UrlReference):
            raise ValueError(
                "Bedrock does not support URL references - resolve to bytes first"
            )

        raise TypeError(f"Unexpected resolved type: {type(resolved).__name__}")

    def _format_s3_block(
        self,
        content_type: str,
        file_uri: str,
        name: str | None,
    ) -> dict[str, Any] | None:
        """Format block with S3 location source.

        Args:
            content_type: MIME type.
            file_uri: S3 URI.
            name: File name for documents.

        Returns:
            Content block dict or None.
        """
        s3_location: dict[str, Any] = {"uri": file_uri}
        if self.s3_bucket_owner:
            s3_location["bucketOwner"] = self.s3_bucket_owner

        if content_type.startswith("image/"):
            return {
                "image": {
                    "format": self._get_image_format(content_type),
                    "source": {"s3Location": s3_location},
                }
            }

        if content_type.startswith("video/"):
            video_format = _VIDEO_FORMATS.get(content_type)
            if video_format:
                return {
                    "video": {
                        "format": video_format,
                        "source": {"s3Location": s3_location},
                    }
                }
            return None

        doc_format = _DOCUMENT_FORMATS.get(content_type)
        if doc_format:
            return {
                "document": {
                    "name": name or "document",
                    "format": doc_format,
                    "source": {"s3Location": s3_location},
                }
            }

        return None

    def _format_bytes_block(
        self,
        content_type: str,
        file_bytes: bytes,
        name: str | None,
    ) -> dict[str, Any] | None:
        """Format block with inline bytes source.

        Args:
            content_type: MIME type.
            file_bytes: Raw file bytes.
            name: File name for documents.

        Returns:
            Content block dict or None.
        """
        if content_type.startswith("image/"):
            return {
                "image": {
                    "format": self._get_image_format(content_type),
                    "source": {"bytes": file_bytes},
                }
            }

        if content_type.startswith("video/"):
            video_format = _VIDEO_FORMATS.get(content_type)
            if video_format:
                return {
                    "video": {
                        "format": video_format,
                        "source": {"bytes": file_bytes},
                    }
                }
            return None

        doc_format = _DOCUMENT_FORMATS.get(content_type)
        if doc_format:
            return {
                "document": {
                    "name": name or "document",
                    "format": doc_format,
                    "source": {"bytes": file_bytes},
                }
            }

        return None

    @staticmethod
    def _get_image_format(content_type: str) -> str:
        """Get Bedrock image format from content type.

        Args:
            content_type: MIME type.

        Returns:
            Format string for Bedrock.
        """
        media_type = content_type.split("/")[-1]
        if media_type == "jpg":
            return "jpeg"
        return media_type
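# Editor's note: an illustrative sketch, not part of the original module,
# showing the Converse document block produced for a CSV resolved to bytes:
#
#     BedrockFormatter().format_block(csv_file, InlineBytes(...), name="sales")
#     # -> {"document": {"name": "sales", "format": "csv",
#     #                  "source": {"bytes": b"..."}}}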
@@ -1,67 +0,0 @@
"""Gemini content block formatter."""

from __future__ import annotations

import base64
from typing import Any

from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFileType,
    UrlReference,
)


class GeminiFormatter:
    """Formats resolved files into Gemini content blocks."""

    @staticmethod
    def format_block(resolved: ResolvedFileType) -> dict[str, Any]:
        """Format a resolved file into a Gemini content block.

        Args:
            resolved: Resolved file.

        Returns:
            Content block dict.

        Raises:
            TypeError: If resolved type is not supported.
        """
        if isinstance(resolved, FileReference):
            if not resolved.file_uri:
                raise ValueError("Gemini requires file_uri for FileReference")
            return {
                "fileData": {
                    "mimeType": resolved.content_type,
                    "fileUri": resolved.file_uri,
                }
            }

        if isinstance(resolved, UrlReference):
            return {
                "fileData": {
                    "mimeType": resolved.content_type,
                    "fileUri": resolved.url,
                }
            }

        if isinstance(resolved, InlineBase64):
            return {
                "inlineData": {
                    "mimeType": resolved.content_type,
                    "data": resolved.data,
                }
            }

        if isinstance(resolved, InlineBytes):
            return {
                "inlineData": {
                    "mimeType": resolved.content_type,
                    "data": base64.b64encode(resolved.data).decode("ascii"),
                }
            }

        raise TypeError(f"Unexpected resolved type: {type(resolved).__name__}")
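# Editor's note: an illustrative sketch, not part of the original module.
# References become fileData parts, embedded content becomes inlineData:
#
#     GeminiFormatter.format_block(inline_png)    # an InlineBytes instance
#     # -> {"inlineData": {"mimeType": "image/png", "data": "<base64>"}}
#     GeminiFormatter.format_block(uploaded_ref)  # a FileReference instance
#     # -> {"fileData": {"mimeType": "image/png", "fileUri": "files/abc123"}}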
@@ -1,149 +0,0 @@
"""OpenAI content block formatter."""

from __future__ import annotations

import base64
from typing import Any

from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFileType,
    UrlReference,
)


class OpenAIResponsesFormatter:
    """Formats resolved files into OpenAI Responses API content blocks.

    The Responses API uses a different format than Chat Completions:
    - Images use `type: "input_image"` with `file_id` or `image_url`
    - PDFs use `type: "input_file"` with `file_id`, `file_url`, or `file_data`
    """

    @staticmethod
    def format_block(resolved: ResolvedFileType, content_type: str) -> dict[str, Any]:
        """Format a resolved file into an OpenAI Responses API content block.

        Args:
            resolved: Resolved file.
            content_type: MIME type of the file.

        Returns:
            Content block dict.

        Raises:
            TypeError: If resolved type is not supported.
        """
        is_image = content_type.startswith("image/")
        is_pdf = content_type == "application/pdf"

        if isinstance(resolved, FileReference):
            if is_image:
                return {
                    "type": "input_image",
                    "file_id": resolved.file_id,
                }
            if is_pdf:
                return {
                    "type": "input_file",
                    "file_id": resolved.file_id,
                }
            raise TypeError(
                f"Unsupported content type for Responses API: {content_type}"
            )

        if isinstance(resolved, UrlReference):
            if is_image:
                return {
                    "type": "input_image",
                    "image_url": resolved.url,
                }
            if is_pdf:
                return {
                    "type": "input_file",
                    "file_url": resolved.url,
                }
            raise TypeError(
                f"Unsupported content type for Responses API: {content_type}"
            )

        if isinstance(resolved, InlineBase64):
            if is_image:
                return {
                    "type": "input_image",
                    "image_url": f"data:{resolved.content_type};base64,{resolved.data}",
                }
            if is_pdf:
                return {
                    "type": "input_file",
                    "file_data": f"data:{resolved.content_type};base64,{resolved.data}",
                }
            raise TypeError(
                f"Unsupported content type for Responses API: {content_type}"
            )

        if isinstance(resolved, InlineBytes):
            data = base64.b64encode(resolved.data).decode("ascii")
            if is_image:
                return {
                    "type": "input_image",
                    "image_url": f"data:{resolved.content_type};base64,{data}",
                }
            if is_pdf:
                return {
                    "type": "input_file",
                    "file_data": f"data:{resolved.content_type};base64,{data}",
                }
            raise TypeError(
                f"Unsupported content type for Responses API: {content_type}"
            )

        raise TypeError(f"Unexpected resolved type: {type(resolved).__name__}")

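# Editor's note: an illustrative sketch, not part of the original module. The
# same resolved file maps to different Responses API shapes by content type:
#
#     OpenAIResponsesFormatter.format_block(url_ref, "image/png")
#     # -> {"type": "input_image", "image_url": "https://example.com/img.png"}
#     OpenAIResponsesFormatter.format_block(file_ref, "application/pdf")
#     # -> {"type": "input_file", "file_id": "file-abc123"}
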
class OpenAIFormatter:
    """Formats resolved files into OpenAI content blocks."""

    @staticmethod
    def format_block(resolved: ResolvedFileType) -> dict[str, Any]:
        """Format a resolved file into an OpenAI content block.

        Args:
            resolved: Resolved file.

        Returns:
            Content block dict.

        Raises:
            TypeError: If resolved type is not supported.
        """
        if isinstance(resolved, FileReference):
            return {
                "type": "file",
                "file": {"file_id": resolved.file_id},
            }

        if isinstance(resolved, UrlReference):
            return {
                "type": "image_url",
                "image_url": {"url": resolved.url},
            }

        if isinstance(resolved, InlineBase64):
            return {
                "type": "image_url",
                "image_url": {
                    "url": f"data:{resolved.content_type};base64,{resolved.data}"
                },
            }

        if isinstance(resolved, InlineBytes):
            data = base64.b64encode(resolved.data).decode("ascii")
            return {
                "type": "image_url",
                "image_url": {"url": f"data:{resolved.content_type};base64,{data}"},
            }

        raise TypeError(f"Unexpected resolved type: {type(resolved).__name__}")
@@ -1,62 +0,0 @@
"""File processing module for multimodal content handling.

This module provides validation, transformation, and processing utilities
for files used in multimodal LLM interactions.
"""

from crewai_files.processing.constraints import (
    ANTHROPIC_CONSTRAINTS,
    BEDROCK_CONSTRAINTS,
    GEMINI_CONSTRAINTS,
    OPENAI_CONSTRAINTS,
    AudioConstraints,
    ImageConstraints,
    PDFConstraints,
    ProviderConstraints,
    VideoConstraints,
    get_constraints_for_provider,
)
from crewai_files.processing.enums import FileHandling
from crewai_files.processing.exceptions import (
    FileProcessingError,
    FileTooLargeError,
    FileValidationError,
    ProcessingDependencyError,
    UnsupportedFileTypeError,
)
from crewai_files.processing.processor import FileProcessor
from crewai_files.processing.validators import (
    validate_audio,
    validate_file,
    validate_image,
    validate_pdf,
    validate_text,
    validate_video,
)


__all__ = [
    "ANTHROPIC_CONSTRAINTS",
    "BEDROCK_CONSTRAINTS",
    "GEMINI_CONSTRAINTS",
    "OPENAI_CONSTRAINTS",
    "AudioConstraints",
    "FileHandling",
    "FileProcessingError",
    "FileProcessor",
    "FileTooLargeError",
    "FileValidationError",
    "ImageConstraints",
    "PDFConstraints",
    "ProcessingDependencyError",
    "ProviderConstraints",
    "UnsupportedFileTypeError",
    "VideoConstraints",
    "get_constraints_for_provider",
    "validate_audio",
    "validate_file",
    "validate_image",
    "validate_pdf",
    "validate_text",
    "validate_video",
]
@@ -1,331 +0,0 @@
"""Provider-specific file constraints for multimodal content."""

from dataclasses import dataclass
from functools import lru_cache
from typing import Literal

from crewai_files.core.types import (
    AudioMimeType,
    ImageMimeType,
    TextContentType,
    VideoMimeType,
)


ProviderName = Literal[
    "anthropic",
    "openai",
    "gemini",
    "bedrock",
    "azure",
]

DEFAULT_IMAGE_FORMATS: tuple[ImageMimeType, ...] = (
    "image/png",
    "image/jpeg",
    "image/gif",
    "image/webp",
)

GEMINI_IMAGE_FORMATS: tuple[ImageMimeType, ...] = (
    "image/png",
    "image/jpeg",
    "image/gif",
    "image/webp",
    "image/heic",
    "image/heif",
)

DEFAULT_AUDIO_FORMATS: tuple[AudioMimeType, ...] = (
    "audio/mp3",
    "audio/mpeg",
    "audio/wav",
    "audio/ogg",
    "audio/flac",
    "audio/aac",
    "audio/m4a",
)

GEMINI_AUDIO_FORMATS: tuple[AudioMimeType, ...] = (
    "audio/mp3",
    "audio/mpeg",
    "audio/wav",
    "audio/ogg",
    "audio/flac",
    "audio/aac",
    "audio/m4a",
    "audio/opus",
)

DEFAULT_VIDEO_FORMATS: tuple[VideoMimeType, ...] = (
    "video/mp4",
    "video/mpeg",
    "video/webm",
    "video/quicktime",
)

GEMINI_VIDEO_FORMATS: tuple[VideoMimeType, ...] = (
    "video/mp4",
    "video/mpeg",
    "video/webm",
    "video/quicktime",
    "video/x-msvideo",
    "video/x-flv",
)

DEFAULT_TEXT_FORMATS: tuple[TextContentType, ...] = (
    "text/plain",
    "text/markdown",
    "text/csv",
    "application/json",
    "text/xml",
    "text/html",
)

GEMINI_TEXT_FORMATS: tuple[TextContentType, ...] = (
    "text/plain",
    "text/markdown",
    "text/csv",
    "application/json",
    "application/xml",
    "text/xml",
    "application/x-yaml",
    "text/yaml",
    "text/html",
)


@dataclass(frozen=True)
class ImageConstraints:
    """Constraints for image files.

    Attributes:
        max_size_bytes: Maximum file size in bytes.
        max_width: Maximum image width in pixels.
        max_height: Maximum image height in pixels.
        max_images_per_request: Maximum number of images per request.
        supported_formats: Supported image MIME types.
    """

    max_size_bytes: int
    max_width: int | None = None
    max_height: int | None = None
    max_images_per_request: int | None = None
    supported_formats: tuple[ImageMimeType, ...] = DEFAULT_IMAGE_FORMATS


@dataclass(frozen=True)
class PDFConstraints:
    """Constraints for PDF files.

    Attributes:
        max_size_bytes: Maximum file size in bytes.
        max_pages: Maximum number of pages.
    """

    max_size_bytes: int
    max_pages: int | None = None


@dataclass(frozen=True)
class AudioConstraints:
    """Constraints for audio files.

    Attributes:
        max_size_bytes: Maximum file size in bytes.
        max_duration_seconds: Maximum audio duration in seconds.
        supported_formats: Supported audio MIME types.
    """

    max_size_bytes: int
    max_duration_seconds: int | None = None
    supported_formats: tuple[AudioMimeType, ...] = DEFAULT_AUDIO_FORMATS


@dataclass(frozen=True)
class VideoConstraints:
    """Constraints for video files.

    Attributes:
        max_size_bytes: Maximum file size in bytes.
        max_duration_seconds: Maximum video duration in seconds.
        supported_formats: Supported video MIME types.
    """

    max_size_bytes: int
    max_duration_seconds: int | None = None
    supported_formats: tuple[VideoMimeType, ...] = DEFAULT_VIDEO_FORMATS


@dataclass(frozen=True)
class TextConstraints:
    """Constraints for text files.

    Attributes:
        max_size_bytes: Maximum file size in bytes.
        supported_formats: Supported text MIME types.
    """

    max_size_bytes: int
    supported_formats: tuple[TextContentType, ...] = DEFAULT_TEXT_FORMATS


@dataclass(frozen=True)
class ProviderConstraints:
    """Complete set of constraints for a provider.

    Attributes:
        name: Provider name identifier.
        image: Image file constraints.
        pdf: PDF file constraints.
        audio: Audio file constraints.
        video: Video file constraints.
        text: Text file constraints.
        general_max_size_bytes: Maximum size for any file type.
        supports_file_upload: Whether the provider supports file upload APIs.
        file_upload_threshold_bytes: Size threshold above which to use file upload.
        supports_url_references: Whether the provider supports URL-based file references.
    """

    name: ProviderName
    image: ImageConstraints | None = None
    pdf: PDFConstraints | None = None
    audio: AudioConstraints | None = None
    video: VideoConstraints | None = None
    text: TextConstraints | None = None
    general_max_size_bytes: int | None = None
    supports_file_upload: bool = False
    file_upload_threshold_bytes: int | None = None
    supports_url_references: bool = False

ANTHROPIC_CONSTRAINTS = ProviderConstraints(
|
||||
name="anthropic",
|
||||
image=ImageConstraints(
|
||||
max_size_bytes=5_242_880, # 5 MB per image
|
||||
max_width=8000,
|
||||
max_height=8000,
|
||||
max_images_per_request=100,
|
||||
),
|
||||
pdf=PDFConstraints(
|
||||
max_size_bytes=33_554_432, # 32 MB request size limit
|
||||
max_pages=100,
|
||||
),
|
||||
supports_file_upload=True,
|
||||
file_upload_threshold_bytes=5_242_880,
|
||||
supports_url_references=True,
|
||||
)
|
||||
|
||||
OPENAI_CONSTRAINTS = ProviderConstraints(
|
||||
name="openai",
|
||||
image=ImageConstraints(
|
||||
max_size_bytes=20_971_520,
|
||||
max_images_per_request=10,
|
||||
),
|
||||
pdf=PDFConstraints(
|
||||
max_size_bytes=33_554_432, # 32 MB total across all file inputs
|
||||
max_pages=100,
|
||||
),
|
||||
audio=AudioConstraints(
|
||||
max_size_bytes=26_214_400, # 25 MB - whisper limit
|
||||
max_duration_seconds=1500, # 25 minutes, arbitrary-ish, this is from the transcriptions limit
|
||||
),
|
||||
supports_file_upload=True,
|
||||
file_upload_threshold_bytes=5_242_880,
|
||||
supports_url_references=True,
|
||||
)
|
||||
|
||||
GEMINI_CONSTRAINTS = ProviderConstraints(
|
||||
name="gemini",
|
||||
image=ImageConstraints(
|
||||
max_size_bytes=104_857_600,
|
||||
supported_formats=GEMINI_IMAGE_FORMATS,
|
||||
),
|
||||
pdf=PDFConstraints(
|
||||
max_size_bytes=52_428_800,
|
||||
),
|
||||
audio=AudioConstraints(
|
||||
max_size_bytes=104_857_600,
|
||||
max_duration_seconds=34200, # 9.5 hours
|
||||
supported_formats=GEMINI_AUDIO_FORMATS,
|
||||
),
|
||||
video=VideoConstraints(
|
||||
max_size_bytes=2_147_483_648,
|
||||
max_duration_seconds=3600, # 1 hour at default resolution
|
||||
supported_formats=GEMINI_VIDEO_FORMATS,
|
||||
),
|
||||
text=TextConstraints(
|
||||
max_size_bytes=104_857_600,
|
||||
supported_formats=GEMINI_TEXT_FORMATS,
|
||||
),
|
||||
supports_file_upload=True,
|
||||
file_upload_threshold_bytes=20_971_520,
|
||||
supports_url_references=True,
|
||||
)
|
||||
|
||||
BEDROCK_CONSTRAINTS = ProviderConstraints(
|
||||
name="bedrock",
|
||||
image=ImageConstraints(
|
||||
max_size_bytes=4_608_000,
|
||||
max_width=8000,
|
||||
max_height=8000,
|
||||
),
|
||||
pdf=PDFConstraints(
|
||||
max_size_bytes=3_840_000,
|
||||
max_pages=100,
|
||||
),
|
||||
supports_url_references=True, # S3 URIs supported
|
||||
)
|
||||
|
||||
AZURE_CONSTRAINTS = ProviderConstraints(
|
||||
name="azure",
|
||||
image=ImageConstraints(
|
||||
max_size_bytes=20_971_520,
|
||||
max_images_per_request=10,
|
||||
),
|
||||
audio=AudioConstraints(
|
||||
max_size_bytes=26_214_400, # 25 MB - same as openai
|
||||
max_duration_seconds=1500, # 25 minutes - same as openai
|
||||
),
|
||||
supports_url_references=True,
|
||||
)
|
||||
|
||||
|
||||
_PROVIDER_CONSTRAINTS_MAP: dict[str, ProviderConstraints] = {
|
||||
"anthropic": ANTHROPIC_CONSTRAINTS,
|
||||
"openai": OPENAI_CONSTRAINTS,
|
||||
"gemini": GEMINI_CONSTRAINTS,
|
||||
"bedrock": BEDROCK_CONSTRAINTS,
|
||||
"azure": AZURE_CONSTRAINTS,
|
||||
"claude": ANTHROPIC_CONSTRAINTS,
|
||||
"gpt": OPENAI_CONSTRAINTS,
|
||||
"google": GEMINI_CONSTRAINTS,
|
||||
"aws": BEDROCK_CONSTRAINTS,
|
||||
}
|
||||
|
||||
|
||||
@lru_cache(maxsize=32)
|
||||
def get_constraints_for_provider(
|
||||
provider: str | ProviderConstraints,
|
||||
) -> ProviderConstraints | None:
|
||||
"""Get constraints for a provider by name or return if already ProviderConstraints.
|
||||
|
||||
Args:
|
||||
provider: Provider name string or ProviderConstraints instance.
|
||||
|
||||
Returns:
|
||||
ProviderConstraints for the provider, or None if not found.
|
||||
"""
|
||||
if isinstance(provider, ProviderConstraints):
|
||||
return provider
|
||||
|
||||
provider_lower = provider.lower()
|
||||
|
||||
if provider_lower in _PROVIDER_CONSTRAINTS_MAP:
|
||||
return _PROVIDER_CONSTRAINTS_MAP[provider_lower]
|
||||
|
||||
for key, constraints in _PROVIDER_CONSTRAINTS_MAP.items():
|
||||
if key in provider_lower:
|
||||
return constraints
|
||||
|
||||
return None
|
||||
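
# Example (illustrative sketch, not part of the original module): looking up
# constraints by provider name. "gpt-4o" is a hypothetical model string; it
# resolves via the substring fallback on the "gpt" key.
#
#     from crewai_files.processing.constraints import (
#         OPENAI_CONSTRAINTS,
#         get_constraints_for_provider,
#     )
#
#     assert get_constraints_for_provider("openai") is OPENAI_CONSTRAINTS
#     assert get_constraints_for_provider("gpt-4o") is OPENAI_CONSTRAINTS
#     assert get_constraints_for_provider("unknown-provider") is None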
@@ -1,19 +0,0 @@
"""Enums for file processing configuration."""

from enum import Enum


class FileHandling(Enum):
    """Defines how files exceeding provider limits should be handled.

    Attributes:
        STRICT: Fail with an error if a file exceeds limits.
        AUTO: Automatically resize, compress, or optimize to fit limits.
        WARN: Log a warning but attempt to process anyway.
        CHUNK: Split large files into smaller pieces.
    """

    STRICT = "strict"
    AUTO = "auto"
    WARN = "warn"
    CHUNK = "chunk"
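
# Example (illustrative sketch, not part of the original module): the enum
# round-trips through its string value, which is how string-valued modes on
# files are normalized elsewhere in the package.
#
#     from crewai_files.processing.enums import FileHandling
#
#     assert FileHandling("strict") is FileHandling.STRICT
#     assert FileHandling.CHUNK.value == "chunk"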
@@ -1,145 +0,0 @@
"""Exceptions for file processing operations."""


class FileProcessingError(Exception):
    """Base exception for file processing errors."""

    def __init__(self, message: str, file_name: str | None = None) -> None:
        """Initialize the exception.

        Args:
            message: Error message describing the issue.
            file_name: Optional name of the file that caused the error.
        """
        self.file_name = file_name
        super().__init__(message)


class FileValidationError(FileProcessingError):
    """Raised when file validation fails."""


class FileTooLargeError(FileValidationError):
    """Raised when a file exceeds the maximum allowed size."""

    def __init__(
        self,
        message: str,
        file_name: str | None = None,
        actual_size: int | None = None,
        max_size: int | None = None,
    ) -> None:
        """Initialize the exception.

        Args:
            message: Error message describing the issue.
            file_name: Optional name of the file that caused the error.
            actual_size: The actual size of the file in bytes.
            max_size: The maximum allowed size in bytes.
        """
        self.actual_size = actual_size
        self.max_size = max_size
        super().__init__(message, file_name)


class UnsupportedFileTypeError(FileValidationError):
    """Raised when a file type is not supported by the provider."""

    def __init__(
        self,
        message: str,
        file_name: str | None = None,
        content_type: str | None = None,
    ) -> None:
        """Initialize the exception.

        Args:
            message: Error message describing the issue.
            file_name: Optional name of the file that caused the error.
            content_type: The content type that is not supported.
        """
        self.content_type = content_type
        super().__init__(message, file_name)


class ProcessingDependencyError(FileProcessingError):
    """Raised when a required processing dependency is not installed."""

    def __init__(
        self,
        message: str,
        dependency: str,
        install_command: str | None = None,
    ) -> None:
        """Initialize the exception.

        Args:
            message: Error message describing the issue.
            dependency: Name of the missing dependency.
            install_command: Optional command to install the dependency.
        """
        self.dependency = dependency
        self.install_command = install_command
        super().__init__(message)


class TransientFileError(FileProcessingError):
    """Transient error that may succeed on retry (network, timeout)."""


class PermanentFileError(FileProcessingError):
    """Permanent error that will not succeed on retry (auth, format)."""


class UploadError(FileProcessingError):
    """Base exception for upload errors."""


class TransientUploadError(UploadError, TransientFileError):
    """Upload failed but may succeed on retry (network issues, rate limits)."""


class PermanentUploadError(UploadError, PermanentFileError):
    """Upload failed permanently (auth failure, invalid file, unsupported type)."""


def classify_upload_error(e: Exception, filename: str | None = None) -> Exception:
    """Classify an exception as a transient or permanent upload error.

    Analyzes the exception type name and status code to determine whether
    the error is likely transient (retryable) or permanent.

    Args:
        e: The exception to classify.
        filename: Optional filename for error context.

    Returns:
        A TransientUploadError or PermanentUploadError wrapping the original.
    """
    error_type = type(e).__name__

    if "RateLimit" in error_type or "APIConnection" in error_type:
        return TransientUploadError(f"Transient upload error: {e}", file_name=filename)
    if "Authentication" in error_type or "Permission" in error_type:
        return PermanentUploadError(
            f"Authentication/permission error: {e}", file_name=filename
        )
    if "BadRequest" in error_type or "InvalidRequest" in error_type:
        return PermanentUploadError(f"Invalid request: {e}", file_name=filename)

    status_code = getattr(e, "status_code", None)
    if status_code is not None:
        if status_code >= 500 or status_code == 429:
            return TransientUploadError(
                f"Server error ({status_code}): {e}", file_name=filename
            )
        if status_code in (401, 403):
            return PermanentUploadError(
                f"Auth error ({status_code}): {e}", file_name=filename
            )
        if status_code == 400:
            return PermanentUploadError(
                f"Bad request ({status_code}): {e}", file_name=filename
            )

    return TransientUploadError(f"Upload failed: {e}", file_name=filename)
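
# Example (illustrative sketch, not part of the original module): using
# classify_upload_error to decide whether a failed upload is worth retrying.
# `do_upload` and `schedule_retry` are hypothetical callables standing in for
# a provider SDK call and a retry hook.
#
#     try:
#         do_upload()
#     except Exception as e:
#         classified = classify_upload_error(e, filename="report.pdf")
#         if isinstance(classified, TransientUploadError):
#             schedule_retry()
#         else:
#             raise classified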
@@ -1,346 +0,0 @@
"""FileProcessor for validating and transforming files based on provider constraints."""

import asyncio
from collections.abc import Sequence
import logging

from crewai_files.core.types import (
    AudioFile,
    File,
    FileInput,
    ImageFile,
    PDFFile,
    TextFile,
    VideoFile,
)
from crewai_files.processing.constraints import (
    ProviderConstraints,
    get_constraints_for_provider,
)
from crewai_files.processing.enums import FileHandling
from crewai_files.processing.exceptions import (
    FileProcessingError,
    FileTooLargeError,
    FileValidationError,
    UnsupportedFileTypeError,
)
from crewai_files.processing.transformers import (
    chunk_pdf,
    chunk_text,
    get_image_dimensions,
    get_pdf_page_count,
    optimize_image,
    resize_image,
)
from crewai_files.processing.validators import validate_file


logger = logging.getLogger(__name__)


class FileProcessor:
    """Processes files according to provider constraints and per-file handling mode.

    Validates files against provider-specific limits and optionally transforms
    them (resize, compress, chunk) to meet those limits. Each file specifies
    its own handling mode via `file.mode`.

    Attributes:
        constraints: Provider constraints for validation.
    """

    def __init__(
        self,
        constraints: ProviderConstraints | str | None = None,
    ) -> None:
        """Initialize the FileProcessor.

        Args:
            constraints: Provider constraints or provider name string.
                If None, validation is skipped.
        """
        if isinstance(constraints, str):
            resolved = get_constraints_for_provider(constraints)
            if resolved is None:
                logger.warning(
                    f"Unknown provider '{constraints}' - validation disabled"
                )
            self.constraints = resolved
        else:
            self.constraints = constraints

    def validate(self, file: FileInput) -> Sequence[str]:
        """Validate a file against provider constraints.

        Args:
            file: The file to validate.

        Returns:
            List of validation error messages (empty if valid).

        Raises:
            FileValidationError: If file.mode is STRICT and validation fails.
        """
        if self.constraints is None:
            return []

        mode = self._get_mode(file)
        raise_on_error = mode == FileHandling.STRICT
        return validate_file(file, self.constraints, raise_on_error=raise_on_error)

    @staticmethod
    def _get_mode(file: FileInput) -> FileHandling:
        """Get the handling mode for a file.

        Args:
            file: The file to get the mode for.

        Returns:
            The file's handling mode, defaulting to AUTO.
        """
        mode = getattr(file, "mode", None)
        if mode is None:
            return FileHandling.AUTO
        if isinstance(mode, str):
            return FileHandling(mode)
        if isinstance(mode, FileHandling):
            return mode
        return FileHandling.AUTO

    def process(self, file: FileInput) -> FileInput | Sequence[FileInput]:
        """Process a single file according to constraints and its handling mode.

        Args:
            file: The file to process.

        Returns:
            The processed file (possibly transformed) or a sequence of files
            if the file was chunked.

        Raises:
            FileProcessingError: If file.mode is STRICT and processing fails.
        """
        if self.constraints is None:
            return file

        mode = self._get_mode(file)

        try:
            errors = self.validate(file)

            if not errors:
                return file

            if mode == FileHandling.STRICT:
                raise FileValidationError("; ".join(errors), file_name=file.filename)

            if mode == FileHandling.WARN:
                for error in errors:
                    logger.warning(error)
                return file

            if mode == FileHandling.AUTO:
                return self._auto_process(file)

            if mode == FileHandling.CHUNK:
                return self._chunk_process(file)

            return file

        except (FileValidationError, FileTooLargeError, UnsupportedFileTypeError):
            raise
        except Exception as e:
            logger.error(f"Error processing file '{file.filename}': {e}")
            if mode == FileHandling.STRICT:
                raise FileProcessingError(str(e), file_name=file.filename) from e
            return file

    def process_files(
        self,
        files: dict[str, FileInput],
    ) -> dict[str, FileInput]:
        """Process multiple files according to constraints.

        Args:
            files: Dictionary mapping names to file inputs.

        Returns:
            Dictionary mapping names to processed files. If a file is chunked,
            multiple entries are created with indexed names.
        """
        result: dict[str, FileInput] = {}

        for name, file in files.items():
            processed = self.process(file)

            if isinstance(processed, Sequence) and not isinstance(
                processed, (str, bytes)
            ):
                for i, chunk in enumerate(processed):
                    chunk_name = f"{name}_chunk_{i}"
                    result[chunk_name] = chunk
            else:
                result[name] = processed

        return result

    async def aprocess_files(
        self,
        files: dict[str, FileInput],
        max_concurrency: int = 10,
    ) -> dict[str, FileInput]:
        """Async process multiple files in parallel.

        Args:
            files: Dictionary mapping names to file inputs.
            max_concurrency: Maximum number of concurrent processing tasks.

        Returns:
            Dictionary mapping names to processed files. If a file is chunked,
            multiple entries are created with indexed names.
        """
        semaphore = asyncio.Semaphore(max_concurrency)

        async def process_single(
            key: str, input_file: FileInput
        ) -> tuple[str, FileInput | Sequence[FileInput]]:
            """Process a single file with semaphore limiting."""
            async with semaphore:
                loop = asyncio.get_running_loop()
                result = await loop.run_in_executor(None, self.process, input_file)
                return key, result

        tasks = [process_single(n, f) for n, f in files.items()]
        gather_results = await asyncio.gather(*tasks, return_exceptions=True)

        output: dict[str, FileInput] = {}
        for item in gather_results:
            if isinstance(item, BaseException):
                logger.error(f"Processing failed: {item}")
                continue
            entry_name, processed = item
            if isinstance(processed, Sequence) and not isinstance(
                processed, (str, bytes)
            ):
                for i, chunk in enumerate(processed):
                    output[f"{entry_name}_chunk_{i}"] = chunk
            elif isinstance(
                processed, (AudioFile, File, ImageFile, PDFFile, TextFile, VideoFile)
            ):
                output[entry_name] = processed

        return output

    def _auto_process(self, file: FileInput) -> FileInput:
        """Automatically resize/compress a file to meet constraints.

        Args:
            file: The file to process.

        Returns:
            The processed file.
        """
        if self.constraints is None:
            return file

        if isinstance(file, ImageFile) and self.constraints.image is not None:
            return self._auto_process_image(file)

        if isinstance(file, PDFFile) and self.constraints.pdf is not None:
            logger.warning(
                f"Cannot auto-compress PDF '{file.filename}'. "
                "Consider using CHUNK mode for large PDFs."
            )
            return file

        if isinstance(file, (AudioFile, VideoFile)):
            logger.warning(
                f"Auto-processing not supported for {type(file).__name__}. "
                "File will be used as-is."
            )
            return file

        return file

    def _auto_process_image(self, file: ImageFile) -> ImageFile:
        """Auto-process an image file.

        Args:
            file: The image file to process.

        Returns:
            The processed image file.
        """
        if self.constraints is None or self.constraints.image is None:
            return file

        image_constraints = self.constraints.image
        processed = file
        content = file.read()
        current_size = len(content)

        if image_constraints.max_width or image_constraints.max_height:
            dimensions = get_image_dimensions(file)
            if dimensions:
                width, height = dimensions
                max_w = image_constraints.max_width or width
                max_h = image_constraints.max_height or height

                if width > max_w or height > max_h:
                    try:
                        processed = resize_image(file, max_w, max_h)
                        content = processed.read()
                        current_size = len(content)
                    except Exception as e:
                        logger.warning(f"Failed to resize image: {e}")

        if current_size > image_constraints.max_size_bytes:
            try:
                processed = optimize_image(processed, image_constraints.max_size_bytes)
            except Exception as e:
                logger.warning(f"Failed to optimize image: {e}")

        return processed

    def _chunk_process(self, file: FileInput) -> FileInput | Sequence[FileInput]:
        """Split a file into chunks to meet constraints.

        Args:
            file: The file to chunk.

        Returns:
            The original file if chunking is not needed, or a sequence of chunked files.
        """
        if self.constraints is None:
            return file

        if isinstance(file, PDFFile) and self.constraints.pdf is not None:
            max_pages = self.constraints.pdf.max_pages
            if max_pages is not None:
                page_count = get_pdf_page_count(file)
                if page_count is not None and page_count > max_pages:
                    try:
                        return list(chunk_pdf(file, max_pages))
                    except Exception as e:
                        logger.warning(f"Failed to chunk PDF: {e}")
            return file

        if isinstance(file, TextFile):
            # Use general max size as a character-limit approximation
            max_size = self.constraints.general_max_size_bytes
            if max_size is not None:
                content = file.read()
                if len(content) > max_size:
                    try:
                        return list(chunk_text(file, max_size))
                    except Exception as e:
                        logger.warning(f"Failed to chunk text file: {e}")
            return file

        if isinstance(file, (ImageFile, AudioFile, VideoFile)):
            logger.warning(
                f"Chunking not supported for {type(file).__name__}. "
                "Consider using AUTO mode for images."
            )

        return file
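
# Example (illustrative sketch, not part of the original module), using the
# FileProcessor defined above. The TextFile payload is a stand-in.
#
#     from crewai_files.core.sources import FileBytes
#
#     processor = FileProcessor(constraints="openai")
#     note = TextFile(source=FileBytes(data=b"hello world", filename="notes.txt"))
#     processed = processor.process_files({"notes": note})
#     # Chunked results come back under indexed names such as "notes_chunk_0";
#     # async callers can use `await processor.aprocess_files(...)` instead.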
@@ -1,336 +0,0 @@
"""File transformation functions for resizing, optimizing, and chunking."""

from collections.abc import Iterator
import io
import logging

from crewai_files.core.sources import FileBytes
from crewai_files.core.types import ImageFile, PDFFile, TextFile
from crewai_files.processing.exceptions import ProcessingDependencyError


logger = logging.getLogger(__name__)


def resize_image(
    file: ImageFile,
    max_width: int,
    max_height: int,
    *,
    preserve_aspect_ratio: bool = True,
) -> ImageFile:
    """Resize an image to fit within the specified dimensions.

    Args:
        file: The image file to resize.
        max_width: Maximum width in pixels.
        max_height: Maximum height in pixels.
        preserve_aspect_ratio: If True, maintain aspect ratio while fitting within bounds.

    Returns:
        A new ImageFile with the resized image data.

    Raises:
        ProcessingDependencyError: If Pillow is not installed.
    """
    try:
        from PIL import Image
    except ImportError as e:
        raise ProcessingDependencyError(
            "Pillow is required for image resizing",
            dependency="Pillow",
            install_command="pip install Pillow",
        ) from e

    content = file.read()

    with Image.open(io.BytesIO(content)) as img:
        original_width, original_height = img.size

        if original_width <= max_width and original_height <= max_height:
            return file

        if preserve_aspect_ratio:
            width_ratio = max_width / original_width
            height_ratio = max_height / original_height
            scale_factor = min(width_ratio, height_ratio)

            new_width = int(original_width * scale_factor)
            new_height = int(original_height * scale_factor)
        else:
            new_width = min(original_width, max_width)
            new_height = min(original_height, max_height)

        resized_img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)

        output_format = img.format or "PNG"
        if output_format.upper() == "JPEG":
            if resized_img.mode in ("RGBA", "LA", "P"):
                resized_img = resized_img.convert("RGB")

        output_buffer = io.BytesIO()
        resized_img.save(output_buffer, format=output_format)
        output_bytes = output_buffer.getvalue()

    logger.info(
        f"Resized image '{file.filename}' from {original_width}x{original_height} "
        f"to {new_width}x{new_height}"
    )

    return ImageFile(source=FileBytes(data=output_bytes, filename=file.filename))
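
# Example (illustrative sketch, not part of the original module): shrinking an
# oversized image to Anthropic's 8000px bound. `png_bytes` is a hypothetical
# placeholder for real PNG data.
#
#     image = ImageFile(source=FileBytes(data=png_bytes, filename="photo.png"))
#     resized = resize_image(image, max_width=8000, max_height=8000)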


def optimize_image(
    file: ImageFile,
    target_size_bytes: int,
    *,
    min_quality: int = 20,
    initial_quality: int = 85,
) -> ImageFile:
    """Optimize an image to fit within a target file size.

    Uses iterative quality reduction to achieve the target size.

    Args:
        file: The image file to optimize.
        target_size_bytes: Target maximum file size in bytes.
        min_quality: Minimum quality to use (prevents excessive degradation).
        initial_quality: Starting quality for optimization.

    Returns:
        A new ImageFile with the optimized image data.

    Raises:
        ProcessingDependencyError: If Pillow is not installed.
    """
    try:
        from PIL import Image
    except ImportError as e:
        raise ProcessingDependencyError(
            "Pillow is required for image optimization",
            dependency="Pillow",
            install_command="pip install Pillow",
        ) from e

    content = file.read()
    current_size = len(content)

    if current_size <= target_size_bytes:
        return file

    with Image.open(io.BytesIO(content)) as img:
        if img.mode in ("RGBA", "LA", "P"):
            img = img.convert("RGB")
            output_format = "JPEG"
        else:
            output_format = img.format or "JPEG"
            if output_format.upper() not in ("JPEG", "JPG"):
                output_format = "JPEG"

        quality = initial_quality
        output_bytes = content

        while len(output_bytes) > target_size_bytes and quality >= min_quality:
            output_buffer = io.BytesIO()
            img.save(
                output_buffer, format=output_format, quality=quality, optimize=True
            )
            output_bytes = output_buffer.getvalue()

            if len(output_bytes) > target_size_bytes:
                quality -= 5

    logger.info(
        f"Optimized image '{file.filename}' from {current_size} bytes to "
        f"{len(output_bytes)} bytes (quality={quality})"
    )

    filename = file.filename
    if (
        filename
        and output_format.upper() == "JPEG"
        and not filename.lower().endswith((".jpg", ".jpeg"))
    ):
        filename = filename.rsplit(".", 1)[0] + ".jpg"

    return ImageFile(source=FileBytes(data=output_bytes, filename=filename))
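
# Example (illustrative sketch, not part of the original module): compressing
# the ImageFile from the resize_image example above to Anthropic's 5 MB
# per-image limit. Note that non-JPEG inputs may come back re-encoded as JPEG
# with a ".jpg" filename.
#
#     optimized = optimize_image(image, target_size_bytes=5_242_880)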


def chunk_pdf(
    file: PDFFile,
    max_pages: int,
    *,
    overlap_pages: int = 0,
) -> Iterator[PDFFile]:
    """Split a PDF into chunks of maximum page count.

    Yields chunks one at a time to minimize memory usage.

    Args:
        file: The PDF file to chunk.
        max_pages: Maximum pages per chunk.
        overlap_pages: Number of overlapping pages between chunks (for context).

    Yields:
        PDFFile objects, one per chunk.

    Raises:
        ProcessingDependencyError: If pypdf is not installed.
    """
    try:
        from pypdf import PdfReader, PdfWriter
    except ImportError as e:
        raise ProcessingDependencyError(
            "pypdf is required for PDF chunking",
            dependency="pypdf",
            install_command="pip install pypdf",
        ) from e

    content = file.read()
    reader = PdfReader(io.BytesIO(content))
    total_pages = len(reader.pages)

    if total_pages <= max_pages:
        yield file
        return

    filename = file.filename or "document.pdf"
    base_filename = filename.rsplit(".", 1)[0]
    # Guard against a non-positive step (overlap_pages >= max_pages would
    # otherwise loop forever).
    step = max(1, max_pages - overlap_pages)

    chunk_num = 0
    start_page = 0

    while start_page < total_pages:
        end_page = min(start_page + max_pages, total_pages)

        writer = PdfWriter()
        for page_num in range(start_page, end_page):
            writer.add_page(reader.pages[page_num])

        output_buffer = io.BytesIO()
        writer.write(output_buffer)
        output_bytes = output_buffer.getvalue()

        chunk_filename = f"{base_filename}_chunk_{chunk_num}.pdf"

        logger.info(
            f"Created PDF chunk '{chunk_filename}' with pages {start_page + 1}-{end_page}"
        )

        yield PDFFile(source=FileBytes(data=output_bytes, filename=chunk_filename))

        start_page += step
        chunk_num += 1
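
# Example (illustrative sketch, not part of the original module): splitting a
# long PDF into 100-page chunks with a one-page overlap. `pdf_bytes` is a
# hypothetical placeholder.
#
#     pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="manual.pdf"))
#     chunks = list(chunk_pdf(pdf, max_pages=100, overlap_pages=1))
#     # -> manual_chunk_0.pdf, manual_chunk_1.pdf, ...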


def chunk_text(
    file: TextFile,
    max_chars: int,
    *,
    overlap_chars: int = 200,
    split_on_newlines: bool = True,
) -> Iterator[TextFile]:
    """Split a text file into chunks of maximum character count.

    Yields chunks one at a time to minimize memory usage.

    Args:
        file: The text file to chunk.
        max_chars: Maximum characters per chunk.
        overlap_chars: Number of overlapping characters between chunks.
        split_on_newlines: If True, prefer splitting at newline boundaries.

    Yields:
        TextFile objects, one per chunk.
    """
    content = file.read()
    text = content.decode(errors="replace")
    total_chars = len(text)

    if total_chars <= max_chars:
        yield file
        return

    filename = file.filename or "text.txt"
    base_filename = filename.rsplit(".", 1)[0]
    extension = filename.rsplit(".", 1)[-1] if "." in filename else "txt"

    chunk_num = 0
    start_pos = 0

    while start_pos < total_chars:
        end_pos = min(start_pos + max_chars, total_chars)

        if end_pos < total_chars and split_on_newlines:
            last_newline = text.rfind("\n", start_pos, end_pos)
            if last_newline > start_pos + max_chars // 2:
                end_pos = last_newline + 1

        chunk_content = text[start_pos:end_pos]
        chunk_bytes = chunk_content.encode()

        chunk_filename = f"{base_filename}_chunk_{chunk_num}.{extension}"

        logger.info(
            f"Created text chunk '{chunk_filename}' with {len(chunk_content)} characters"
        )

        yield TextFile(source=FileBytes(data=chunk_bytes, filename=chunk_filename))

        if end_pos < total_chars:
            start_pos = max(start_pos + 1, end_pos - overlap_chars)
        else:
            start_pos = total_chars
        chunk_num += 1
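
# Example (illustrative sketch, not part of the original module): chunking a
# large text file while keeping 200 characters of overlap for context.
#
#     log = TextFile(source=FileBytes(data=b"line\n" * 100_000, filename="log.txt"))
#     for chunk in chunk_text(log, max_chars=10_000, overlap_chars=200):
#         print(chunk.filename)  # log_chunk_0.txt, log_chunk_1.txt, ...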


def get_image_dimensions(file: ImageFile) -> tuple[int, int] | None:
    """Get the dimensions of an image file.

    Args:
        file: The image file to measure.

    Returns:
        Tuple of (width, height) in pixels, or None if dimensions cannot be determined.
    """
    try:
        from PIL import Image
    except ImportError:
        logger.warning("Pillow not installed - cannot get image dimensions")
        return None

    content = file.read()

    try:
        with Image.open(io.BytesIO(content)) as img:
            width, height = img.size
            return width, height
    except Exception as e:
        logger.warning(f"Failed to get image dimensions: {e}")
        return None


def get_pdf_page_count(file: PDFFile) -> int | None:
    """Get the page count of a PDF file.

    Args:
        file: The PDF file to measure.

    Returns:
        Number of pages, or None if the page count cannot be determined.
    """
    try:
        from pypdf import PdfReader
    except ImportError:
        logger.warning("pypdf not installed - cannot get PDF page count")
        return None

    content = file.read()

    try:
        reader = PdfReader(io.BytesIO(content))
        return len(reader.pages)
    except Exception as e:
        logger.warning(f"Failed to get PDF page count: {e}")
        return None
@@ -1,564 +0,0 @@
"""File validation functions for checking against provider constraints."""

from collections.abc import Sequence
import io
import logging

from crewai_files.core.types import (
    AudioFile,
    FileInput,
    ImageFile,
    PDFFile,
    TextFile,
    VideoFile,
)
from crewai_files.processing.constraints import (
    AudioConstraints,
    ImageConstraints,
    PDFConstraints,
    ProviderConstraints,
    VideoConstraints,
)
from crewai_files.processing.exceptions import (
    FileTooLargeError,
    FileValidationError,
    UnsupportedFileTypeError,
)


logger = logging.getLogger(__name__)


def _get_image_dimensions(content: bytes) -> tuple[int, int] | None:
    """Get image dimensions using Pillow if available.

    Args:
        content: Raw image bytes.

    Returns:
        Tuple of (width, height) or None if Pillow is unavailable.
    """
    try:
        from PIL import Image

        with Image.open(io.BytesIO(content)) as img:
            width, height = img.size
            return int(width), int(height)
    except ImportError:
        logger.warning(
            "Pillow not installed - cannot validate image dimensions. "
            "Install with: pip install Pillow"
        )
        return None


def _get_pdf_page_count(content: bytes) -> int | None:
    """Get PDF page count using pypdf if available.

    Args:
        content: Raw PDF bytes.

    Returns:
        Page count or None if pypdf is unavailable.
    """
    try:
        from pypdf import PdfReader

        reader = PdfReader(io.BytesIO(content))
        return len(reader.pages)
    except ImportError:
        logger.warning(
            "pypdf not installed - cannot validate PDF page count. "
            "Install with: pip install pypdf"
        )
        return None


def _get_audio_duration(content: bytes, filename: str | None = None) -> float | None:
    """Get audio duration in seconds using tinytag if available.

    Args:
        content: Raw audio bytes.
        filename: Optional filename for format detection hint.

    Returns:
        Duration in seconds or None if tinytag is unavailable.
    """
    try:
        from tinytag import TinyTag  # type: ignore[import-untyped]
    except ImportError:
        logger.warning(
            "tinytag not installed - cannot validate audio duration. "
            "Install with: pip install tinytag"
        )
        return None

    try:
        tag = TinyTag.get(file_obj=io.BytesIO(content), filename=filename)
        duration: float | None = tag.duration
        return duration
    except Exception as e:
        logger.debug(f"Could not determine audio duration: {e}")
        return None


_VIDEO_FORMAT_MAP: dict[str, str] = {
    "video/mp4": "mp4",
    "video/webm": "webm",
    "video/x-matroska": "matroska",
    "video/quicktime": "mov",
    "video/x-msvideo": "avi",
    "video/x-flv": "flv",
}


def _get_video_duration(
    content: bytes, content_type: str | None = None
) -> float | None:
    """Get video duration in seconds using av if available.

    Args:
        content: Raw video bytes.
        content_type: Optional MIME type for format detection hint.

    Returns:
        Duration in seconds or None if av is unavailable.
    """
    try:
        import av
    except ImportError:
        logger.warning(
            "av (PyAV) not installed - cannot validate video duration. "
            "Install with: pip install av"
        )
        return None

    format_hint = _VIDEO_FORMAT_MAP.get(content_type) if content_type else None

    try:
        with av.open(io.BytesIO(content), format=format_hint) as container:  # type: ignore[attr-defined]
            duration: int | None = container.duration  # type: ignore[union-attr]
            if duration is None:
                return None
            return float(duration) / 1_000_000
    except Exception as e:
        logger.debug(f"Could not determine video duration: {e}")

    return None


def _format_size(size_bytes: int) -> str:
    """Format a byte size as a human-readable string."""
    if size_bytes >= 1024 * 1024 * 1024:
        return f"{size_bytes / (1024 * 1024 * 1024):.1f}GB"
    if size_bytes >= 1024 * 1024:
        return f"{size_bytes / (1024 * 1024):.1f}MB"
    if size_bytes >= 1024:
        return f"{size_bytes / 1024:.1f}KB"
    return f"{size_bytes}B"


def _validate_size(
    file_type: str,
    filename: str | None,
    file_size: int,
    max_size: int,
    errors: list[str],
    raise_on_error: bool,
) -> None:
    """Validate file size against a maximum.

    Args:
        file_type: Type label for error messages (e.g., "Image", "PDF").
        filename: Name of the file being validated.
        file_size: Actual file size in bytes.
        max_size: Maximum allowed size in bytes.
        errors: List to append error messages to.
        raise_on_error: If True, raise FileTooLargeError on failure.
    """
    if file_size > max_size:
        msg = (
            f"{file_type} '{filename}' size ({_format_size(file_size)}) exceeds "
            f"maximum ({_format_size(max_size)})"
        )
        errors.append(msg)
        if raise_on_error:
            raise FileTooLargeError(
                msg,
                file_name=filename,
                actual_size=file_size,
                max_size=max_size,
            )


def _validate_format(
    file_type: str,
    filename: str | None,
    content_type: str,
    supported_formats: tuple[str, ...],
    errors: list[str],
    raise_on_error: bool,
) -> None:
    """Validate content type against supported formats.

    Args:
        file_type: Type label for error messages (e.g., "Image", "Audio").
        filename: Name of the file being validated.
        content_type: MIME type of the file.
        supported_formats: Tuple of supported MIME types.
        errors: List to append error messages to.
        raise_on_error: If True, raise UnsupportedFileTypeError on failure.
    """
    if content_type not in supported_formats:
        msg = (
            f"{file_type} format '{content_type}' is not supported. "
            f"Supported: {', '.join(supported_formats)}"
        )
        errors.append(msg)
        if raise_on_error:
            raise UnsupportedFileTypeError(
                msg, file_name=filename, content_type=content_type
            )


def validate_image(
    file: ImageFile,
    constraints: ImageConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate an image file against constraints.

    Args:
        file: The image file to validate.
        constraints: Image constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
        FileValidationError: If the file exceeds dimension limits.
        UnsupportedFileTypeError: If the format is not supported.
    """
    errors: list[str] = []
    content = file.read()
    file_size = len(content)
    filename = file.filename

    _validate_size(
        "Image", filename, file_size, constraints.max_size_bytes, errors, raise_on_error
    )
    _validate_format(
        "Image",
        filename,
        file.content_type,
        constraints.supported_formats,
        errors,
        raise_on_error,
    )

    if constraints.max_width is not None or constraints.max_height is not None:
        dimensions = _get_image_dimensions(content)
        if dimensions is not None:
            width, height = dimensions

            if constraints.max_width and width > constraints.max_width:
                msg = (
                    f"Image '{filename}' width ({width}px) exceeds "
                    f"maximum ({constraints.max_width}px)"
                )
                errors.append(msg)
                if raise_on_error:
                    raise FileValidationError(msg, file_name=filename)

            if constraints.max_height and height > constraints.max_height:
                msg = (
                    f"Image '{filename}' height ({height}px) exceeds "
                    f"maximum ({constraints.max_height}px)"
                )
                errors.append(msg)
                if raise_on_error:
                    raise FileValidationError(msg, file_name=filename)

    return errors


def validate_pdf(
    file: PDFFile,
    constraints: PDFConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate a PDF file against constraints.

    Args:
        file: The PDF file to validate.
        constraints: PDF constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
        FileValidationError: If the file exceeds page limits.
    """
    errors: list[str] = []
    content = file.read()
    file_size = len(content)
    filename = file.filename

    _validate_size(
        "PDF", filename, file_size, constraints.max_size_bytes, errors, raise_on_error
    )

    if constraints.max_pages is not None:
        page_count = _get_pdf_page_count(content)
        if page_count is not None and page_count > constraints.max_pages:
            msg = (
                f"PDF '{filename}' page count ({page_count}) exceeds "
                f"maximum ({constraints.max_pages})"
            )
            errors.append(msg)
            if raise_on_error:
                raise FileValidationError(msg, file_name=filename)

    return errors


def validate_audio(
    file: AudioFile,
    constraints: AudioConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate an audio file against constraints.

    Args:
        file: The audio file to validate.
        constraints: Audio constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
        FileValidationError: If the file exceeds duration limits.
        UnsupportedFileTypeError: If the format is not supported.
    """
    errors: list[str] = []
    content = file.read()
    file_size = len(content)
    filename = file.filename

    _validate_size(
        "Audio",
        filename,
        file_size,
        constraints.max_size_bytes,
        errors,
        raise_on_error,
    )
    _validate_format(
        "Audio",
        filename,
        file.content_type,
        constraints.supported_formats,
        errors,
        raise_on_error,
    )

    if constraints.max_duration_seconds is not None:
        duration = _get_audio_duration(content, filename)
        if duration is not None and duration > constraints.max_duration_seconds:
            msg = (
                f"Audio '{filename}' duration ({duration:.1f}s) exceeds "
                f"maximum ({constraints.max_duration_seconds}s)"
            )
            errors.append(msg)
            if raise_on_error:
                raise FileValidationError(msg, file_name=filename)

    return errors


def validate_video(
    file: VideoFile,
    constraints: VideoConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate a video file against constraints.

    Args:
        file: The video file to validate.
        constraints: Video constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
        FileValidationError: If the file exceeds duration limits.
        UnsupportedFileTypeError: If the format is not supported.
    """
    errors: list[str] = []
    content = file.read()
    file_size = len(content)
    filename = file.filename

    _validate_size(
        "Video",
        filename,
        file_size,
        constraints.max_size_bytes,
        errors,
        raise_on_error,
    )
    _validate_format(
        "Video",
        filename,
        file.content_type,
        constraints.supported_formats,
        errors,
        raise_on_error,
    )

    if constraints.max_duration_seconds is not None:
        # Pass the MIME type so the decoder gets a format hint.
        duration = _get_video_duration(content, file.content_type)
        if duration is not None and duration > constraints.max_duration_seconds:
            msg = (
                f"Video '{filename}' duration ({duration:.1f}s) exceeds "
                f"maximum ({constraints.max_duration_seconds}s)"
            )
            errors.append(msg)
            if raise_on_error:
                raise FileValidationError(msg, file_name=filename)

    return errors


def validate_text(
    file: TextFile,
    constraints: ProviderConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate a text file against general constraints.

    Args:
        file: The text file to validate.
        constraints: Provider constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
    """
    errors: list[str] = []

    if constraints.general_max_size_bytes is None:
        return errors

    file_size = len(file.read())
    _validate_size(
        "Text file",
        file.filename,
        file_size,
        constraints.general_max_size_bytes,
        errors,
        raise_on_error,
    )

    return errors


def _check_unsupported_type(
    file: FileInput,
    provider_name: str,
    type_name: str,
    raise_on_error: bool,
) -> Sequence[str]:
    """Check whether a file type is unsupported and handle the error.

    Args:
        file: The file being validated.
        provider_name: Name of the provider.
        type_name: Name of the file type (e.g., "images", "PDFs").
        raise_on_error: If True, raise an exception instead of returning errors.

    Returns:
        List with the error message (only returned when raise_on_error is False).

    Raises:
        UnsupportedFileTypeError: If raise_on_error is True.
    """
    msg = f"Provider '{provider_name}' does not support {type_name}"
    if raise_on_error:
        raise UnsupportedFileTypeError(
            msg, file_name=file.filename, content_type=file.content_type
        )
    return [msg]


def validate_file(
    file: FileInput,
    constraints: ProviderConstraints,
    *,
    raise_on_error: bool = True,
) -> Sequence[str]:
    """Validate a file against provider constraints.

    Dispatches to the appropriate validator based on file type.

    Args:
        file: The file to validate.
        constraints: Provider constraints to validate against.
        raise_on_error: If True, raise exceptions on validation failure.

    Returns:
        List of validation error messages (empty if valid).

    Raises:
        FileTooLargeError: If the file exceeds size limits.
        FileValidationError: If the file fails other validation checks.
        UnsupportedFileTypeError: If the file type is not supported.
    """
    if isinstance(file, ImageFile):
        if constraints.image is None:
            return _check_unsupported_type(
                file, constraints.name, "images", raise_on_error
            )
        return validate_image(file, constraints.image, raise_on_error=raise_on_error)

    if isinstance(file, PDFFile):
        if constraints.pdf is None:
            return _check_unsupported_type(
                file, constraints.name, "PDFs", raise_on_error
            )
        return validate_pdf(file, constraints.pdf, raise_on_error=raise_on_error)

    if isinstance(file, AudioFile):
        if constraints.audio is None:
            return _check_unsupported_type(
                file, constraints.name, "audio", raise_on_error
            )
        return validate_audio(file, constraints.audio, raise_on_error=raise_on_error)

    if isinstance(file, VideoFile):
        if constraints.video is None:
            return _check_unsupported_type(
                file, constraints.name, "video", raise_on_error
            )
        return validate_video(file, constraints.video, raise_on_error=raise_on_error)

    if isinstance(file, TextFile):
        return validate_text(file, constraints, raise_on_error=raise_on_error)

    return []
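
# Example (illustrative sketch, not part of the original module): collecting
# validation errors without raising, then deciding what to do with them.
# `png_bytes` is a hypothetical placeholder for real PNG data.
#
#     from crewai_files.core.sources import FileBytes
#     from crewai_files.processing.constraints import ANTHROPIC_CONSTRAINTS
#
#     image = ImageFile(source=FileBytes(data=png_bytes, filename="photo.png"))
#     errors = validate_file(image, ANTHROPIC_CONSTRAINTS, raise_on_error=False)
#     if errors:
#         print("; ".join(errors))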
@@ -1,16 +0,0 @@
"""File resolution logic."""

from crewai_files.resolution.resolver import FileResolver
from crewai_files.resolution.utils import (
    is_file_source,
    normalize_input_files,
    wrap_file_source,
)


__all__ = [
    "FileResolver",
    "is_file_source",
    "normalize_input_files",
    "wrap_file_source",
]
@@ -1,670 +0,0 @@
"""FileResolver for deciding file delivery method and managing uploads."""

import asyncio
import base64
from dataclasses import dataclass, field
import hashlib
import logging

from crewai_files.cache.metrics import measure_operation
from crewai_files.cache.upload_cache import CachedUpload, UploadCache
from crewai_files.core.constants import UPLOAD_MAX_RETRIES, UPLOAD_RETRY_DELAY_BASE
from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFile,
    UrlReference,
)
from crewai_files.core.sources import FileUrl
from crewai_files.core.types import FileInput
from crewai_files.processing.constraints import (
    AudioConstraints,
    ImageConstraints,
    PDFConstraints,
    ProviderConstraints,
    VideoConstraints,
    get_constraints_for_provider,
)
from crewai_files.uploaders import UploadResult, get_uploader
from crewai_files.uploaders.base import FileUploader
from crewai_files.uploaders.factory import ProviderType


logger = logging.getLogger(__name__)


@dataclass
class FileContext:
    """Cached file metadata to avoid redundant reads.

    Attributes:
        content: Raw file bytes.
        size: Size of the file in bytes.
        content_hash: SHA-256 hash of the file content.
        content_type: MIME type of the file.
    """

    content: bytes
    size: int
    content_hash: str
    content_type: str


@dataclass
class FileResolverConfig:
    """Configuration for FileResolver.

    Attributes:
        prefer_upload: If True, prefer uploading over inlining for supported providers.
        upload_threshold_bytes: Size threshold above which to use upload.
            If None, uses the provider-specific threshold.
        use_bytes_for_bedrock: If True, use raw bytes instead of base64 for Bedrock.
    """

    prefer_upload: bool = False
    upload_threshold_bytes: int | None = None
    use_bytes_for_bedrock: bool = True
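
# Example (illustrative sketch, not part of the original module): a resolver
# configuration that always prefers uploads where the provider supports them.
#
#     config = FileResolverConfig(prefer_upload=True)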


@dataclass
class FileResolver:
    """Resolves files to their delivery format based on provider capabilities.

    Decides whether to use inline base64, raw bytes, or file upload based on:
    - Provider constraints and capabilities
    - File size
    - Configuration preferences

    Caches uploaded files to avoid redundant uploads.

    Attributes:
        config: Resolver configuration.
        upload_cache: Cache for tracking uploaded files.
    """

    config: FileResolverConfig = field(default_factory=FileResolverConfig)
    upload_cache: UploadCache | None = None
    _uploaders: dict[str, FileUploader] = field(default_factory=dict)

    @staticmethod
    def _build_file_context(file: FileInput) -> FileContext:
        """Build context by reading the file once.

        Args:
            file: The file to build context for.

        Returns:
            FileContext with cached metadata.
        """
        content = file.read()
        return FileContext(
            content=content,
            size=len(content),
            content_hash=hashlib.sha256(content).hexdigest(),
            content_type=file.content_type,
        )

    @staticmethod
    def _is_url_source(file: FileInput) -> bool:
        """Check if the file source is a URL.

        Args:
            file: The file to check.

        Returns:
            True if the file source is a FileUrl, False otherwise.
        """
        return isinstance(file._file_source, FileUrl)

    @staticmethod
    def _supports_url(constraints: ProviderConstraints | None) -> bool:
        """Check if the provider supports URL references.

        Args:
            constraints: Provider constraints.

        Returns:
            True if the provider supports URL references, False otherwise.
        """
        return constraints is not None and constraints.supports_url_references

    @staticmethod
    def _resolve_as_url(file: FileInput) -> UrlReference:
        """Resolve a URL source as a UrlReference.

        Args:
            file: The file with a URL source.

        Returns:
            UrlReference with the URL and content type.
        """
        source = file._file_source
        if not isinstance(source, FileUrl):
            raise TypeError(f"Expected FileUrl source, got {type(source).__name__}")
        return UrlReference(
            content_type=file.content_type,
            url=source.url,
        )

    def resolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile:
        """Resolve a file to its delivery format for a provider.

        Args:
            file: The file to resolve.
            provider: Provider name (e.g., "gemini", "anthropic", "openai").

        Returns:
            ResolvedFile representing the appropriate delivery format.
        """
        constraints = get_constraints_for_provider(provider)

        if self._is_url_source(file) and self._supports_url(constraints):
            return self._resolve_as_url(file)

        context = self._build_file_context(file)

        should_upload = self._should_upload(file, provider, constraints, context.size)

        if should_upload:
            resolved = self._resolve_via_upload(file, provider, context)
            if resolved is not None:
                return resolved

        return self._resolve_inline(file, provider, context)

    def resolve_files(
        self,
        files: dict[str, FileInput],
        provider: ProviderType,
    ) -> dict[str, ResolvedFile]:
        """Resolve multiple files for a provider.

        Args:
            files: Dictionary mapping names to file inputs.
            provider: Provider name.

        Returns:
            Dictionary mapping names to resolved files.
        """
        return {name: self.resolve(file, provider) for name, file in files.items()}
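
# Example (illustrative sketch, not part of the original module): resolving a
# batch for Gemini, assuming UploadCache() can be constructed with defaults
# and `image` is an ImageFile built elsewhere. Small files resolve inline;
# large ones become FileReference objects when an uploader is available.
#
#     resolver = FileResolver(upload_cache=UploadCache())
#     resolved = resolver.resolve_files({"photo": image}, provider="gemini")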

    @staticmethod
    def _get_type_constraint(
        content_type: str,
        constraints: ProviderConstraints,
    ) -> ImageConstraints | PDFConstraints | AudioConstraints | VideoConstraints | None:
        """Get the type-specific constraint based on content type.

        Args:
            content_type: MIME type of the file.
            constraints: Provider constraints.

        Returns:
            Type-specific constraint or None if not found.
        """
        if content_type.startswith("image/"):
            return constraints.image
        if content_type == "application/pdf":
            return constraints.pdf
        if content_type.startswith("audio/"):
            return constraints.audio
        if content_type.startswith("video/"):
            return constraints.video
        return None

    def _should_upload(
        self,
        file: FileInput,
        provider: str,
        constraints: ProviderConstraints | None,
        file_size: int,
    ) -> bool:
        """Determine if a file should be uploaded rather than inlined.

        Uses type-specific constraints to make smarter decisions:
        - Checks if the file exceeds type-specific inline size limits
        - Falls back to the general threshold if there is no type-specific constraint

        Args:
            file: The file to check.
            provider: Provider name.
            constraints: Provider constraints.
            file_size: Size of the file in bytes.

        Returns:
            True if the file should be uploaded, False otherwise.
        """
        if constraints is None or not constraints.supports_file_upload:
            return False

        if self.config.prefer_upload:
            return True

        content_type = file.content_type
        type_constraint = self._get_type_constraint(content_type, constraints)

        if type_constraint is not None:
            # Check if the file exceeds the type-specific inline limit
            if file_size > type_constraint.max_size_bytes:
                logger.debug(
                    f"File {file.filename} ({file_size}B) exceeds {content_type} "
                    f"inline limit ({type_constraint.max_size_bytes}B) for {provider}"
                )
                return True

        # Fall back to the general threshold
        threshold = self.config.upload_threshold_bytes
        if threshold is None:
            threshold = constraints.file_upload_threshold_bytes

        if threshold is not None and file_size > threshold:
            return True

        return False

    def _resolve_via_upload(
        self,
        file: FileInput,
        provider: ProviderType,
        context: FileContext,
    ) -> ResolvedFile | None:
        """Resolve a file by uploading it.

        Args:
            file: The file to upload.
            provider: Provider name.
            context: Pre-computed file context.

        Returns:
            FileReference if the upload succeeds, None otherwise.
        """
        if self.upload_cache is not None:
            cached = self.upload_cache.get_by_hash(context.content_hash, provider)
            if cached is not None:
                logger.debug(
                    f"Using cached upload for {file.filename}: {cached.file_id}"
                )
                return FileReference(
                    content_type=cached.content_type,
                    file_id=cached.file_id,
                    provider=cached.provider,
                    expires_at=cached.expires_at,
                    file_uri=cached.file_uri,
                )

        uploader = self._get_uploader(provider)
        if uploader is None:
            logger.debug(f"No uploader available for {provider}")
            return None

        result = self._upload_with_retry(uploader, file, provider, context.size)
        if result is None:
            return None

        if self.upload_cache is not None:
            self.upload_cache.set_by_hash(
                file_hash=context.content_hash,
                content_type=context.content_type,
                provider=provider,
                file_id=result.file_id,
                file_uri=result.file_uri,
                expires_at=result.expires_at,
            )

        return FileReference(
            content_type=result.content_type,
            file_id=result.file_id,
            provider=result.provider,
            expires_at=result.expires_at,
            file_uri=result.file_uri,
        )

    @staticmethod
    def _upload_with_retry(
        uploader: FileUploader,
        file: FileInput,
        provider: str,
        file_size: int,
    ) -> UploadResult | None:
        """Upload with exponential backoff retry.

        Args:
            uploader: The uploader to use.
            file: The file to upload.
            provider: Provider name for logging.
            file_size: Size of the file in bytes.

        Returns:
            UploadResult if successful, None otherwise.
        """
        import time

        from crewai_files.processing.exceptions import (
            PermanentUploadError,
            TransientUploadError,
        )

        last_error: Exception | None = None

        for attempt in range(UPLOAD_MAX_RETRIES):
            with measure_operation(
                "upload",
                filename=file.filename,
                provider=provider,
                size_bytes=file_size,
                attempt=attempt + 1,
            ) as metrics:
                try:
                    result = uploader.upload(file)
                    metrics.metadata["file_id"] = result.file_id
                    return result
                except PermanentUploadError as e:
                    metrics.metadata["error_type"] = "permanent"
                    logger.warning(
                        f"Non-retryable upload error for {file.filename}: {e}"
                    )
                    return None
                except TransientUploadError as e:
                    metrics.metadata["error_type"] = "transient"
                    last_error = e
                except Exception as e:
                    metrics.metadata["error_type"] = "unknown"
                    last_error = e

            if attempt < UPLOAD_MAX_RETRIES - 1:
                delay = UPLOAD_RETRY_DELAY_BASE**attempt
                logger.debug(
                    f"Retrying upload for {file.filename} in {delay}s (attempt {attempt + 1})"
                )
                time.sleep(delay)

        logger.warning(
            f"Upload failed for {file.filename} to {provider} after {UPLOAD_MAX_RETRIES} attempts: {last_error}"
        )
        return None

    def _resolve_inline(
        self,
        file: FileInput,
        provider: str,
        context: FileContext,
    ) -> ResolvedFile:
        """Resolve a file as inline content.

        Args:
            file: The file to resolve (used for logging).
            provider: Provider name.
            context: Pre-computed file context.

        Returns:
            InlineBase64 or InlineBytes depending on the provider.
        """
        logger.debug(f"Resolving {file.filename} as inline for {provider}")
        if self.config.use_bytes_for_bedrock and "bedrock" in provider:
            return InlineBytes(
                content_type=context.content_type,
                data=context.content,
            )

        encoded = base64.b64encode(context.content).decode("ascii")
        return InlineBase64(
            content_type=context.content_type,
            data=encoded,
        )

    async def aresolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile:
        """Async resolve a file to its delivery format for a provider.

        Args:
            file: The file to resolve.
            provider: Provider name (e.g., "gemini", "anthropic", "openai").

        Returns:
            ResolvedFile representing the appropriate delivery format.
        """
        constraints = get_constraints_for_provider(provider)

        if self._is_url_source(file) and self._supports_url(constraints):
            return self._resolve_as_url(file)

        context = self._build_file_context(file)

        should_upload = self._should_upload(file, provider, constraints, context.size)

        if should_upload:
            resolved = await self._aresolve_via_upload(file, provider, context)
|
||||
if resolved is not None:
|
||||
return resolved
|
||||
|
||||
return self._resolve_inline(file, provider, context)
|
||||
|
||||
async def aresolve_files(
|
||||
self,
|
||||
files: dict[str, FileInput],
|
||||
provider: ProviderType,
|
||||
max_concurrency: int = 10,
|
||||
) -> dict[str, ResolvedFile]:
|
||||
"""Async resolve multiple files in parallel.
|
||||
|
||||
Args:
|
||||
files: Dictionary mapping names to file inputs.
|
||||
provider: Provider name.
|
||||
max_concurrency: Maximum number of concurrent resolutions.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping names to resolved files.
|
||||
"""
|
||||
semaphore = asyncio.Semaphore(max_concurrency)
|
||||
|
||||
async def resolve_single(
|
||||
entry_key: str, input_file: FileInput
|
||||
) -> tuple[str, ResolvedFile]:
|
||||
"""Resolve a single file with semaphore limiting."""
|
||||
async with semaphore:
|
||||
entry_resolved = await self.aresolve(input_file, provider)
|
||||
return entry_key, entry_resolved
|
||||
|
||||
tasks = [resolve_single(n, f) for n, f in files.items()]
|
||||
gather_results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
output: dict[str, ResolvedFile] = {}
|
||||
for item in gather_results:
|
||||
if isinstance(item, BaseException):
|
||||
logger.error(f"Resolution failed: {item}")
|
||||
continue
|
||||
key, resolved = item
|
||||
output[key] = resolved
|
||||
|
||||
return output
|
||||
|
||||
async def _aresolve_via_upload(
|
||||
self,
|
||||
file: FileInput,
|
||||
provider: ProviderType,
|
||||
context: FileContext,
|
||||
) -> ResolvedFile | None:
|
||||
"""Async resolve a file by uploading it.
|
||||
|
||||
Args:
|
||||
file: The file to upload.
|
||||
provider: Provider name.
|
||||
context: Pre-computed file context.
|
||||
|
||||
Returns:
|
||||
FileReference if upload succeeds, None otherwise.
|
||||
"""
|
||||
if self.upload_cache is not None:
|
||||
cached = await self.upload_cache.aget_by_hash(
|
||||
context.content_hash, provider
|
||||
)
|
||||
if cached is not None:
|
||||
logger.debug(
|
||||
f"Using cached upload for {file.filename}: {cached.file_id}"
|
||||
)
|
||||
return FileReference(
|
||||
content_type=cached.content_type,
|
||||
file_id=cached.file_id,
|
||||
provider=cached.provider,
|
||||
expires_at=cached.expires_at,
|
||||
file_uri=cached.file_uri,
|
||||
)
|
||||
|
||||
uploader = self._get_uploader(provider)
|
||||
if uploader is None:
|
||||
logger.debug(f"No uploader available for {provider}")
|
||||
return None
|
||||
|
||||
result = await self._aupload_with_retry(uploader, file, provider, context.size)
|
||||
if result is None:
|
||||
return None
|
||||
|
||||
if self.upload_cache is not None:
|
||||
await self.upload_cache.aset_by_hash(
|
||||
file_hash=context.content_hash,
|
||||
content_type=context.content_type,
|
||||
provider=provider,
|
||||
file_id=result.file_id,
|
||||
file_uri=result.file_uri,
|
||||
expires_at=result.expires_at,
|
||||
)
|
||||
|
||||
return FileReference(
|
||||
content_type=result.content_type,
|
||||
file_id=result.file_id,
|
||||
provider=result.provider,
|
||||
expires_at=result.expires_at,
|
||||
file_uri=result.file_uri,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
async def _aupload_with_retry(
|
||||
uploader: FileUploader,
|
||||
file: FileInput,
|
||||
provider: str,
|
||||
file_size: int,
|
||||
) -> UploadResult | None:
|
||||
"""Async upload with exponential backoff retry.
|
||||
|
||||
Args:
|
||||
uploader: The uploader to use.
|
||||
file: The file to upload.
|
||||
provider: Provider name for logging.
|
||||
file_size: Size of the file in bytes.
|
||||
|
||||
Returns:
|
||||
UploadResult if successful, None otherwise.
|
||||
"""
|
||||
from crewai_files.processing.exceptions import (
|
||||
PermanentUploadError,
|
||||
TransientUploadError,
|
||||
)
|
||||
|
||||
last_error: Exception | None = None
|
||||
|
||||
for attempt in range(UPLOAD_MAX_RETRIES):
|
||||
with measure_operation(
|
||||
"upload",
|
||||
filename=file.filename,
|
||||
provider=provider,
|
||||
size_bytes=file_size,
|
||||
attempt=attempt + 1,
|
||||
) as metrics:
|
||||
try:
|
||||
result = await uploader.aupload(file)
|
||||
metrics.metadata["file_id"] = result.file_id
|
||||
return result
|
||||
except PermanentUploadError as e:
|
||||
metrics.metadata["error_type"] = "permanent"
|
||||
logger.warning(
|
||||
f"Non-retryable upload error for {file.filename}: {e}"
|
||||
)
|
||||
return None
|
||||
except TransientUploadError as e:
|
||||
metrics.metadata["error_type"] = "transient"
|
||||
last_error = e
|
||||
except Exception as e:
|
||||
metrics.metadata["error_type"] = "unknown"
|
||||
last_error = e
|
||||
|
||||
if attempt < UPLOAD_MAX_RETRIES - 1:
|
||||
delay = UPLOAD_RETRY_DELAY_BASE**attempt
|
||||
logger.debug(
|
||||
f"Retrying upload for {file.filename} in {delay}s (attempt {attempt + 1})"
|
||||
)
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
logger.warning(
|
||||
f"Upload failed for {file.filename} to {provider} after {UPLOAD_MAX_RETRIES} attempts: {last_error}"
|
||||
)
|
||||
return None
|
||||
|
||||
def _get_uploader(self, provider: ProviderType) -> FileUploader | None:
|
||||
"""Get or create an uploader for a provider.
|
||||
|
||||
Args:
|
||||
provider: Provider name.
|
||||
|
||||
Returns:
|
||||
FileUploader instance or None if not available.
|
||||
"""
|
||||
if provider not in self._uploaders:
|
||||
uploader = get_uploader(provider)
|
||||
if uploader is not None:
|
||||
self._uploaders[provider] = uploader
|
||||
else:
|
||||
return None
|
||||
|
||||
return self._uploaders.get(provider)
|
||||
|
||||
def get_cached_uploads(self, provider: ProviderType) -> list[CachedUpload]:
|
||||
"""Get all cached uploads for a provider.
|
||||
|
||||
Args:
|
||||
provider: Provider name.
|
||||
|
||||
Returns:
|
||||
List of cached uploads.
|
||||
"""
|
||||
if self.upload_cache is None:
|
||||
return []
|
||||
return self.upload_cache.get_all_for_provider(provider)
|
||||
|
||||
def clear_cache(self) -> None:
|
||||
"""Clear the upload cache."""
|
||||
if self.upload_cache is not None:
|
||||
self.upload_cache.clear()
|
||||
|
||||
|
||||
def create_resolver(
|
||||
provider: str | None = None,
|
||||
prefer_upload: bool = False,
|
||||
upload_threshold_bytes: int | None = None,
|
||||
enable_cache: bool = True,
|
||||
) -> FileResolver:
|
||||
"""Create a configured FileResolver.
|
||||
|
||||
Args:
|
||||
provider: Optional provider name to load default threshold from constraints.
|
||||
prefer_upload: Whether to prefer upload over inline.
|
||||
upload_threshold_bytes: Size threshold for using upload. If None and
|
||||
provider is specified, uses provider's default threshold.
|
||||
enable_cache: Whether to enable upload caching.
|
||||
|
||||
Returns:
|
||||
Configured FileResolver instance.
|
||||
"""
|
||||
threshold = upload_threshold_bytes
|
||||
if threshold is None and provider is not None:
|
||||
constraints = get_constraints_for_provider(provider)
|
||||
if constraints is not None:
|
||||
threshold = constraints.file_upload_threshold_bytes
|
||||
|
||||
config = FileResolverConfig(
|
||||
prefer_upload=prefer_upload,
|
||||
upload_threshold_bytes=threshold,
|
||||
)
|
||||
|
||||
cache = UploadCache() if enable_cache else None
|
||||
|
||||
return FileResolver(config=config, upload_cache=cache)
|
||||
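
# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how create_resolver() is meant to be driven,
# assuming the FilePath source and ImageFile wrapper from crewai_files.core;
# the path and provider literal are hypothetical.
import asyncio
from pathlib import Path

from crewai_files.core.sources import FilePath
from crewai_files.core.types import ImageFile


async def _demo() -> None:
    resolver = create_resolver(provider="openai", prefer_upload=False)
    file = ImageFile(source=FilePath(path=Path("diagram.png")))  # hypothetical file
    resolved = await resolver.aresolve(file, "openai")
    # Depending on size and provider constraints, this is a FileReference
    # (uploaded), an InlineBase64, or (for Bedrock) an InlineBytes.
    print(type(resolved).__name__)


asyncio.run(_demo())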
@@ -1,91 +0,0 @@
"""Utility functions for file handling."""

from __future__ import annotations

from pathlib import Path
from typing import TYPE_CHECKING

from crewai_files.core.sources import is_file_source


if TYPE_CHECKING:
    from crewai_files.core.sources import FileSource, FileSourceInput
    from crewai_files.core.types import FileInput


__all__ = ["is_file_source", "normalize_input_files", "wrap_file_source"]


def wrap_file_source(source: FileSource) -> FileInput:
    """Wrap a FileSource in the appropriate typed FileInput wrapper.

    Args:
        source: The file source to wrap.

    Returns:
        Typed FileInput wrapper based on content type.
    """
    from crewai_files.core.types import (
        AudioFile,
        ImageFile,
        PDFFile,
        TextFile,
        VideoFile,
    )

    content_type = source.content_type

    if content_type.startswith("image/"):
        return ImageFile(source=source)
    if content_type.startswith("audio/"):
        return AudioFile(source=source)
    if content_type.startswith("video/"):
        return VideoFile(source=source)
    if content_type == "application/pdf":
        return PDFFile(source=source)
    return TextFile(source=source)


def normalize_input_files(
    input_files: list[FileSourceInput | FileInput],
) -> dict[str, FileInput]:
    """Convert a list of file sources to a named dictionary of FileInputs.

    Args:
        input_files: List of file source inputs or File objects.

    Returns:
        Dictionary mapping names to FileInput wrappers.
    """
    from crewai_files.core.sources import FileBytes, FilePath, FileStream, FileUrl
    from crewai_files.core.types import BaseFile

    result: dict[str, FileInput] = {}

    for i, item in enumerate(input_files):
        if isinstance(item, BaseFile):
            name = item.filename or f"file_{i}"
            if "." in name:
                name = name.rsplit(".", 1)[0]
            result[name] = item
            continue

        file_source: FilePath | FileBytes | FileStream | FileUrl
        if isinstance(item, (FilePath, FileBytes, FileStream, FileUrl)):
            file_source = item
        elif isinstance(item, Path):
            file_source = FilePath(path=item)
        elif isinstance(item, str):
            if item.startswith(("http://", "https://")):
                file_source = FileUrl(url=item)
            else:
                file_source = FilePath(path=Path(item))
        elif isinstance(item, (bytes, memoryview)):
            file_source = FileBytes(data=bytes(item))
        else:
            continue

        name = file_source.filename or f"file_{i}"
        result[name] = wrap_file_source(file_source)

    return result
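
# --- Usage sketch (not part of the original module) ---
# normalize_input_files() accepts a mix of paths, URL strings, raw bytes, and
# already-wrapped File objects, and keys the result by derived filename. The
# inputs are hypothetical, and the wrapper chosen for the bytes entry assumes
# FileBytes sniffs the MIME type from the content.
from pathlib import Path

mixed = normalize_input_files(
    [
        Path("notes.txt"),                # FilePath -> TextFile
        "https://example.com/chart.png",  # FileUrl -> ImageFile
        b"%PDF-1.7 ...",                  # FileBytes -> PDFFile, if sniffed as PDF
    ]
)
for name, wrapped in mixed.items():
    print(name, type(wrapped).__name__)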
@@ -1,11 +0,0 @@
"""File uploader implementations for provider File APIs."""

from crewai_files.uploaders.base import FileUploader, UploadResult
from crewai_files.uploaders.factory import get_uploader


__all__ = [
    "FileUploader",
    "UploadResult",
    "get_uploader",
]
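
# Usage sketch (not part of the original file): the package root re-exports
# the base contract and the factory, so callers rarely need to import the
# provider modules directly.
from crewai_files.uploaders import FileUploader, UploadResult, get_uploader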
@@ -1,242 +0,0 @@
"""Anthropic Files API uploader implementation."""

from __future__ import annotations

import logging
import os
from typing import Any

from crewai_files.core.sources import generate_filename
from crewai_files.core.types import FileInput
from crewai_files.processing.exceptions import classify_upload_error
from crewai_files.uploaders.base import FileUploader, UploadResult


logger = logging.getLogger(__name__)


class AnthropicFileUploader(FileUploader):
    """Uploader for Anthropic Files API.

    Uses the anthropic SDK to upload files. Files are stored persistently
    until explicitly deleted.
    """

    def __init__(
        self,
        api_key: str | None = None,
        client: Any = None,
        async_client: Any = None,
    ) -> None:
        """Initialize the Anthropic uploader.

        Args:
            api_key: Optional Anthropic API key. If not provided, uses
                ANTHROPIC_API_KEY environment variable.
            client: Optional pre-instantiated Anthropic client.
            async_client: Optional pre-instantiated async Anthropic client.
        """
        self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        self._client: Any = client
        self._async_client: Any = async_client

    @property
    def provider_name(self) -> str:
        """Return the provider name."""
        return "anthropic"

    def _get_client(self) -> Any:
        """Get or create the Anthropic client."""
        if self._client is None:
            try:
                import anthropic

                self._client = anthropic.Anthropic(api_key=self._api_key)
            except ImportError as e:
                raise ImportError(
                    "anthropic is required for Anthropic file uploads. "
                    "Install with: pip install anthropic"
                ) from e
        return self._client

    def _get_async_client(self) -> Any:
        """Get or create the async Anthropic client."""
        if self._async_client is None:
            try:
                import anthropic

                self._async_client = anthropic.AsyncAnthropic(api_key=self._api_key)
            except ImportError as e:
                raise ImportError(
                    "anthropic is required for Anthropic file uploads. "
                    "Install with: pip install anthropic"
                ) from e
        return self._async_client

    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        """Upload a file to Anthropic.

        Args:
            file: The file to upload.
            purpose: Unused; accepted for interface consistency.

        Returns:
            UploadResult with the file ID and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            client = self._get_client()

            content = file.read()

            logger.info(
                f"Uploading file '{file.filename}' to Anthropic ({len(content)} bytes)"
            )

            filename = file.filename or generate_filename(file.content_type)
            uploaded_file = client.beta.files.upload(
                file=(filename, content, file.content_type),
            )

            logger.info(f"Uploaded to Anthropic: {uploaded_file.id}")

            return UploadResult(
                file_id=uploaded_file.id,
                file_uri=None,
                content_type=file.content_type,
                expires_at=None,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except Exception as e:
            raise classify_upload_error(e, file.filename) from e

    def delete(self, file_id: str) -> bool:
        """Delete an uploaded file from Anthropic.

        Args:
            file_id: The file ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_client()
            client.beta.files.delete(file_id=file_id)
            logger.info(f"Deleted Anthropic file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete Anthropic file {file_id}: {e}")
            return False

    def get_file_info(self, file_id: str) -> dict[str, Any] | None:
        """Get information about an uploaded file.

        Args:
            file_id: The file ID.

        Returns:
            Dictionary with file information, or None if not found.
        """
        try:
            client = self._get_client()
            file_info = client.beta.files.retrieve(file_id=file_id)
            return {
                "id": file_info.id,
                "filename": file_info.filename,
                "purpose": file_info.purpose,
                "size_bytes": file_info.size_bytes,
                "created_at": file_info.created_at,
            }
        except Exception as e:
            logger.debug(f"Failed to get Anthropic file info for {file_id}: {e}")
            return None

    def list_files(self) -> list[dict[str, Any]]:
        """List all uploaded files.

        Returns:
            List of dictionaries with file information.
        """
        try:
            client = self._get_client()
            files = client.beta.files.list()
            return [
                {
                    "id": f.id,
                    "filename": f.filename,
                    "purpose": f.purpose,
                    "size_bytes": f.size_bytes,
                    "created_at": f.created_at,
                }
                for f in files.data
            ]
        except Exception as e:
            logger.warning(f"Failed to list Anthropic files: {e}")
            return []

    async def aupload(
        self, file: FileInput, purpose: str | None = None
    ) -> UploadResult:
        """Async upload a file to Anthropic using native async client.

        Args:
            file: The file to upload.
            purpose: Unused; accepted for interface consistency.

        Returns:
            UploadResult with the file ID and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            client = self._get_async_client()

            content = await file.aread()

            logger.info(
                f"Uploading file '{file.filename}' to Anthropic ({len(content)} bytes)"
            )

            filename = file.filename or generate_filename(file.content_type)
            uploaded_file = await client.beta.files.upload(
                file=(filename, content, file.content_type),
            )

            logger.info(f"Uploaded to Anthropic: {uploaded_file.id}")

            return UploadResult(
                file_id=uploaded_file.id,
                file_uri=None,
                content_type=file.content_type,
                expires_at=None,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except Exception as e:
            raise classify_upload_error(e, file.filename) from e

    async def adelete(self, file_id: str) -> bool:
        """Async delete an uploaded file from Anthropic.

        Args:
            file_id: The file ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_async_client()
            await client.beta.files.delete(file_id=file_id)
            logger.info(f"Deleted Anthropic file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete Anthropic file {file_id}: {e}")
            return False
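
# --- Usage sketch (not part of the original module) ---
# Driving the uploader above directly, assuming ANTHROPIC_API_KEY is set in
# the environment; the file path is hypothetical.
from pathlib import Path

from crewai_files.core.sources import FilePath
from crewai_files.core.types import PDFFile

uploader = AnthropicFileUploader()
result = uploader.upload(PDFFile(source=FilePath(path=Path("paper.pdf"))))
print(result.file_id)            # provider-assigned ID for later reference
uploader.delete(result.file_id)  # Anthropic files persist until deleted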
@@ -1,118 +0,0 @@
"""Base class for file uploaders."""

from abc import ABC, abstractmethod
import asyncio
from dataclasses import dataclass
from datetime import datetime
from typing import Any

from crewai_files.core.types import FileInput


@dataclass
class UploadResult:
    """Result of a file upload operation.

    Attributes:
        file_id: Provider-specific file identifier.
        provider: Name of the provider.
        content_type: MIME type of the uploaded file.
        file_uri: Optional URI for accessing the file.
        expires_at: When the upload expires (if applicable).
    """

    file_id: str
    provider: str
    content_type: str
    file_uri: str | None = None
    expires_at: datetime | None = None


class FileUploader(ABC):
    """Abstract base class for provider file uploaders.

    Implementations handle uploading files to provider-specific File APIs.
    """

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """Return the provider name."""

    @abstractmethod
    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        """Upload a file to the provider.

        Args:
            file: The file to upload.
            purpose: Optional purpose/description for the upload.

        Returns:
            UploadResult with the file identifier and metadata.

        Raises:
            Exception: If upload fails.
        """

    async def aupload(
        self, file: FileInput, purpose: str | None = None
    ) -> UploadResult:
        """Async upload a file to the provider.

        Default implementation runs sync upload in executor.
        Override in subclasses for native async support.

        Args:
            file: The file to upload.
            purpose: Optional purpose/description for the upload.

        Returns:
            UploadResult with the file identifier and metadata.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.upload, file, purpose)

    @abstractmethod
    def delete(self, file_id: str) -> bool:
        """Delete an uploaded file.

        Args:
            file_id: The file identifier to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """

    async def adelete(self, file_id: str) -> bool:
        """Async delete an uploaded file.

        Default implementation runs sync delete in executor.
        Override in subclasses for native async support.

        Args:
            file_id: The file identifier to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.delete, file_id)

    def get_file_info(self, file_id: str) -> dict[str, Any] | None:
        """Get information about an uploaded file.

        Args:
            file_id: The file identifier.

        Returns:
            Dictionary with file information, or None if not found.
        """
        return None

    def list_files(self) -> list[dict[str, Any]]:
        """List all uploaded files.

        Returns:
            List of dictionaries with file information.
        """
        return []
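
# --- Implementation sketch (not part of the original module) ---
# The minimal surface a new provider uploader must cover: provider_name,
# upload, and delete; aupload/adelete fall back to the executor-backed
# defaults above. NullUploader is a hypothetical example, not a real provider.
class NullUploader(FileUploader):
    """Toy uploader that pretends every upload succeeds."""

    @property
    def provider_name(self) -> str:
        return "null"

    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        # A real implementation would call the provider SDK here.
        return UploadResult(
            file_id="null-0",
            provider=self.provider_name,
            content_type=file.content_type,
        )

    def delete(self, file_id: str) -> bool:
        return True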
@@ -1,477 +0,0 @@
"""AWS Bedrock S3 file uploader implementation."""

from __future__ import annotations

import hashlib
import logging
import os
from pathlib import Path
from typing import Any

from crewai_files.core.constants import (
    MAX_CONCURRENCY,
    MULTIPART_CHUNKSIZE,
    MULTIPART_THRESHOLD,
)
from crewai_files.core.sources import FileBytes, FilePath
from crewai_files.core.types import FileInput
from crewai_files.processing.exceptions import (
    PermanentUploadError,
    TransientUploadError,
)
from crewai_files.uploaders.base import FileUploader, UploadResult


logger = logging.getLogger(__name__)


def _classify_s3_error(e: Exception, filename: str | None) -> Exception:
    """Classify an S3 exception as transient or permanent upload error.

    Args:
        e: The exception to classify.
        filename: The filename for error context.

    Returns:
        A TransientUploadError or PermanentUploadError wrapping the original.
    """
    error_type = type(e).__name__
    error_code = getattr(e, "response", {}).get("Error", {}).get("Code", "")

    if error_code in ("SlowDown", "ServiceUnavailable", "InternalError"):
        return TransientUploadError(f"Transient S3 error: {e}", file_name=filename)
    if error_code in ("AccessDenied", "InvalidAccessKeyId", "SignatureDoesNotMatch"):
        return PermanentUploadError(f"S3 authentication error: {e}", file_name=filename)
    if error_code in ("NoSuchBucket", "InvalidBucketName"):
        return PermanentUploadError(f"S3 bucket error: {e}", file_name=filename)
    if "Throttl" in error_type or "Throttl" in str(e):
        return TransientUploadError(f"S3 throttling: {e}", file_name=filename)
    return TransientUploadError(f"S3 upload failed: {e}", file_name=filename)


def _get_file_path(file: FileInput) -> Path | None:
    """Get the filesystem path if file source is FilePath.

    Args:
        file: The file input to check.

    Returns:
        Path if source is FilePath, None otherwise.
    """
    source = file._file_source
    if isinstance(source, FilePath):
        return source.path
    return None


def _get_file_size(file: FileInput) -> int | None:
    """Get file size without reading content if possible.

    Args:
        file: The file input.

    Returns:
        Size in bytes if determinable without reading, None otherwise.
    """
    source = file._file_source
    if isinstance(source, FilePath):
        return source.path.stat().st_size
    if isinstance(source, FileBytes):
        return len(source.data)
    return None


def _compute_hash_streaming(file_path: Path) -> str:
    """Compute SHA-256 hash by streaming file content.

    Args:
        file_path: Path to the file.

    Returns:
        First 16 characters of hex digest.
    """
    hasher = hashlib.sha256()
    with open(file_path, "rb") as f:
        while chunk := f.read(1024 * 1024):
            hasher.update(chunk)
    return hasher.hexdigest()[:16]


class BedrockFileUploader(FileUploader):
    """Uploader for AWS Bedrock via S3.

    Uploads files to S3 and returns S3 URIs that can be used with Bedrock's
    Converse API s3Location source format.
    """

    def __init__(
        self,
        bucket_name: str | None = None,
        bucket_owner: str | None = None,
        prefix: str = "crewai-files",
        region: str | None = None,
        client: Any = None,
        async_client: Any = None,
    ) -> None:
        """Initialize the Bedrock S3 uploader.

        Args:
            bucket_name: S3 bucket name. If not provided, uses
                CREWAI_BEDROCK_S3_BUCKET environment variable.
            bucket_owner: Optional bucket owner account ID for cross-account access.
                Uses CREWAI_BEDROCK_S3_BUCKET_OWNER environment variable if not provided.
            prefix: S3 key prefix for uploaded files (default: "crewai-files").
            region: AWS region. Uses AWS_REGION or AWS_DEFAULT_REGION if not provided.
            client: Optional pre-instantiated boto3 S3 client.
            async_client: Optional pre-instantiated aioboto3 session.
        """
        self._bucket_name = bucket_name or os.environ.get("CREWAI_BEDROCK_S3_BUCKET")
        self._bucket_owner = bucket_owner or os.environ.get(
            "CREWAI_BEDROCK_S3_BUCKET_OWNER"
        )
        self._prefix = prefix
        self._region = region or os.environ.get(
            "AWS_REGION", os.environ.get("AWS_DEFAULT_REGION")
        )
        self._client: Any = client
        self._async_client: Any = async_client

    @property
    def provider_name(self) -> str:
        """Return the provider name."""
        return "bedrock"

    @property
    def bucket_name(self) -> str:
        """Return the configured bucket name."""
        if not self._bucket_name:
            raise ValueError(
                "S3 bucket name not configured. Set CREWAI_BEDROCK_S3_BUCKET "
                "environment variable or pass bucket_name parameter."
            )
        return self._bucket_name

    @property
    def bucket_owner(self) -> str | None:
        """Return the configured bucket owner."""
        return self._bucket_owner

    def _get_client(self) -> Any:
        """Get or create the S3 client."""
        if self._client is None:
            try:
                import boto3

                self._client = boto3.client("s3", region_name=self._region)
            except ImportError as e:
                raise ImportError(
                    "boto3 is required for Bedrock S3 file uploads. "
                    "Install with: pip install boto3"
                ) from e
        return self._client

    def _get_async_client(self) -> Any:
        """Get or create the async S3 session."""
        if self._async_client is None:
            try:
                import aioboto3  # type: ignore[import-not-found]

                # Cache the session on _async_client so it is created once and
                # reused; storing it on a separate attribute would recreate a
                # session per call and break pre-injected async clients.
                self._async_client = aioboto3.Session()
            except ImportError as e:
                raise ImportError(
                    "aioboto3 is required for async Bedrock S3 file uploads. "
                    "Install with: pip install aioboto3"
                ) from e
        return self._async_client

    def _generate_s3_key(self, file: FileInput, content: bytes | None = None) -> str:
        """Generate a unique S3 key for the file.

        For FilePath sources with no content provided, computes hash via streaming.

        Args:
            file: The file being uploaded.
            content: The file content bytes (optional for FilePath sources).

        Returns:
            S3 key string.
        """
        if content is not None:
            content_hash = hashlib.sha256(content).hexdigest()[:16]
        else:
            file_path = _get_file_path(file)
            if file_path is not None:
                content_hash = _compute_hash_streaming(file_path)
            else:
                content_hash = hashlib.sha256(file.read()).hexdigest()[:16]

        filename = file.filename or "file"
        safe_filename = "".join(
            c if c.isalnum() or c in ".-_" else "_" for c in filename
        )
        return f"{self._prefix}/{content_hash}_{safe_filename}"

    def _build_s3_uri(self, key: str) -> str:
        """Build an S3 URI from a key.

        Args:
            key: The S3 object key.

        Returns:
            S3 URI string.
        """
        return f"s3://{self.bucket_name}/{key}"

    @staticmethod
    def _get_transfer_config() -> Any:
        """Get boto3 TransferConfig for multipart uploads."""
        from boto3.s3.transfer import TransferConfig

        return TransferConfig(
            multipart_threshold=MULTIPART_THRESHOLD,
            multipart_chunksize=MULTIPART_CHUNKSIZE,
            max_concurrency=MAX_CONCURRENCY,
        )

    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        """Upload a file to S3 for use with Bedrock.

        Uses streaming upload with automatic multipart for large files.
        For FilePath sources, streams directly from disk without loading into memory.

        Args:
            file: The file to upload.
            purpose: Optional purpose (unused, kept for interface consistency).

        Returns:
            UploadResult with the S3 URI and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, throttling).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        import io

        try:
            client = self._get_client()
            transfer_config = self._get_transfer_config()
            file_path = _get_file_path(file)

            if file_path is not None:
                file_size = file_path.stat().st_size
                s3_key = self._generate_s3_key(file)

                logger.info(
                    f"Uploading file '{file.filename}' to S3 bucket "
                    f"'{self.bucket_name}' ({file_size} bytes, streaming)"
                )

                with open(file_path, "rb") as f:
                    client.upload_fileobj(
                        f,
                        self.bucket_name,
                        s3_key,
                        ExtraArgs={"ContentType": file.content_type},
                        Config=transfer_config,
                    )
            else:
                content = file.read()
                s3_key = self._generate_s3_key(file, content)

                logger.info(
                    f"Uploading file '{file.filename}' to S3 bucket "
                    f"'{self.bucket_name}' ({len(content)} bytes)"
                )

                client.upload_fileobj(
                    io.BytesIO(content),
                    self.bucket_name,
                    s3_key,
                    ExtraArgs={"ContentType": file.content_type},
                    Config=transfer_config,
                )

            s3_uri = self._build_s3_uri(s3_key)
            logger.info(f"Uploaded to S3: {s3_uri}")

            return UploadResult(
                file_id=s3_key,
                file_uri=s3_uri,
                content_type=file.content_type,
                expires_at=None,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except Exception as e:
            raise _classify_s3_error(e, file.filename) from e

    def delete(self, file_id: str) -> bool:
        """Delete an uploaded file from S3.

        Args:
            file_id: The S3 key to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_client()
            client.delete_object(Bucket=self.bucket_name, Key=file_id)
            logger.info(f"Deleted S3 object: s3://{self.bucket_name}/{file_id}")
            return True
        except Exception as e:
            logger.warning(
                f"Failed to delete S3 object s3://{self.bucket_name}/{file_id}: {e}"
            )
            return False

    def get_file_info(self, file_id: str) -> dict[str, Any] | None:
        """Get information about an uploaded file.

        Args:
            file_id: The S3 key.

        Returns:
            Dictionary with file information, or None if not found.
        """
        try:
            client = self._get_client()
            response = client.head_object(Bucket=self.bucket_name, Key=file_id)
            return {
                "id": file_id,
                "uri": self._build_s3_uri(file_id),
                "content_type": response.get("ContentType"),
                "size": response.get("ContentLength"),
                "last_modified": response.get("LastModified"),
                "etag": response.get("ETag"),
            }
        except Exception as e:
            logger.debug(f"Failed to get S3 object info for {file_id}: {e}")
            return None

    def list_files(self) -> list[dict[str, Any]]:
        """List all uploaded files in the configured prefix.

        Returns:
            List of dictionaries with file information.
        """
        try:
            client = self._get_client()
            response = client.list_objects_v2(
                Bucket=self.bucket_name,
                Prefix=self._prefix,
            )
            return [
                {
                    "id": obj["Key"],
                    "uri": self._build_s3_uri(obj["Key"]),
                    "size": obj.get("Size"),
                    "last_modified": obj.get("LastModified"),
                    "etag": obj.get("ETag"),
                }
                for obj in response.get("Contents", [])
            ]
        except Exception as e:
            logger.warning(f"Failed to list S3 objects: {e}")
            return []

    async def aupload(
        self, file: FileInput, purpose: str | None = None
    ) -> UploadResult:
        """Async upload a file to S3 for use with Bedrock.

        Uses streaming upload with automatic multipart for large files.
        For FilePath sources, streams directly from disk without loading into memory.

        Args:
            file: The file to upload.
            purpose: Optional purpose (unused, kept for interface consistency).

        Returns:
            UploadResult with the S3 URI and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, throttling).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        import io

        import aiofiles

        try:
            session = self._get_async_client()
            transfer_config = self._get_transfer_config()
            file_path = _get_file_path(file)

            if file_path is not None:
                file_size = file_path.stat().st_size
                s3_key = self._generate_s3_key(file)

                logger.info(
                    f"Uploading file '{file.filename}' to S3 bucket "
                    f"'{self.bucket_name}' ({file_size} bytes, streaming)"
                )

                async with session.client("s3", region_name=self._region) as client:
                    async with aiofiles.open(file_path, "rb") as f:
                        await client.upload_fileobj(
                            f,
                            self.bucket_name,
                            s3_key,
                            ExtraArgs={"ContentType": file.content_type},
                            Config=transfer_config,
                        )
            else:
                content = await file.aread()
                s3_key = self._generate_s3_key(file, content)

                logger.info(
                    f"Uploading file '{file.filename}' to S3 bucket "
                    f"'{self.bucket_name}' ({len(content)} bytes)"
                )

                async with session.client("s3", region_name=self._region) as client:
                    await client.upload_fileobj(
                        io.BytesIO(content),
                        self.bucket_name,
                        s3_key,
                        ExtraArgs={"ContentType": file.content_type},
                        Config=transfer_config,
                    )

            s3_uri = self._build_s3_uri(s3_key)
            logger.info(f"Uploaded to S3: {s3_uri}")

            return UploadResult(
                file_id=s3_key,
                file_uri=s3_uri,
                content_type=file.content_type,
                expires_at=None,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except Exception as e:
            raise _classify_s3_error(e, file.filename) from e

    async def adelete(self, file_id: str) -> bool:
        """Async delete an uploaded file from S3.

        Args:
            file_id: The S3 key to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            session = self._get_async_client()
            async with session.client("s3", region_name=self._region) as client:
                await client.delete_object(Bucket=self.bucket_name, Key=file_id)
            logger.info(f"Deleted S3 object: s3://{self.bucket_name}/{file_id}")
            return True
        except Exception as e:
            logger.warning(
                f"Failed to delete S3 object s3://{self.bucket_name}/{file_id}: {e}"
            )
            return False
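
# --- Configuration sketch (not part of the original module) ---
# The uploader reads its bucket from CREWAI_BEDROCK_S3_BUCKET when no
# bucket_name is passed; the bucket and region below are hypothetical.
import os

os.environ.setdefault("CREWAI_BEDROCK_S3_BUCKET", "my-crewai-files")

uploader = BedrockFileUploader(region="us-east-1")
# upload() returns an UploadResult whose file_uri is an s3:// URI, the form
# Bedrock's Converse API expects for s3Location sources.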
@@ -1,216 +0,0 @@
"""Factory for creating file uploaders."""

from __future__ import annotations

import logging
from typing import Any as AnyType, Literal, TypeAlias, TypedDict, overload

from typing_extensions import NotRequired, Unpack

from crewai_files.uploaders.anthropic import AnthropicFileUploader
from crewai_files.uploaders.bedrock import BedrockFileUploader
from crewai_files.uploaders.gemini import GeminiFileUploader
from crewai_files.uploaders.openai import OpenAIFileUploader


logger = logging.getLogger(__name__)


FileUploaderType: TypeAlias = (
    GeminiFileUploader
    | AnthropicFileUploader
    | BedrockFileUploader
    | OpenAIFileUploader
)

GeminiProviderType = Literal["gemini", "google"]
AnthropicProviderType = Literal["anthropic", "claude"]
OpenAIProviderType = Literal["openai", "gpt", "azure"]
BedrockProviderType = Literal["bedrock", "aws"]

ProviderType: TypeAlias = (
    GeminiProviderType
    | AnthropicProviderType
    | OpenAIProviderType
    | BedrockProviderType
)


class _BaseOpts(TypedDict):
    """Kwargs for uploader factory."""

    api_key: NotRequired[str | None]
    client: NotRequired[AnyType]
    async_client: NotRequired[AnyType]


class OpenAIOpts(_BaseOpts):
    """Kwargs for openai uploader factory."""

    chunk_size: NotRequired[int]


class GeminiOpts(TypedDict):
    """Kwargs for gemini uploader factory."""

    api_key: NotRequired[str | None]
    client: NotRequired[AnyType]


class AnthropicOpts(_BaseOpts):
    """Kwargs for anthropic uploader factory."""


class BedrockOpts(TypedDict):
    """Kwargs for bedrock uploader factory."""

    bucket_name: NotRequired[str | None]
    bucket_owner: NotRequired[str | None]
    prefix: NotRequired[str]
    region: NotRequired[str | None]
    client: NotRequired[AnyType]
    async_client: NotRequired[AnyType]


class AllOptions(TypedDict):
    """Union of all kwargs accepted by the uploader factory."""

    api_key: NotRequired[str | None]
    chunk_size: NotRequired[int]
    bucket_name: NotRequired[str | None]
    bucket_owner: NotRequired[str | None]
    prefix: NotRequired[str]
    region: NotRequired[str | None]
    client: NotRequired[AnyType]
    async_client: NotRequired[AnyType]


@overload
def get_uploader(
    provider: GeminiProviderType,
    **kwargs: Unpack[GeminiOpts],
) -> GeminiFileUploader:
    """Get Gemini file uploader."""


@overload
def get_uploader(
    provider: AnthropicProviderType,
    **kwargs: Unpack[AnthropicOpts],
) -> AnthropicFileUploader:
    """Get Anthropic file uploader."""


@overload
def get_uploader(
    provider: OpenAIProviderType,
    **kwargs: Unpack[OpenAIOpts],
) -> OpenAIFileUploader:
    """Get OpenAI file uploader."""


@overload
def get_uploader(
    provider: BedrockProviderType,
    **kwargs: Unpack[BedrockOpts],
) -> BedrockFileUploader:
    """Get Bedrock file uploader."""


@overload
def get_uploader(
    provider: ProviderType, **kwargs: Unpack[AllOptions]
) -> FileUploaderType | None:
    """Get any file uploader."""


def get_uploader(
    provider: ProviderType, **kwargs: Unpack[AllOptions]
) -> FileUploaderType | None:
    """Get a file uploader for a specific provider.

    Args:
        provider: Provider name (e.g., "gemini", "anthropic").
        **kwargs: Additional arguments passed to the uploader constructor.

    Returns:
        FileUploader instance for the provider, or None if not supported
        or not configured.

    Raises:
        ImportError: If the matched provider's SDK is not installed.
    """
    provider_lower = provider.lower()

    if "gemini" in provider_lower or "google" in provider_lower:
        try:
            from crewai_files.uploaders.gemini import GeminiFileUploader

            return GeminiFileUploader(
                api_key=kwargs.get("api_key"),
                client=kwargs.get("client"),
            )
        except ImportError:
            logger.warning(
                "google-genai not installed. Install with: pip install google-genai"
            )
            raise

    if "anthropic" in provider_lower or "claude" in provider_lower:
        try:
            from crewai_files.uploaders.anthropic import AnthropicFileUploader

            return AnthropicFileUploader(
                api_key=kwargs.get("api_key"),
                client=kwargs.get("client"),
                async_client=kwargs.get("async_client"),
            )
        except ImportError:
            logger.warning(
                "anthropic not installed. Install with: pip install anthropic"
            )
            raise

    if (
        "openai" in provider_lower
        or "gpt" in provider_lower
        or "azure" in provider_lower
    ):
        try:
            from crewai_files.uploaders.openai import OpenAIFileUploader

            return OpenAIFileUploader(
                api_key=kwargs.get("api_key"),
                chunk_size=kwargs.get("chunk_size", 67_108_864),
                client=kwargs.get("client"),
                async_client=kwargs.get("async_client"),
            )
        except ImportError:
            logger.warning("openai not installed. Install with: pip install openai")
            raise

    if "bedrock" in provider_lower or "aws" in provider_lower:
        import os

        if (
            not os.environ.get("CREWAI_BEDROCK_S3_BUCKET")
            and "bucket_name" not in kwargs
        ):
            logger.debug(
                "Bedrock S3 uploader not configured. "
                "Set CREWAI_BEDROCK_S3_BUCKET environment variable to enable."
            )
            return None
        try:
            from crewai_files.uploaders.bedrock import BedrockFileUploader

            return BedrockFileUploader(
                bucket_name=kwargs.get("bucket_name"),
                bucket_owner=kwargs.get("bucket_owner"),
                prefix=kwargs.get("prefix", "crewai-files"),
                region=kwargs.get("region"),
                client=kwargs.get("client"),
                async_client=kwargs.get("async_client"),
            )
        except ImportError:
            logger.warning("boto3 not installed. Install with: pip install boto3")
            raise

    logger.debug(f"No file uploader available for provider: {provider}")
    return None
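
# --- Usage sketch (not part of the original module) ---
# Provider matching is substring-based, and get_uploader() returns None when
# no uploader matches or Bedrock is unconfigured, so callers should guard.
uploader = get_uploader("gemini")
if uploader is not None:
    print(uploader.provider_name)  # "gemini"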
@@ -1,448 +0,0 @@
"""Gemini File API uploader implementation."""

from __future__ import annotations

import asyncio
from datetime import datetime, timezone
import io
import logging
import os
from pathlib import Path
import random
import time
from typing import Any

from crewai_files.core.constants import (
    BACKOFF_BASE_DELAY,
    BACKOFF_JITTER_FACTOR,
    BACKOFF_MAX_DELAY,
    GEMINI_FILE_TTL,
)
from crewai_files.core.sources import FilePath
from crewai_files.core.types import FileInput
from crewai_files.processing.exceptions import (
    PermanentUploadError,
    TransientUploadError,
    classify_upload_error,
)
from crewai_files.uploaders.base import FileUploader, UploadResult


logger = logging.getLogger(__name__)


def _compute_backoff_delay(attempt: int) -> float:
    """Compute exponential backoff delay with jitter.

    Args:
        attempt: The current attempt number (0-indexed).

    Returns:
        Delay in seconds with jitter applied.
    """
    delay: float = min(BACKOFF_BASE_DELAY * (2**attempt), BACKOFF_MAX_DELAY)
    jitter: float = random.uniform(0, delay * BACKOFF_JITTER_FACTOR)  # noqa: S311
    return float(delay + jitter)


def _classify_gemini_error(e: Exception, filename: str | None) -> Exception:
    """Classify a Gemini exception as transient or permanent upload error.

    Checks Gemini-specific error message patterns first, then falls back
    to generic status code classification.

    Args:
        e: The exception to classify.
        filename: The filename for error context.

    Returns:
        A TransientUploadError or PermanentUploadError wrapping the original.
    """
    error_msg = str(e).lower()

    if "quota" in error_msg or "rate" in error_msg or "limit" in error_msg:
        return TransientUploadError(f"Rate limit error: {e}", file_name=filename)
    if "auth" in error_msg or "permission" in error_msg or "denied" in error_msg:
        return PermanentUploadError(
            f"Authentication/permission error: {e}", file_name=filename
        )
    if "invalid" in error_msg or "unsupported" in error_msg:
        return PermanentUploadError(f"Invalid request: {e}", file_name=filename)

    return classify_upload_error(e, filename)


def _get_file_path(file: FileInput) -> Path | None:
    """Get the filesystem path if file source is FilePath.

    Args:
        file: The file input to check.

    Returns:
        Path if source is FilePath, None otherwise.
    """
    source = file._file_source
    if isinstance(source, FilePath):
        return source.path
    return None


class GeminiFileUploader(FileUploader):
    """Uploader for Google Gemini File API.

    Uses the google-genai SDK to upload files. Files are stored for 48 hours.
    """

    def __init__(
        self,
        api_key: str | None = None,
        client: Any = None,
    ) -> None:
        """Initialize the Gemini uploader.

        Args:
            api_key: Optional Google API key. If not provided, uses
                GOOGLE_API_KEY environment variable.
            client: Optional pre-instantiated Gemini client.
        """
        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
        self._client: Any = client

    @property
    def provider_name(self) -> str:
        """Return the provider name."""
        return "gemini"

    def _get_client(self) -> Any:
        """Get or create the Gemini client."""
        if self._client is None:
            try:
                from google import genai

                self._client = genai.Client(api_key=self._api_key)
            except ImportError as e:
                raise ImportError(
                    "google-genai is required for Gemini file uploads. "
                    "Install with: pip install google-genai"
                ) from e
        return self._client

    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        """Upload a file to Gemini.

        For FilePath sources, passes the path directly to the SDK which handles
        streaming internally via resumable uploads, avoiding memory overhead.

        Args:
            file: The file to upload.
            purpose: Optional purpose/description (used as display name).

        Returns:
            UploadResult with the file URI and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            client = self._get_client()
            display_name = purpose or file.filename

            file_path = _get_file_path(file)
            if file_path is not None:
                file_size = file_path.stat().st_size
                logger.info(
                    f"Uploading file '{file.filename}' to Gemini via path "
                    f"({file_size} bytes, streaming)"
                )
                uploaded_file = client.files.upload(
                    file=file_path,
                    config={
                        "display_name": display_name,
                        "mime_type": file.content_type,
                    },
                )
            else:
                content = file.read()
                file_data = io.BytesIO(content)
                file_data.name = file.filename

                logger.info(
                    f"Uploading file '{file.filename}' to Gemini ({len(content)} bytes)"
                )

                uploaded_file = client.files.upload(
                    file=file_data,
                    config={
                        "display_name": display_name,
                        "mime_type": file.content_type,
                    },
                )

            if file.content_type.startswith("video/"):
                if not self.wait_for_processing(uploaded_file.name):
                    raise PermanentUploadError(
                        f"Video processing failed for {file.filename}",
                        file_name=file.filename,
                    )

            expires_at = datetime.now(timezone.utc) + GEMINI_FILE_TTL

            logger.info(
                f"Uploaded to Gemini: {uploaded_file.name} (URI: {uploaded_file.uri})"
            )

            return UploadResult(
                file_id=uploaded_file.name,
                file_uri=uploaded_file.uri,
                content_type=file.content_type,
                expires_at=expires_at,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except (TransientUploadError, PermanentUploadError):
            raise
        except Exception as e:
            raise _classify_gemini_error(e, file.filename) from e

    async def aupload(
        self, file: FileInput, purpose: str | None = None
    ) -> UploadResult:
        """Async upload a file to Gemini using native async client.

        For FilePath sources, passes the path directly to the SDK which handles
        streaming internally via resumable uploads, avoiding memory overhead.

        Args:
            file: The file to upload.
            purpose: Optional purpose/description (used as display name).

        Returns:
            UploadResult with the file URI and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            client = self._get_client()
            display_name = purpose or file.filename

            file_path = _get_file_path(file)
            if file_path is not None:
                file_size = file_path.stat().st_size
                logger.info(
                    f"Uploading file '{file.filename}' to Gemini via path "
                    f"({file_size} bytes, streaming)"
                )
                uploaded_file = await client.aio.files.upload(
                    file=file_path,
                    config={
                        "display_name": display_name,
                        "mime_type": file.content_type,
                    },
                )
            else:
                content = await file.aread()
                file_data = io.BytesIO(content)
                file_data.name = file.filename

                logger.info(
                    f"Uploading file '{file.filename}' to Gemini ({len(content)} bytes)"
                )

                uploaded_file = await client.aio.files.upload(
                    file=file_data,
                    config={
                        "display_name": display_name,
                        "mime_type": file.content_type,
                    },
                )

            if file.content_type.startswith("video/"):
                if not await self.await_for_processing(uploaded_file.name):
                    raise PermanentUploadError(
                        f"Video processing failed for {file.filename}",
                        file_name=file.filename,
                    )

            expires_at = datetime.now(timezone.utc) + GEMINI_FILE_TTL

            logger.info(
                f"Uploaded to Gemini: {uploaded_file.name} (URI: {uploaded_file.uri})"
            )

            return UploadResult(
                file_id=uploaded_file.name,
                file_uri=uploaded_file.uri,
                content_type=file.content_type,
                expires_at=expires_at,
                provider=self.provider_name,
            )
        except ImportError:
            raise
        except (TransientUploadError, PermanentUploadError):
            raise
        except Exception as e:
            raise _classify_gemini_error(e, file.filename) from e

    def delete(self, file_id: str) -> bool:
        """Delete an uploaded file from Gemini.

        Args:
            file_id: The file name/ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_client()
            client.files.delete(name=file_id)
            logger.info(f"Deleted Gemini file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete Gemini file {file_id}: {e}")
            return False

    async def adelete(self, file_id: str) -> bool:
        """Async delete an uploaded file from Gemini.

        Args:
            file_id: The file name/ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_client()
            await client.aio.files.delete(name=file_id)
            logger.info(f"Deleted Gemini file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete Gemini file {file_id}: {e}")
            return False

    def get_file_info(self, file_id: str) -> dict[str, Any] | None:
        """Get information about an uploaded file.

        Args:
            file_id: The file name/ID.

        Returns:
            Dictionary with file information, or None if not found.
        """
        try:
            client = self._get_client()
            file_info = client.files.get(name=file_id)
            return {
                "name": file_info.name,
                "uri": file_info.uri,
                "display_name": file_info.display_name,
                "mime_type": file_info.mime_type,
                "size_bytes": file_info.size_bytes,
                "state": str(file_info.state),
                "create_time": file_info.create_time,
                "expiration_time": file_info.expiration_time,
            }
        except Exception as e:
            logger.debug(f"Failed to get Gemini file info for {file_id}: {e}")
            return None

    def list_files(self) -> list[dict[str, Any]]:
        """List all uploaded files.

        Returns:
            List of dictionaries with file information.
        """
        try:
            client = self._get_client()
            files = client.files.list()
            return [
                {
                    "name": f.name,
                    "uri": f.uri,
                    "display_name": f.display_name,
                    "mime_type": f.mime_type,
                    "size_bytes": f.size_bytes,
                    "state": str(f.state),
                }
                for f in files
            ]
        except Exception as e:
            logger.warning(f"Failed to list Gemini files: {e}")
            return []

    def wait_for_processing(self, file_id: str, timeout_seconds: int = 300) -> bool:
        """Wait for a file to finish processing with exponential backoff.

        Some files (especially videos) need time to process after upload.

        Args:
            file_id: The file name/ID.
            timeout_seconds: Maximum time to wait.

        Returns:
            True if processing completed, False if timed out or failed.
        """
        try:
            from google.genai.types import FileState
        except ImportError:
            return True

        client = self._get_client()
        start_time = time.time()
        attempt = 0

        while time.time() - start_time < timeout_seconds:
            file_info = client.files.get(name=file_id)

            if file_info.state == FileState.ACTIVE:
                return True
            if file_info.state == FileState.FAILED:
                logger.error(f"Gemini file processing failed: {file_id}")
                return False

            time.sleep(_compute_backoff_delay(attempt))
            attempt += 1

        logger.warning(f"Timed out waiting for Gemini file processing: {file_id}")
        return False

    async def await_for_processing(
        self, file_id: str, timeout_seconds: int = 300
    ) -> bool:
        """Async wait for a file to finish processing with exponential backoff.

        Some files (especially videos) need time to process after upload.
|
||||
|
||||
Args:
|
||||
file_id: The file name/ID.
|
||||
timeout_seconds: Maximum time to wait.
|
||||
|
||||
Returns:
|
||||
True if processing completed, False if timed out or failed.
|
||||
"""
|
||||
try:
|
||||
from google.genai.types import FileState
|
||||
except ImportError:
|
||||
return True
|
||||
|
||||
client = self._get_client()
|
||||
start_time = time.time()
|
||||
attempt = 0
|
||||
|
||||
while time.time() - start_time < timeout_seconds:
|
||||
file_info = await client.aio.files.get(name=file_id)
|
||||
|
||||
if file_info.state == FileState.ACTIVE:
|
||||
return True
|
||||
if file_info.state == FileState.FAILED:
|
||||
logger.error(f"Gemini file processing failed: {file_id}")
|
||||
return False
|
||||
|
||||
await asyncio.sleep(_compute_backoff_delay(attempt))
|
||||
attempt += 1
|
||||
|
||||
logger.warning(f"Timed out waiting for Gemini file processing: {file_id}")
|
||||
return False
|
||||
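Note: both wait helpers above poll with _compute_backoff_delay, whose body sits
outside this hunk. A minimal sketch of what such a helper could look like,
assuming a capped exponential backoff; the 1-second base and 30-second cap are
illustrative values, not taken from the source:

    def _compute_backoff_delay(attempt: int, base: float = 1.0, cap: float = 30.0) -> float:
        """Return the polling delay for a given attempt: base * 2**attempt, clamped to cap."""
        # attempt 0 -> 1s, attempt 1 -> 2s, attempt 2 -> 4s, ... then pinned at 30s
        return min(base * (2.0**attempt), cap)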
@@ -1,695 +0,0 @@
"""OpenAI Files API uploader implementation."""

from __future__ import annotations

from collections.abc import AsyncIterator, Iterator
import io
import logging
import os
from typing import Any

from crewai_files.core.constants import DEFAULT_UPLOAD_CHUNK_SIZE, FILES_API_MAX_SIZE
from crewai_files.core.sources import FileBytes, FilePath, FileStream, generate_filename
from crewai_files.core.types import FileInput
from crewai_files.processing.exceptions import (
    PermanentUploadError,
    TransientUploadError,
    classify_upload_error,
)
from crewai_files.uploaders.base import FileUploader, UploadResult


logger = logging.getLogger(__name__)


def _get_purpose_for_content_type(content_type: str, purpose: str | None) -> str:
    """Get the appropriate purpose for a file based on content type.

    OpenAI Files API requires different purposes for different file types:
    - Images (for Responses API vision): "vision"
    - PDFs and other documents: "user_data"

    Args:
        content_type: MIME type of the file.
        purpose: Optional explicit purpose override.

    Returns:
        The purpose string to use for upload.
    """
    if purpose is not None:
        return purpose
    if content_type.startswith("image/"):
        return "vision"
    return "user_data"


def _get_file_size(file: FileInput) -> int | None:
    """Get file size without reading content if possible.

    Args:
        file: The file to get size for.

    Returns:
        File size in bytes, or None if size cannot be determined without reading.
    """
    source = file._file_source
    if isinstance(source, FilePath):
        return source.path.stat().st_size
    if isinstance(source, FileBytes):
        return len(source.data)
    return None


def _iter_file_chunks(file: FileInput, chunk_size: int) -> Iterator[bytes]:
    """Iterate over file content in chunks.

    Args:
        file: The file to read.
        chunk_size: Size of each chunk in bytes.

    Yields:
        Chunks of file content.
    """
    source = file._file_source
    if isinstance(source, (FilePath, FileBytes, FileStream)):
        yield from source.read_chunks(chunk_size)
    else:
        content = file.read()
        for i in range(0, len(content), chunk_size):
            yield content[i : i + chunk_size]


async def _aiter_file_chunks(
    file: FileInput, chunk_size: int, content: bytes | None = None
) -> AsyncIterator[bytes]:
    """Async iterate over file content in chunks.

    Args:
        file: The file to read.
        chunk_size: Size of each chunk in bytes.
        content: Optional pre-loaded content to chunk.

    Yields:
        Chunks of file content.
    """
    if content is not None:
        for i in range(0, len(content), chunk_size):
            yield content[i : i + chunk_size]
        return

    source = file._file_source
    if isinstance(source, FilePath):
        async for chunk in source.aread_chunks(chunk_size):
            yield chunk
    elif isinstance(source, (FileBytes, FileStream)):
        for chunk in source.read_chunks(chunk_size):
            yield chunk
    else:
        data = await file.aread()
        for i in range(0, len(data), chunk_size):
            yield data[i : i + chunk_size]

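# Illustration (not part of the original module): the helpers above turn any
# source into fixed-size pieces. For a hypothetical 150-byte FileBytes source
# and a 64-byte chunk size, read_chunks yields pieces of 64, 64, and 22 bytes:
#
#     demo = FileBytes(data=b"x" * 150, filename="demo.bin")
#     assert [len(c) for c in demo.read_chunks(64)] == [64, 64, 22]
#
# The multipart upload paths below rely on exactly this arithmetic when they
# number parts.
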
class OpenAIFileUploader(FileUploader):
    """Uploader for OpenAI Files and Uploads APIs.

    Uses the Files API for files up to 512MB (single request).
    Uses the Uploads API for files larger than 512MB (multipart chunked).
    """

    def __init__(
        self,
        api_key: str | None = None,
        chunk_size: int = DEFAULT_UPLOAD_CHUNK_SIZE,
        client: Any = None,
        async_client: Any = None,
    ) -> None:
        """Initialize the OpenAI uploader.

        Args:
            api_key: Optional OpenAI API key. If not provided, uses
                OPENAI_API_KEY environment variable.
            chunk_size: Chunk size in bytes for multipart uploads (default 64MB).
            client: Optional pre-instantiated OpenAI client.
            async_client: Optional pre-instantiated async OpenAI client.
        """
        self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
        self._chunk_size = chunk_size
        self._client: Any = client
        self._async_client: Any = async_client

    @property
    def provider_name(self) -> str:
        """Return the provider name."""
        return "openai"

    def _build_upload_result(self, file_id: str, content_type: str) -> UploadResult:
        """Build an UploadResult for a completed upload.

        Args:
            file_id: The uploaded file ID.
            content_type: The file's content type.

        Returns:
            UploadResult with the file metadata.
        """
        return UploadResult(
            file_id=file_id,
            file_uri=None,
            content_type=content_type,
            expires_at=None,
            provider=self.provider_name,
        )

    def _get_client(self) -> Any:
        """Get or create the OpenAI client."""
        if self._client is None:
            try:
                from openai import OpenAI

                self._client = OpenAI(api_key=self._api_key)
            except ImportError as e:
                raise ImportError(
                    "openai is required for OpenAI file uploads. "
                    "Install with: pip install openai"
                ) from e
        return self._client

    def _get_async_client(self) -> Any:
        """Get or create the async OpenAI client."""
        if self._async_client is None:
            try:
                from openai import AsyncOpenAI

                self._async_client = AsyncOpenAI(api_key=self._api_key)
            except ImportError as e:
                raise ImportError(
                    "openai is required for OpenAI file uploads. "
                    "Install with: pip install openai"
                ) from e
        return self._async_client

    def upload(self, file: FileInput, purpose: str | None = None) -> UploadResult:
        """Upload a file to OpenAI.

        Uses Files API for files <= 512MB, Uploads API for larger files.
        For large files, streams chunks to avoid loading entire file in memory.

        Args:
            file: The file to upload.
            purpose: Optional purpose for the file (default: "user_data").

        Returns:
            UploadResult with the file ID and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            file_size = _get_file_size(file)

            if file_size is not None and file_size > FILES_API_MAX_SIZE:
                return self._upload_multipart_streaming(file, file_size, purpose)

            content = file.read()
            if len(content) > FILES_API_MAX_SIZE:
                return self._upload_multipart(file, content, purpose)
            return self._upload_simple(file, content, purpose)
        except ImportError:
            raise
        except (TransientUploadError, PermanentUploadError):
            raise
        except Exception as e:
            raise classify_upload_error(e, file.filename) from e

    def _upload_simple(
        self,
        file: FileInput,
        content: bytes,
        purpose: str | None,
    ) -> UploadResult:
        """Upload using the Files API (single request, up to 512MB).

        Args:
            file: The file to upload.
            content: File content bytes.
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)
        filename = file.filename or generate_filename(file.content_type)

        file_data = io.BytesIO(content)
        file_data.name = filename

        logger.info(
            f"Uploading file '{filename}' to OpenAI Files API ({len(content)} bytes)"
        )

        uploaded_file = client.files.create(
            file=file_data,
            purpose=file_purpose,
        )

        logger.info(f"Uploaded to OpenAI: {uploaded_file.id}")

        return self._build_upload_result(uploaded_file.id, file.content_type)

    def _upload_multipart(
        self,
        file: FileInput,
        content: bytes,
        purpose: str | None,
    ) -> UploadResult:
        """Upload using the Uploads API with content already in memory.

        Args:
            file: The file to upload.
            content: File content bytes (already loaded).
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)
        filename = file.filename or generate_filename(file.content_type)
        file_size = len(content)

        logger.info(
            f"Uploading file '{filename}' to OpenAI Uploads API "
            f"({file_size} bytes, {self._chunk_size} byte chunks)"
        )

        upload = client.uploads.create(
            bytes=file_size,
            filename=filename,
            mime_type=file.content_type,
            purpose=file_purpose,
        )

        part_ids: list[str] = []
        offset = 0
        part_num = 1

        try:
            while offset < file_size:
                chunk = content[offset : offset + self._chunk_size]
                chunk_io = io.BytesIO(chunk)

                logger.debug(
                    f"Uploading part {part_num} ({len(chunk)} bytes, offset {offset})"
                )

                part = client.uploads.parts.create(
                    upload_id=upload.id,
                    data=chunk_io,
                )
                part_ids.append(part.id)

                offset += self._chunk_size
                part_num += 1

            completed = client.uploads.complete(
                upload_id=upload.id,
                part_ids=part_ids,
            )

            file_id = completed.file.id if completed.file else upload.id
            logger.info(f"Completed multipart upload to OpenAI: {file_id}")

            return self._build_upload_result(file_id, file.content_type)
        except Exception:
            logger.warning(f"Multipart upload failed, cancelling upload {upload.id}")
            try:
                client.uploads.cancel(upload_id=upload.id)
            except Exception as cancel_err:
                logger.debug(f"Failed to cancel upload: {cancel_err}")
            raise

    def _upload_multipart_streaming(
        self,
        file: FileInput,
        file_size: int,
        purpose: str | None,
    ) -> UploadResult:
        """Upload using the Uploads API with streaming chunks.

        Streams chunks directly from the file source without loading
        the entire file into memory. Used for large files.

        Args:
            file: The file to upload.
            file_size: Total file size in bytes.
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)
        filename = file.filename or generate_filename(file.content_type)

        logger.info(
            f"Uploading file '{filename}' to OpenAI Uploads API (streaming) "
            f"({file_size} bytes, {self._chunk_size} byte chunks)"
        )

        upload = client.uploads.create(
            bytes=file_size,
            filename=filename,
            mime_type=file.content_type,
            purpose=file_purpose,
        )

        part_ids: list[str] = []
        part_num = 1

        try:
            for chunk in _iter_file_chunks(file, self._chunk_size):
                chunk_io = io.BytesIO(chunk)

                logger.debug(f"Uploading part {part_num} ({len(chunk)} bytes)")

                part = client.uploads.parts.create(
                    upload_id=upload.id,
                    data=chunk_io,
                )
                part_ids.append(part.id)
                part_num += 1

            completed = client.uploads.complete(
                upload_id=upload.id,
                part_ids=part_ids,
            )

            file_id = completed.file.id if completed.file else upload.id
            logger.info(f"Completed streaming multipart upload to OpenAI: {file_id}")

            return self._build_upload_result(file_id, file.content_type)
        except Exception:
            logger.warning(f"Multipart upload failed, cancelling upload {upload.id}")
            try:
                client.uploads.cancel(upload_id=upload.id)
            except Exception as cancel_err:
                logger.debug(f"Failed to cancel upload: {cancel_err}")
            raise

    def delete(self, file_id: str) -> bool:
        """Delete an uploaded file from OpenAI.

        Args:
            file_id: The file ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_client()
            client.files.delete(file_id)
            logger.info(f"Deleted OpenAI file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete OpenAI file {file_id}: {e}")
            return False

    def get_file_info(self, file_id: str) -> dict[str, Any] | None:
        """Get information about an uploaded file.

        Args:
            file_id: The file ID.

        Returns:
            Dictionary with file information, or None if not found.
        """
        try:
            client = self._get_client()
            file_info = client.files.retrieve(file_id)
            return {
                "id": file_info.id,
                "filename": file_info.filename,
                "purpose": file_info.purpose,
                "bytes": file_info.bytes,
                "created_at": file_info.created_at,
                "status": file_info.status,
            }
        except Exception as e:
            logger.debug(f"Failed to get OpenAI file info for {file_id}: {e}")
            return None

    def list_files(self) -> list[dict[str, Any]]:
        """List all uploaded files.

        Returns:
            List of dictionaries with file information.
        """
        try:
            client = self._get_client()
            files = client.files.list()
            return [
                {
                    "id": f.id,
                    "filename": f.filename,
                    "purpose": f.purpose,
                    "bytes": f.bytes,
                    "created_at": f.created_at,
                    "status": f.status,
                }
                for f in files.data
            ]
        except Exception as e:
            logger.warning(f"Failed to list OpenAI files: {e}")
            return []

    async def aupload(
        self, file: FileInput, purpose: str | None = None
    ) -> UploadResult:
        """Async upload a file to OpenAI using native async client.

        Uses Files API for files <= 512MB, Uploads API for larger files.
        For large files, streams chunks to avoid loading entire file in memory.

        Args:
            file: The file to upload.
            purpose: Optional purpose for the file (default: "user_data").

        Returns:
            UploadResult with the file ID and metadata.

        Raises:
            TransientUploadError: For retryable errors (network, rate limits).
            PermanentUploadError: For non-retryable errors (auth, validation).
        """
        try:
            file_size = _get_file_size(file)

            if file_size is not None and file_size > FILES_API_MAX_SIZE:
                return await self._aupload_multipart_streaming(file, file_size, purpose)

            content = await file.aread()
            if len(content) > FILES_API_MAX_SIZE:
                return await self._aupload_multipart(file, content, purpose)
            return await self._aupload_simple(file, content, purpose)
        except ImportError:
            raise
        except (TransientUploadError, PermanentUploadError):
            raise
        except Exception as e:
            raise classify_upload_error(e, file.filename) from e

    async def _aupload_simple(
        self,
        file: FileInput,
        content: bytes,
        purpose: str | None,
    ) -> UploadResult:
        """Async upload using the Files API (single request, up to 512MB).

        Args:
            file: The file to upload.
            content: File content bytes.
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_async_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)

        file_data = io.BytesIO(content)
        file_data.name = file.filename or generate_filename(file.content_type)

        logger.info(
            f"Uploading file '{file.filename}' to OpenAI Files API ({len(content)} bytes)"
        )

        uploaded_file = await client.files.create(
            file=file_data,
            purpose=file_purpose,
        )

        logger.info(f"Uploaded to OpenAI: {uploaded_file.id}")

        return self._build_upload_result(uploaded_file.id, file.content_type)

    async def _aupload_multipart(
        self,
        file: FileInput,
        content: bytes,
        purpose: str | None,
    ) -> UploadResult:
        """Async upload using the Uploads API (multipart chunked, up to 8GB).

        Args:
            file: The file to upload.
            content: File content bytes.
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_async_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)
        filename = file.filename or generate_filename(file.content_type)
        file_size = len(content)

        logger.info(
            f"Uploading file '{filename}' to OpenAI Uploads API "
            f"({file_size} bytes, {self._chunk_size} byte chunks)"
        )

        upload = await client.uploads.create(
            bytes=file_size,
            filename=filename,
            mime_type=file.content_type,
            purpose=file_purpose,
        )

        part_ids: list[str] = []
        offset = 0
        part_num = 1

        try:
            while offset < file_size:
                chunk = content[offset : offset + self._chunk_size]
                chunk_io = io.BytesIO(chunk)

                logger.debug(
                    f"Uploading part {part_num} ({len(chunk)} bytes, offset {offset})"
                )

                part = await client.uploads.parts.create(
                    upload_id=upload.id,
                    data=chunk_io,
                )
                part_ids.append(part.id)

                offset += self._chunk_size
                part_num += 1

            completed = await client.uploads.complete(
                upload_id=upload.id,
                part_ids=part_ids,
            )

            file_id = completed.file.id if completed.file else upload.id
            logger.info(f"Completed multipart upload to OpenAI: {file_id}")

            return self._build_upload_result(file_id, file.content_type)
        except Exception:
            logger.warning(f"Multipart upload failed, cancelling upload {upload.id}")
            try:
                await client.uploads.cancel(upload_id=upload.id)
            except Exception as cancel_err:
                logger.debug(f"Failed to cancel upload: {cancel_err}")
            raise

    async def _aupload_multipart_streaming(
        self,
        file: FileInput,
        file_size: int,
        purpose: str | None,
    ) -> UploadResult:
        """Async upload using the Uploads API with streaming chunks.

        Streams chunks directly from the file source without loading
        the entire file into memory. Used for large files.

        Args:
            file: The file to upload.
            file_size: Total file size in bytes.
            purpose: Optional purpose for the file.

        Returns:
            UploadResult with the file ID and metadata.
        """
        client = self._get_async_client()
        file_purpose = _get_purpose_for_content_type(file.content_type, purpose)
        filename = file.filename or generate_filename(file.content_type)

        logger.info(
            f"Uploading file '{filename}' to OpenAI Uploads API (streaming) "
            f"({file_size} bytes, {self._chunk_size} byte chunks)"
        )

        upload = await client.uploads.create(
            bytes=file_size,
            filename=filename,
            mime_type=file.content_type,
            purpose=file_purpose,
        )

        part_ids: list[str] = []
        part_num = 1

        try:
            async for chunk in _aiter_file_chunks(file, self._chunk_size):
                chunk_io = io.BytesIO(chunk)

                logger.debug(f"Uploading part {part_num} ({len(chunk)} bytes)")

                part = await client.uploads.parts.create(
                    upload_id=upload.id,
                    data=chunk_io,
                )
                part_ids.append(part.id)
                part_num += 1

            completed = await client.uploads.complete(
                upload_id=upload.id,
                part_ids=part_ids,
            )

            file_id = completed.file.id if completed.file else upload.id
            logger.info(f"Completed streaming multipart upload to OpenAI: {file_id}")

            return self._build_upload_result(file_id, file.content_type)
        except Exception:
            logger.warning(f"Multipart upload failed, cancelling upload {upload.id}")
            try:
                await client.uploads.cancel(upload_id=upload.id)
            except Exception as cancel_err:
                logger.debug(f"Failed to cancel upload: {cancel_err}")
            raise

    async def adelete(self, file_id: str) -> bool:
        """Async delete an uploaded file from OpenAI.

        Args:
            file_id: The file ID to delete.

        Returns:
            True if deletion was successful, False otherwise.
        """
        try:
            client = self._get_async_client()
            await client.files.delete(file_id)
            logger.info(f"Deleted OpenAI file: {file_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to delete OpenAI file {file_id}: {e}")
            return False
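Note: the size dispatch in upload()/aupload() implies a part count of
ceil(file_size / chunk_size) for anything routed to the Uploads API, so a
1 GiB file with the default 64 MiB chunks is sent as exactly 16 parts. A
minimal usage sketch, assuming OPENAI_API_KEY is set and "video" is a
FileInput built elsewhere (both are assumptions, not shown in this diff):

    import math

    uploader = OpenAIFileUploader()                # 64 MiB chunks by default
    parts = math.ceil((1024**3) / uploader._chunk_size)
    print(parts)                                   # -> 16 parts for a 1 GiB file

    result = uploader.upload(video)                # picks Files vs Uploads API by size
    print(result.file_id, result.provider)        # a file ID and "openai"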
@@ -1,5 +0,0 @@
Quarter,Revenue ($M),Expenses ($M),Profit ($M)
Q1 2024,70,40,30
Q2 2024,75,42,33
Q3 2024,80,45,35
Q4 2024,75,44,31
BIN lib/crewai-files/tests/fixtures/revenue_chart.png (vendored): binary file not shown (before: 27 KiB)
@@ -1,10 +0,0 @@
Review Guidelines

1. Be clear and concise: Write feedback that is easy to understand.
2. Focus on behavior and outcomes: Describe what happened and why it matters.
3. Be specific: Provide examples to support your points.
4. Balance positives and improvements: Highlight strengths and areas to grow.
5. Be respectful and constructive: Assume positive intent and offer solutions.
6. Use objective criteria: Reference goals, metrics, or expectations where possible.
7. Suggest next steps: Recommend actionable ways to improve.
8. Proofread: Check tone, grammar, and clarity before submitting.
BIN lib/crewai-files/tests/fixtures/sample_audio.wav (vendored): binary file not shown
BIN lib/crewai-files/tests/fixtures/sample_video.mp4 (vendored): binary file not shown
@@ -1,225 +0,0 @@
"""Tests for provider constraints."""

from crewai_files.processing.constraints import (
    ANTHROPIC_CONSTRAINTS,
    BEDROCK_CONSTRAINTS,
    GEMINI_CONSTRAINTS,
    OPENAI_CONSTRAINTS,
    AudioConstraints,
    ImageConstraints,
    PDFConstraints,
    ProviderConstraints,
    VideoConstraints,
    get_constraints_for_provider,
)
import pytest


class TestImageConstraints:
    """Tests for ImageConstraints dataclass."""

    def test_image_constraints_creation(self):
        """Test creating image constraints with all fields."""
        constraints = ImageConstraints(
            max_size_bytes=5 * 1024 * 1024,
            max_width=8000,
            max_height=8000,
            max_images_per_request=10,
        )

        assert constraints.max_size_bytes == 5 * 1024 * 1024
        assert constraints.max_width == 8000
        assert constraints.max_height == 8000
        assert constraints.max_images_per_request == 10

    def test_image_constraints_defaults(self):
        """Test image constraints with default values."""
        constraints = ImageConstraints(max_size_bytes=1000)

        assert constraints.max_size_bytes == 1000
        assert constraints.max_width is None
        assert constraints.max_height is None
        assert constraints.max_images_per_request is None
        assert "image/png" in constraints.supported_formats

    def test_image_constraints_frozen(self):
        """Test that image constraints are immutable."""
        constraints = ImageConstraints(max_size_bytes=1000)

        with pytest.raises(Exception):
            constraints.max_size_bytes = 2000


class TestPDFConstraints:
    """Tests for PDFConstraints dataclass."""

    def test_pdf_constraints_creation(self):
        """Test creating PDF constraints."""
        constraints = PDFConstraints(
            max_size_bytes=30 * 1024 * 1024,
            max_pages=100,
        )

        assert constraints.max_size_bytes == 30 * 1024 * 1024
        assert constraints.max_pages == 100

    def test_pdf_constraints_defaults(self):
        """Test PDF constraints with default values."""
        constraints = PDFConstraints(max_size_bytes=1000)

        assert constraints.max_size_bytes == 1000
        assert constraints.max_pages is None


class TestAudioConstraints:
    """Tests for AudioConstraints dataclass."""

    def test_audio_constraints_creation(self):
        """Test creating audio constraints."""
        constraints = AudioConstraints(
            max_size_bytes=100 * 1024 * 1024,
            max_duration_seconds=3600,
        )

        assert constraints.max_size_bytes == 100 * 1024 * 1024
        assert constraints.max_duration_seconds == 3600
        assert "audio/mp3" in constraints.supported_formats


class TestVideoConstraints:
    """Tests for VideoConstraints dataclass."""

    def test_video_constraints_creation(self):
        """Test creating video constraints."""
        constraints = VideoConstraints(
            max_size_bytes=2 * 1024 * 1024 * 1024,
            max_duration_seconds=7200,
        )

        assert constraints.max_size_bytes == 2 * 1024 * 1024 * 1024
        assert constraints.max_duration_seconds == 7200
        assert "video/mp4" in constraints.supported_formats


class TestProviderConstraints:
    """Tests for ProviderConstraints dataclass."""

    def test_provider_constraints_creation(self):
        """Test creating full provider constraints."""
        constraints = ProviderConstraints(
            name="test-provider",
            image=ImageConstraints(max_size_bytes=5 * 1024 * 1024),
            pdf=PDFConstraints(max_size_bytes=30 * 1024 * 1024),
            supports_file_upload=True,
            file_upload_threshold_bytes=10 * 1024 * 1024,
        )

        assert constraints.name == "test-provider"
        assert constraints.image is not None
        assert constraints.pdf is not None
        assert constraints.supports_file_upload is True

    def test_provider_constraints_defaults(self):
        """Test provider constraints with default values."""
        constraints = ProviderConstraints(name="test")

        assert constraints.name == "test"
        assert constraints.image is None
        assert constraints.pdf is None
        assert constraints.audio is None
        assert constraints.video is None
        assert constraints.supports_file_upload is False


class TestPredefinedConstraints:
    """Tests for predefined provider constraints."""

    def test_anthropic_constraints(self):
        """Test Anthropic constraints are properly defined."""
        assert ANTHROPIC_CONSTRAINTS.name == "anthropic"
        assert ANTHROPIC_CONSTRAINTS.image is not None
        assert ANTHROPIC_CONSTRAINTS.image.max_size_bytes == 5 * 1024 * 1024
        assert ANTHROPIC_CONSTRAINTS.image.max_width == 8000
        assert ANTHROPIC_CONSTRAINTS.pdf is not None
        assert ANTHROPIC_CONSTRAINTS.pdf.max_pages == 100
        assert ANTHROPIC_CONSTRAINTS.supports_file_upload is True

    def test_openai_constraints(self):
        """Test OpenAI constraints are properly defined."""
        assert OPENAI_CONSTRAINTS.name == "openai"
        assert OPENAI_CONSTRAINTS.image is not None
        assert OPENAI_CONSTRAINTS.image.max_size_bytes == 20 * 1024 * 1024
        assert OPENAI_CONSTRAINTS.pdf is None  # OpenAI doesn't support PDFs

    def test_gemini_constraints(self):
        """Test Gemini constraints are properly defined."""
        assert GEMINI_CONSTRAINTS.name == "gemini"
        assert GEMINI_CONSTRAINTS.image is not None
        assert GEMINI_CONSTRAINTS.pdf is not None
        assert GEMINI_CONSTRAINTS.audio is not None
        assert GEMINI_CONSTRAINTS.video is not None
        assert GEMINI_CONSTRAINTS.supports_file_upload is True

    def test_bedrock_constraints(self):
        """Test Bedrock constraints are properly defined."""
        assert BEDROCK_CONSTRAINTS.name == "bedrock"
        assert BEDROCK_CONSTRAINTS.image is not None
        assert BEDROCK_CONSTRAINTS.image.max_size_bytes == 4_608_000
        assert BEDROCK_CONSTRAINTS.pdf is not None
        assert BEDROCK_CONSTRAINTS.supports_file_upload is False


class TestGetConstraintsForProvider:
    """Tests for get_constraints_for_provider function."""

    def test_get_by_exact_name(self):
        """Test getting constraints by exact provider name."""
        result = get_constraints_for_provider("anthropic")
        assert result == ANTHROPIC_CONSTRAINTS

        result = get_constraints_for_provider("openai")
        assert result == OPENAI_CONSTRAINTS

        result = get_constraints_for_provider("gemini")
        assert result == GEMINI_CONSTRAINTS

    def test_get_by_alias(self):
        """Test getting constraints by alias name."""
        result = get_constraints_for_provider("claude")
        assert result == ANTHROPIC_CONSTRAINTS

        result = get_constraints_for_provider("gpt")
        assert result == OPENAI_CONSTRAINTS

        result = get_constraints_for_provider("google")
        assert result == GEMINI_CONSTRAINTS

    def test_get_case_insensitive(self):
        """Test case-insensitive lookup."""
        result = get_constraints_for_provider("ANTHROPIC")
        assert result == ANTHROPIC_CONSTRAINTS

        result = get_constraints_for_provider("OpenAI")
        assert result == OPENAI_CONSTRAINTS

    def test_get_with_provider_constraints_object(self):
        """Test passing ProviderConstraints object returns it unchanged."""
        custom = ProviderConstraints(name="custom")
        result = get_constraints_for_provider(custom)
        assert result is custom

    def test_get_unknown_provider(self):
        """Test unknown provider returns None."""
        result = get_constraints_for_provider("unknown-provider")
        assert result is None

    def test_get_by_partial_match(self):
        """Test partial match in provider string."""
        result = get_constraints_for_provider("claude-3-sonnet")
        assert result == ANTHROPIC_CONSTRAINTS

        result = get_constraints_for_provider("gpt-4o")
        assert result == OPENAI_CONSTRAINTS

        result = get_constraints_for_provider("gemini-pro")
        assert result == GEMINI_CONSTRAINTS
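The lookup tests above pin down a resolution order: exact provider name, known
alias ("claude", "gpt", "google"), case-insensitive comparison, pass-through
for ProviderConstraints objects, and finally substring matching against model
strings like "claude-3-sonnet". A sketch with those semantics, for orientation
only; the alias table is inferred from the tests, not copied from the library:

    _ALIASES = {
        "anthropic": ANTHROPIC_CONSTRAINTS,
        "claude": ANTHROPIC_CONSTRAINTS,
        "openai": OPENAI_CONSTRAINTS,
        "gpt": OPENAI_CONSTRAINTS,
        "gemini": GEMINI_CONSTRAINTS,
        "google": GEMINI_CONSTRAINTS,
    }

    def lookup_constraints(provider):
        if isinstance(provider, ProviderConstraints):
            return provider                      # objects pass through unchanged
        key = provider.lower()                   # case-insensitive
        if key in _ALIASES:
            return _ALIASES[key]                 # exact name or alias
        for alias, constraints in _ALIASES.items():
            if alias in key:
                return constraints               # partial match, e.g. "gpt-4o"
        return None                              # unknown providers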
@@ -1,303 +0,0 @@
"""Tests for FileProcessor class."""

from crewai_files import FileBytes, ImageFile
from crewai_files.processing.constraints import (
    ANTHROPIC_CONSTRAINTS,
    ImageConstraints,
    ProviderConstraints,
)
from crewai_files.processing.enums import FileHandling
from crewai_files.processing.exceptions import (
    FileTooLargeError,
)
from crewai_files.processing.processor import FileProcessor
import pytest


# Minimal valid PNG: 8x8 pixel RGB image (valid for PIL)
MINIMAL_PNG = bytes(
    [
        0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A, 0x00, 0x00, 0x00, 0x0D,
        0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08,
        0x08, 0x02, 0x00, 0x00, 0x00, 0x4B, 0x6D, 0x29, 0xDC, 0x00, 0x00, 0x00,
        0x12, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9C, 0x63, 0xFC, 0xCF, 0x80, 0x1D,
        0x30, 0xE1, 0x10, 0x1F, 0xA4, 0x12, 0x00, 0xCD, 0x41, 0x01, 0x0F, 0xE8,
        0x41, 0xE2, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x4E, 0x44, 0xAE,
        0x42, 0x60, 0x82,
    ]
)

# Minimal valid PDF
MINIMAL_PDF = (
    b"%PDF-1.4\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj "
    b"2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1>>endobj "
    b"3 0 obj<</Type/Page/MediaBox[0 0 612 792]/Parent 2 0 R>>endobj "
    b"xref\n0 4\n0000000000 65535 f \n0000000009 00000 n \n"
    b"0000000052 00000 n \n0000000101 00000 n \n"
    b"trailer<</Size 4/Root 1 0 R>>\nstartxref\n178\n%%EOF"
)


class TestFileProcessorInit:
    """Tests for FileProcessor initialization."""

    def test_init_with_constraints(self):
        """Test initialization with ProviderConstraints."""
        processor = FileProcessor(constraints=ANTHROPIC_CONSTRAINTS)

        assert processor.constraints == ANTHROPIC_CONSTRAINTS

    def test_init_with_provider_string(self):
        """Test initialization with provider name string."""
        processor = FileProcessor(constraints="anthropic")

        assert processor.constraints == ANTHROPIC_CONSTRAINTS

    def test_init_with_unknown_provider(self):
        """Test initialization with unknown provider sets constraints to None."""
        processor = FileProcessor(constraints="unknown")

        assert processor.constraints is None

    def test_init_with_none_constraints(self):
        """Test initialization with None constraints."""
        processor = FileProcessor(constraints=None)

        assert processor.constraints is None


class TestFileProcessorValidate:
    """Tests for FileProcessor.validate method."""

    def test_validate_valid_file(self):
        """Test validating a valid file returns no errors."""
        processor = FileProcessor(constraints=ANTHROPIC_CONSTRAINTS)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        errors = processor.validate(file)

        assert len(errors) == 0

    def test_validate_without_constraints(self):
        """Test validating without constraints returns empty list."""
        processor = FileProcessor(constraints=None)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        errors = processor.validate(file)

        assert len(errors) == 0

    def test_validate_strict_raises_on_error(self):
        """Test STRICT mode raises on validation error."""
        constraints = ProviderConstraints(
            name="test",
            image=ImageConstraints(max_size_bytes=10),
        )
        processor = FileProcessor(constraints=constraints)
        # Set mode to strict on the file
        file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="strict"
        )

        with pytest.raises(FileTooLargeError):
            processor.validate(file)


class TestFileProcessorProcess:
    """Tests for FileProcessor.process method."""

    def test_process_valid_file(self):
        """Test processing a valid file returns it unchanged."""
        processor = FileProcessor(constraints=ANTHROPIC_CONSTRAINTS)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        result = processor.process(file)

        assert result == file

    def test_process_without_constraints(self):
        """Test processing without constraints returns file unchanged."""
        processor = FileProcessor(constraints=None)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        result = processor.process(file)

        assert result == file

    def test_process_strict_raises_on_error(self):
        """Test STRICT mode raises on processing error."""
        constraints = ProviderConstraints(
            name="test",
            image=ImageConstraints(max_size_bytes=10),
        )
        processor = FileProcessor(constraints=constraints)
        # Set mode to strict on the file
        file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="strict"
        )

        with pytest.raises(FileTooLargeError):
            processor.process(file)

    def test_process_warn_returns_file(self):
        """Test WARN mode returns file with warning."""
        constraints = ProviderConstraints(
            name="test",
            image=ImageConstraints(max_size_bytes=10),
        )
        processor = FileProcessor(constraints=constraints)
        # Set mode to warn on the file
        file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="warn"
        )

        result = processor.process(file)

        assert result == file


class TestFileProcessorProcessFiles:
    """Tests for FileProcessor.process_files method."""

    def test_process_files_multiple(self):
        """Test processing multiple files."""
        processor = FileProcessor(constraints=ANTHROPIC_CONSTRAINTS)
        files = {
            "image1": ImageFile(
                source=FileBytes(data=MINIMAL_PNG, filename="test1.png")
            ),
            "image2": ImageFile(
                source=FileBytes(data=MINIMAL_PNG, filename="test2.png")
            ),
        }

        result = processor.process_files(files)

        assert len(result) == 2
        assert "image1" in result
        assert "image2" in result

    def test_process_files_empty(self):
        """Test processing empty files dict."""
        processor = FileProcessor(constraints=ANTHROPIC_CONSTRAINTS)

        result = processor.process_files({})

        assert result == {}


class TestFileHandlingEnum:
    """Tests for FileHandling enum."""

    def test_enum_values(self):
        """Test all enum values are accessible."""
        assert FileHandling.STRICT.value == "strict"
        assert FileHandling.AUTO.value == "auto"
        assert FileHandling.WARN.value == "warn"
        assert FileHandling.CHUNK.value == "chunk"


class TestFileProcessorPerFileMode:
    """Tests for per-file mode handling."""

    def test_file_default_mode_is_auto(self):
        """Test that files default to auto mode."""
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))
        assert file.mode == "auto"

    def test_file_custom_mode(self):
        """Test setting custom mode on file."""
        file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="strict"
        )
        assert file.mode == "strict"

    def test_processor_respects_file_mode(self):
        """Test processor uses each file's mode setting."""
        constraints = ProviderConstraints(
            name="test",
            image=ImageConstraints(max_size_bytes=10),
        )
        processor = FileProcessor(constraints=constraints)

        # File with strict mode should raise
        strict_file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="strict"
        )
        with pytest.raises(FileTooLargeError):
            processor.process(strict_file)

        # File with warn mode should not raise
        warn_file = ImageFile(
            source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="warn"
        )
        result = processor.process(warn_file)
        assert result == warn_file
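Taken together, these tests describe a processor that defers to each file's own
mode: "strict" raises, "warn" logs and passes the file through, and "auto" (the
default) leaves handling to the processor. A condensed usage sketch reusing the
fixtures above, with an artificially tiny 10-byte image limit:

    constraints = ProviderConstraints(
        name="demo", image=ImageConstraints(max_size_bytes=10)
    )
    processor = FileProcessor(constraints=constraints)

    oversized = ImageFile(
        source=FileBytes(data=MINIMAL_PNG, filename="big.png"), mode="warn"
    )
    processed = processor.process(oversized)  # warn mode: logs, never raises
    assert processed == oversized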
@@ -1,362 +0,0 @@
"""Unit tests for file transformers."""

import io
from unittest.mock import patch

from crewai_files import ImageFile, PDFFile, TextFile
from crewai_files.core.sources import FileBytes
from crewai_files.processing.exceptions import ProcessingDependencyError
from crewai_files.processing.transformers import (
    chunk_pdf,
    chunk_text,
    get_image_dimensions,
    get_pdf_page_count,
    optimize_image,
    resize_image,
)
import pytest


def create_test_png(width: int = 100, height: int = 100) -> bytes:
    """Create a minimal valid PNG for testing."""
    from PIL import Image

    img = Image.new("RGB", (width, height), color="red")
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    return buffer.getvalue()


def create_test_pdf(num_pages: int = 1) -> bytes:
    """Create a minimal valid PDF for testing."""
    from pypdf import PdfWriter

    writer = PdfWriter()
    for _ in range(num_pages):
        writer.add_blank_page(width=612, height=792)

    buffer = io.BytesIO()
    writer.write(buffer)
    return buffer.getvalue()


class TestResizeImage:
    """Tests for resize_image function."""

    def test_resize_larger_image(self) -> None:
        """Test resizing an image larger than max dimensions."""
        png_bytes = create_test_png(200, 150)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        result = resize_image(img, max_width=100, max_height=100)

        dims = get_image_dimensions(result)
        assert dims is not None
        width, height = dims
        assert width <= 100
        assert height <= 100

    def test_no_resize_if_within_bounds(self) -> None:
        """Test that small images are returned unchanged."""
        png_bytes = create_test_png(50, 50)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="small.png"))

        result = resize_image(img, max_width=100, max_height=100)

        assert result is img

    def test_preserve_aspect_ratio(self) -> None:
        """Test that aspect ratio is preserved during resize."""
        png_bytes = create_test_png(200, 100)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="wide.png"))

        result = resize_image(img, max_width=100, max_height=100)

        dims = get_image_dimensions(result)
        assert dims is not None
        width, height = dims
        assert width == 100
        assert height == 50

    def test_resize_without_aspect_ratio(self) -> None:
        """Test resizing without preserving aspect ratio."""
        png_bytes = create_test_png(200, 100)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="wide.png"))

        result = resize_image(
            img, max_width=50, max_height=50, preserve_aspect_ratio=False
        )

        dims = get_image_dimensions(result)
        assert dims is not None
        width, height = dims
        assert width == 50
        assert height == 50

    def test_resize_returns_image_file(self) -> None:
        """Test that resize returns an ImageFile instance."""
        png_bytes = create_test_png(200, 200)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        result = resize_image(img, max_width=100, max_height=100)

        assert isinstance(result, ImageFile)

    def test_raises_without_pillow(self) -> None:
        """Test that ProcessingDependencyError is raised without Pillow."""
        img = ImageFile(source=FileBytes(data=b"fake", filename="test.png"))

        with patch.dict("sys.modules", {"PIL": None, "PIL.Image": None}):
            with pytest.raises(ProcessingDependencyError) as exc_info:
                # Force reimport to trigger ImportError
                import importlib

                import crewai_files.processing.transformers as t

                importlib.reload(t)
                t.resize_image(img, 100, 100)

        assert "Pillow" in str(exc_info.value)


class TestOptimizeImage:
    """Tests for optimize_image function."""

    def test_optimize_reduces_size(self) -> None:
        """Test that optimization reduces file size."""
        png_bytes = create_test_png(500, 500)
        original_size = len(png_bytes)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="large.png"))

        result = optimize_image(img, target_size_bytes=original_size // 2)

        result_size = len(result.read())
        assert result_size < original_size

    def test_no_optimize_if_under_target(self) -> None:
        """Test that small images are returned unchanged."""
        png_bytes = create_test_png(50, 50)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="small.png"))

        result = optimize_image(img, target_size_bytes=1024 * 1024)

        assert result is img

    def test_optimize_returns_image_file(self) -> None:
        """Test that optimize returns an ImageFile instance."""
        png_bytes = create_test_png(200, 200)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        result = optimize_image(img, target_size_bytes=100)

        assert isinstance(result, ImageFile)

    def test_optimize_respects_min_quality(self) -> None:
        """Test that optimization stops at minimum quality."""
        png_bytes = create_test_png(100, 100)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        # Request impossibly small size - should stop at min quality
        result = optimize_image(img, target_size_bytes=10, min_quality=50)

        assert isinstance(result, ImageFile)
        assert len(result.read()) > 10


class TestChunkPdf:
    """Tests for chunk_pdf function."""

    def test_chunk_splits_large_pdf(self) -> None:
        """Test that large PDFs are split into chunks."""
        pdf_bytes = create_test_pdf(num_pages=10)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="large.pdf"))

        result = list(chunk_pdf(pdf, max_pages=3))

        assert len(result) == 4
        assert all(isinstance(chunk, PDFFile) for chunk in result)

    def test_no_chunk_if_within_limit(self) -> None:
        """Test that small PDFs are returned unchanged."""
        pdf_bytes = create_test_pdf(num_pages=3)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="small.pdf"))

        result = list(chunk_pdf(pdf, max_pages=5))

        assert len(result) == 1
        assert result[0] is pdf

    def test_chunk_filenames(self) -> None:
        """Test that chunked files have indexed filenames."""
        pdf_bytes = create_test_pdf(num_pages=6)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="document.pdf"))

        result = list(chunk_pdf(pdf, max_pages=2))

        assert result[0].filename == "document_chunk_0.pdf"
        assert result[1].filename == "document_chunk_1.pdf"
        assert result[2].filename == "document_chunk_2.pdf"

    def test_chunk_with_overlap(self) -> None:
        """Test chunking with overlapping pages."""
        pdf_bytes = create_test_pdf(num_pages=10)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="doc.pdf"))

        result = list(chunk_pdf(pdf, max_pages=4, overlap_pages=1))

        # With overlap, we get more chunks
        assert len(result) >= 3

    def test_chunk_page_counts(self) -> None:
        """Test that each chunk has correct page count."""
        pdf_bytes = create_test_pdf(num_pages=7)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="doc.pdf"))

        result = list(chunk_pdf(pdf, max_pages=3))

        page_counts = [get_pdf_page_count(chunk) for chunk in result]
        assert page_counts == [3, 3, 1]


class TestChunkText:
    """Tests for chunk_text function."""

    def test_chunk_splits_large_text(self) -> None:
        """Test that large text files are split into chunks."""
        content = "Hello world. " * 100
        text = TextFile(source=content.encode(), filename="large.txt")

        result = list(chunk_text(text, max_chars=200, overlap_chars=0))

        assert len(result) > 1
        assert all(isinstance(chunk, TextFile) for chunk in result)

    def test_no_chunk_if_within_limit(self) -> None:
        """Test that small text files are returned unchanged."""
        content = "Short text"
        text = TextFile(source=content.encode(), filename="small.txt")

        result = list(chunk_text(text, max_chars=1000, overlap_chars=0))

        assert len(result) == 1
        assert result[0] is text

    def test_chunk_filenames(self) -> None:
        """Test that chunked files have indexed filenames."""
        content = "A" * 500
        text = TextFile(source=FileBytes(data=content.encode(), filename="data.txt"))

        result = list(chunk_text(text, max_chars=200, overlap_chars=0))

        assert result[0].filename == "data_chunk_0.txt"
        assert result[1].filename == "data_chunk_1.txt"
        assert len(result) == 3

    def test_chunk_preserves_extension(self) -> None:
        """Test that file extension is preserved in chunks."""
        content = "A" * 500
        text = TextFile(source=FileBytes(data=content.encode(), filename="script.py"))

        result = list(chunk_text(text, max_chars=200, overlap_chars=0))

        assert all(chunk.filename.endswith(".py") for chunk in result)

    def test_chunk_prefers_newline_boundaries(self) -> None:
        """Test that chunking prefers to split at newlines."""
        content = "Line one\nLine two\nLine three\nLine four\nLine five"
        text = TextFile(source=content.encode(), filename="lines.txt")

        result = list(
            chunk_text(text, max_chars=25, overlap_chars=0, split_on_newlines=True)
        )

        # Should split at newline boundaries
        for chunk in result:
            chunk_text_content = chunk.read().decode()
            # Chunks should end at newlines (except possibly the last)
            if chunk != result[-1]:
                assert (
                    chunk_text_content.endswith("\n") or len(chunk_text_content) <= 25
                )

    def test_chunk_with_overlap(self) -> None:
        """Test chunking with overlapping characters."""
        content = "ABCDEFGHIJ" * 10
        text = TextFile(source=content.encode(), filename="data.txt")

        result = list(chunk_text(text, max_chars=30, overlap_chars=5))

        # With overlap, chunks should share some content
        assert len(result) >= 3

    def test_chunk_overlap_larger_than_max_chars(self) -> None:
        """Test that overlap > max_chars doesn't cause infinite loop."""
        content = "A" * 100
        text = TextFile(source=content.encode(), filename="data.txt")

        # overlap_chars > max_chars should still work (just with max overlap)
        result = list(chunk_text(text, max_chars=20, overlap_chars=50))

        assert len(result) > 1
        # Should still complete without hanging


class TestGetImageDimensions:
    """Tests for get_image_dimensions function."""

    def test_get_dimensions(self) -> None:
        """Test getting image dimensions."""
        png_bytes = create_test_png(150, 100)
        img = ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        dims = get_image_dimensions(img)

        assert dims == (150, 100)

    def test_returns_none_for_invalid_image(self) -> None:
        """Test that None is returned for invalid image data."""
        img = ImageFile(source=FileBytes(data=b"not an image", filename="bad.png"))

        dims = get_image_dimensions(img)

        assert dims is None

    def test_returns_none_without_pillow(self) -> None:
        """Test that None is returned when Pillow is not installed."""
        png_bytes = create_test_png(100, 100)
        ImageFile(source=FileBytes(data=png_bytes, filename="test.png"))

        with patch.dict("sys.modules", {"PIL": None}):
            # Can't easily test this without unloading module
            # Just verify the function handles the case gracefully
            pass


class TestGetPdfPageCount:
    """Tests for get_pdf_page_count function."""

    def test_get_page_count(self) -> None:
        """Test getting PDF page count."""
        pdf_bytes = create_test_pdf(num_pages=5)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="test.pdf"))

        count = get_pdf_page_count(pdf)

        assert count == 5

    def test_single_page(self) -> None:
        """Test page count for single page PDF."""
        pdf_bytes = create_test_pdf(num_pages=1)
        pdf = PDFFile(source=FileBytes(data=pdf_bytes, filename="single.pdf"))

        count = get_pdf_page_count(pdf)

        assert count == 1

    def test_returns_none_for_invalid_pdf(self) -> None:
        """Test that None is returned for invalid PDF data."""
        pdf = PDFFile(source=FileBytes(data=b"not a pdf", filename="bad.pdf"))

        count = get_pdf_page_count(pdf)

        assert count is None
@@ -1,644 +0,0 @@
"""Tests for file validators."""

from unittest.mock import patch

from crewai_files import AudioFile, FileBytes, ImageFile, PDFFile, TextFile, VideoFile
from crewai_files.processing.constraints import (
    ANTHROPIC_CONSTRAINTS,
    AudioConstraints,
    ImageConstraints,
    PDFConstraints,
    ProviderConstraints,
    VideoConstraints,
)
from crewai_files.processing.exceptions import (
    FileTooLargeError,
    FileValidationError,
    UnsupportedFileTypeError,
)
from crewai_files.processing.validators import (
    _get_audio_duration,
    _get_video_duration,
    validate_audio,
    validate_file,
    validate_image,
    validate_pdf,
    validate_text,
    validate_video,
)
import pytest


# Minimal valid PNG: 8x8 pixel RGB image (valid for PIL)
MINIMAL_PNG = bytes(
    [
        0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A,
        0x00, 0x00, 0x00, 0x0D, 0x49, 0x48, 0x44, 0x52,
        0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08,
        0x08, 0x02, 0x00, 0x00, 0x00, 0x4B, 0x6D, 0x29,
        0xDC, 0x00, 0x00, 0x00, 0x12, 0x49, 0x44, 0x41,
        0x54, 0x78, 0x9C, 0x63, 0xFC, 0xCF, 0x80, 0x1D,
        0x30, 0xE1, 0x10, 0x1F, 0xA4, 0x12, 0x00, 0xCD,
        0x41, 0x01, 0x0F, 0xE8, 0x41, 0xE2, 0x6F, 0x00,
        0x00, 0x00, 0x00, 0x49, 0x45, 0x4E, 0x44, 0xAE,
        0x42, 0x60, 0x82,
    ]
)

# Minimal valid PDF
MINIMAL_PDF = (
    b"%PDF-1.4\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj "
    b"2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1>>endobj "
    b"3 0 obj<</Type/Page/MediaBox[0 0 612 792]/Parent 2 0 R>>endobj "
    b"xref\n0 4\n0000000000 65535 f \n0000000009 00000 n \n"
    b"0000000052 00000 n \n0000000101 00000 n \n"
    b"trailer<</Size 4/Root 1 0 R>>\nstartxref\n178\n%%EOF"
)


class TestValidateImage:
    """Tests for validate_image function."""

    def test_validate_valid_image(self):
        """Test validating a valid image within constraints."""
        constraints = ImageConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("image/png",),
        )
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        errors = validate_image(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_image_too_large(self):
        """Test validating an image that exceeds size limit."""
        constraints = ImageConstraints(
            max_size_bytes=10,  # Very small limit
            supported_formats=("image/png",),
        )
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        with pytest.raises(FileTooLargeError) as exc_info:
            validate_image(file, constraints)

        assert "exceeds" in str(exc_info.value)
        assert exc_info.value.file_name == "test.png"

    def test_validate_image_unsupported_format(self):
        """Test validating an image with unsupported format."""
        constraints = ImageConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("image/jpeg",),  # Only JPEG
        )
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        with pytest.raises(UnsupportedFileTypeError) as exc_info:
            validate_image(file, constraints)

        assert "not supported" in str(exc_info.value)

    def test_validate_image_no_raise(self):
        """Test validating with raise_on_error=False returns errors list."""
        constraints = ImageConstraints(
            max_size_bytes=10,
            supported_formats=("image/jpeg",),
        )
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        errors = validate_image(file, constraints, raise_on_error=False)

        assert len(errors) == 2  # Size error and format error


class TestValidatePDF:
    """Tests for validate_pdf function."""

    def test_validate_valid_pdf(self):
        """Test validating a valid PDF within constraints."""
        constraints = PDFConstraints(
            max_size_bytes=10 * 1024 * 1024,
        )
        file = PDFFile(source=FileBytes(data=MINIMAL_PDF, filename="test.pdf"))

        errors = validate_pdf(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_pdf_too_large(self):
        """Test validating a PDF that exceeds size limit."""
        constraints = PDFConstraints(
            max_size_bytes=10,  # Very small limit
        )
        file = PDFFile(source=FileBytes(data=MINIMAL_PDF, filename="test.pdf"))

        with pytest.raises(FileTooLargeError) as exc_info:
            validate_pdf(file, constraints)

        assert "exceeds" in str(exc_info.value)


class TestValidateText:
    """Tests for validate_text function."""

    def test_validate_valid_text(self):
        """Test validating a valid text file."""
        constraints = ProviderConstraints(
            name="test",
            general_max_size_bytes=10 * 1024 * 1024,
        )
        file = TextFile(source=FileBytes(data=b"Hello, World!", filename="test.txt"))

        errors = validate_text(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_text_too_large(self):
        """Test validating text that exceeds size limit."""
        constraints = ProviderConstraints(
            name="test",
            general_max_size_bytes=5,
        )
        file = TextFile(source=FileBytes(data=b"Hello, World!", filename="test.txt"))

        with pytest.raises(FileTooLargeError):
            validate_text(file, constraints)

    def test_validate_text_no_limit(self):
        """Test validating text with no size limit."""
        constraints = ProviderConstraints(name="test")
        file = TextFile(source=FileBytes(data=b"Hello, World!", filename="test.txt"))

        errors = validate_text(file, constraints, raise_on_error=False)

        assert len(errors) == 0


class TestValidateFile:
    """Tests for validate_file function."""

    def test_validate_file_dispatches_to_image(self):
        """Test validate_file dispatches to image validator."""
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        errors = validate_file(file, ANTHROPIC_CONSTRAINTS, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_file_dispatches_to_pdf(self):
        """Test validate_file dispatches to PDF validator."""
        file = PDFFile(source=FileBytes(data=MINIMAL_PDF, filename="test.pdf"))

        errors = validate_file(file, ANTHROPIC_CONSTRAINTS, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_file_unsupported_type(self):
        """Test validating a file type not supported by provider."""
        constraints = ProviderConstraints(
            name="test",
            image=None,  # No image support
        )
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        with pytest.raises(UnsupportedFileTypeError) as exc_info:
            validate_file(file, constraints)

        assert "does not support images" in str(exc_info.value)

    def test_validate_file_pdf_not_supported(self):
        """Test validating PDF when provider doesn't support it."""
        constraints = ProviderConstraints(
            name="test",
            pdf=None,  # No PDF support
        )
        file = PDFFile(source=FileBytes(data=MINIMAL_PDF, filename="test.pdf"))

        with pytest.raises(UnsupportedFileTypeError) as exc_info:
            validate_file(file, constraints)

        assert "does not support PDFs" in str(exc_info.value)


# Minimal audio bytes for testing (not a valid audio file, used for mocked tests)
MINIMAL_AUDIO = b"\x00" * 100

# Minimal video bytes for testing (not a valid video file, used for mocked tests)
MINIMAL_VIDEO = b"\x00" * 100

# Fallback content type when python-magic cannot detect
FALLBACK_CONTENT_TYPE = "application/octet-stream"


class TestValidateAudio:
    """Tests for validate_audio function and audio duration validation."""

    def test_validate_valid_audio(self):
        """Test validating a valid audio file within constraints."""
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        errors = validate_audio(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_audio_too_large(self):
        """Test validating an audio file that exceeds size limit."""
        constraints = AudioConstraints(
            max_size_bytes=10,  # Very small limit
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        with pytest.raises(FileTooLargeError) as exc_info:
            validate_audio(file, constraints)

        assert "exceeds" in str(exc_info.value)
        assert exc_info.value.file_name == "test.mp3"

    def test_validate_audio_unsupported_format(self):
        """Test validating an audio file with unsupported format."""
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("audio/wav",),  # Only WAV
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        with pytest.raises(UnsupportedFileTypeError) as exc_info:
            validate_audio(file, constraints)

        assert "not supported" in str(exc_info.value)

    @patch("crewai_files.processing.validators._get_audio_duration")
    def test_validate_audio_duration_passes(self, mock_get_duration):
        """Test validating audio when duration is under limit."""
        mock_get_duration.return_value = 30.0
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        errors = validate_audio(file, constraints, raise_on_error=False)

        assert len(errors) == 0
        mock_get_duration.assert_called_once()

    @patch("crewai_files.processing.validators._get_audio_duration")
    def test_validate_audio_duration_fails(self, mock_get_duration):
        """Test validating audio when duration exceeds limit."""
        mock_get_duration.return_value = 120.5
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        with pytest.raises(FileValidationError) as exc_info:
            validate_audio(file, constraints)

        assert "duration" in str(exc_info.value).lower()
        assert "120.5s" in str(exc_info.value)
        assert "60s" in str(exc_info.value)

    @patch("crewai_files.processing.validators._get_audio_duration")
    def test_validate_audio_duration_no_raise(self, mock_get_duration):
        """Test audio duration validation with raise_on_error=False."""
        mock_get_duration.return_value = 120.5
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        errors = validate_audio(file, constraints, raise_on_error=False)

        assert len(errors) == 1
        assert "duration" in errors[0].lower()

    @patch("crewai_files.processing.validators._get_audio_duration")
    def test_validate_audio_duration_none_skips(self, mock_get_duration):
        """Test that duration validation is skipped when max_duration_seconds is None."""
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=None,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        errors = validate_audio(file, constraints, raise_on_error=False)

        assert len(errors) == 0
        mock_get_duration.assert_not_called()

    @patch("crewai_files.processing.validators._get_audio_duration")
    def test_validate_audio_duration_detection_returns_none(self, mock_get_duration):
        """Test that validation passes when duration detection returns None."""
        mock_get_duration.return_value = None
        constraints = AudioConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("audio/mp3", "audio/mpeg", FALLBACK_CONTENT_TYPE),
        )
        file = AudioFile(source=FileBytes(data=MINIMAL_AUDIO, filename="test.mp3"))

        errors = validate_audio(file, constraints, raise_on_error=False)

        assert len(errors) == 0


class TestValidateVideo:
    """Tests for validate_video function and video duration validation."""

    def test_validate_valid_video(self):
        """Test validating a valid video file within constraints."""
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_video_too_large(self):
        """Test validating a video file that exceeds size limit."""
        constraints = VideoConstraints(
            max_size_bytes=10,  # Very small limit
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        with pytest.raises(FileTooLargeError) as exc_info:
            validate_video(file, constraints)

        assert "exceeds" in str(exc_info.value)
        assert exc_info.value.file_name == "test.mp4"

    def test_validate_video_unsupported_format(self):
        """Test validating a video file with unsupported format."""
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            supported_formats=("video/webm",),  # Only WebM
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        with pytest.raises(UnsupportedFileTypeError) as exc_info:
            validate_video(file, constraints)

        assert "not supported" in str(exc_info.value)

    @patch("crewai_files.processing.validators._get_video_duration")
    def test_validate_video_duration_passes(self, mock_get_duration):
        """Test validating video when duration is under limit."""
        mock_get_duration.return_value = 30.0
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 0
        mock_get_duration.assert_called_once()

    @patch("crewai_files.processing.validators._get_video_duration")
    def test_validate_video_duration_fails(self, mock_get_duration):
        """Test validating video when duration exceeds limit."""
        mock_get_duration.return_value = 180.0
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        with pytest.raises(FileValidationError) as exc_info:
            validate_video(file, constraints)

        assert "duration" in str(exc_info.value).lower()
        assert "180.0s" in str(exc_info.value)
        assert "60s" in str(exc_info.value)

    @patch("crewai_files.processing.validators._get_video_duration")
    def test_validate_video_duration_no_raise(self, mock_get_duration):
        """Test video duration validation with raise_on_error=False."""
        mock_get_duration.return_value = 180.0
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 1
        assert "duration" in errors[0].lower()

    @patch("crewai_files.processing.validators._get_video_duration")
    def test_validate_video_duration_none_skips(self, mock_get_duration):
        """Test that duration validation is skipped when max_duration_seconds is None."""
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=None,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 0
        mock_get_duration.assert_not_called()

    @patch("crewai_files.processing.validators._get_video_duration")
    def test_validate_video_duration_detection_returns_none(self, mock_get_duration):
        """Test that validation passes when duration detection returns None."""
        mock_get_duration.return_value = None
        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("video/mp4", FALLBACK_CONTENT_TYPE),
        )
        file = VideoFile(source=FileBytes(data=MINIMAL_VIDEO, filename="test.mp4"))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 0


class TestGetAudioDuration:
    """Tests for _get_audio_duration helper function."""

    def test_get_audio_duration_corrupt_file(self):
        """Test handling of corrupt audio data."""
        corrupt_data = b"not valid audio data at all"
        result = _get_audio_duration(corrupt_data)

        assert result is None


class TestGetVideoDuration:
    """Tests for _get_video_duration helper function."""

    def test_get_video_duration_corrupt_file(self):
        """Test handling of corrupt video data."""
        corrupt_data = b"not valid video data at all"
        result = _get_video_duration(corrupt_data)

        assert result is None


class TestRealVideoFile:
    """Tests using real video fixture file."""

    @pytest.fixture
    def sample_video_path(self):
        """Path to sample video fixture."""
        from pathlib import Path

        path = Path(__file__).parent.parent.parent / "fixtures" / "sample_video.mp4"
        if not path.exists():
            pytest.skip("sample_video.mp4 fixture not found")
        return path

    @pytest.fixture
    def sample_video_content(self, sample_video_path):
        """Read sample video content."""
        return sample_video_path.read_bytes()

    def test_get_video_duration_real_file(self, sample_video_content):
        """Test duration detection with real video file."""
        try:
            import av  # noqa: F401
        except ImportError:
            pytest.skip("PyAV not installed")

        duration = _get_video_duration(sample_video_content, "video/mp4")

        assert duration is not None
        assert 4.5 <= duration <= 5.5  # ~5 seconds with tolerance

    def test_get_video_duration_real_file_no_format_hint(self, sample_video_content):
        """Test duration detection without format hint."""
        try:
            import av  # noqa: F401
        except ImportError:
            pytest.skip("PyAV not installed")

        duration = _get_video_duration(sample_video_content)

        assert duration is not None
        assert 4.5 <= duration <= 5.5

    def test_validate_video_real_file_passes(self, sample_video_path):
        """Test validating real video file within constraints."""
        try:
            import av  # noqa: F401
        except ImportError:
            pytest.skip("PyAV not installed")

        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=60,
            supported_formats=("video/mp4",),
        )
        file = VideoFile(source=str(sample_video_path))

        errors = validate_video(file, constraints, raise_on_error=False)

        assert len(errors) == 0

    def test_validate_video_real_file_duration_exceeded(self, sample_video_path):
        """Test validating real video file that exceeds duration limit."""
        try:
            import av  # noqa: F401
        except ImportError:
            pytest.skip("PyAV not installed")

        constraints = VideoConstraints(
            max_size_bytes=10 * 1024 * 1024,
            max_duration_seconds=2,  # Video is ~5 seconds
            supported_formats=("video/mp4",),
        )
        file = VideoFile(source=str(sample_video_path))

        with pytest.raises(FileValidationError) as exc_info:
            validate_video(file, constraints)

        assert "duration" in str(exc_info.value).lower()
        assert "2s" in str(exc_info.value)
@@ -1,311 +0,0 @@
"""Tests for FileUrl source type and URL resolution."""

from unittest.mock import AsyncMock, MagicMock, patch

from crewai_files import FileBytes, FileUrl, ImageFile
from crewai_files.core.resolved import InlineBase64, UrlReference
from crewai_files.core.sources import FilePath, _normalize_source
from crewai_files.resolution.resolver import FileResolver
import pytest


class TestFileUrl:
    """Tests for FileUrl source type."""

    def test_create_file_url(self):
        """Test creating FileUrl with valid URL."""
        url = FileUrl(url="https://example.com/image.png")

        assert url.url == "https://example.com/image.png"
        assert url.filename is None

    def test_create_file_url_with_filename(self):
        """Test creating FileUrl with custom filename."""
        url = FileUrl(url="https://example.com/image.png", filename="custom.png")

        assert url.url == "https://example.com/image.png"
        assert url.filename == "custom.png"

    def test_invalid_url_scheme_raises(self):
        """Test that non-http(s) URLs raise ValueError."""
        with pytest.raises(ValueError, match="Invalid URL scheme"):
            FileUrl(url="ftp://example.com/file.txt")

    def test_invalid_url_scheme_file_raises(self):
        """Test that file:// URLs raise ValueError."""
        with pytest.raises(ValueError, match="Invalid URL scheme"):
            FileUrl(url="file:///path/to/file.txt")

    def test_http_url_valid(self):
        """Test that HTTP URLs are valid."""
        url = FileUrl(url="http://example.com/image.jpg")

        assert url.url == "http://example.com/image.jpg"

    def test_https_url_valid(self):
        """Test that HTTPS URLs are valid."""
        url = FileUrl(url="https://example.com/image.jpg")

        assert url.url == "https://example.com/image.jpg"

    def test_content_type_guessing_png(self):
        """Test content type guessing for PNG files."""
        url = FileUrl(url="https://example.com/image.png")

        assert url.content_type == "image/png"

    def test_content_type_guessing_jpeg(self):
        """Test content type guessing for JPEG files."""
        url = FileUrl(url="https://example.com/photo.jpg")

        assert url.content_type == "image/jpeg"

    def test_content_type_guessing_pdf(self):
        """Test content type guessing for PDF files."""
        url = FileUrl(url="https://example.com/document.pdf")

        assert url.content_type == "application/pdf"

    def test_content_type_guessing_with_query_params(self):
        """Test content type guessing with URL query parameters."""
        url = FileUrl(url="https://example.com/image.png?v=123&token=abc")

        assert url.content_type == "image/png"

    def test_content_type_fallback_unknown(self):
        """Test content type falls back to octet-stream for unknown extensions."""
        url = FileUrl(url="https://example.com/file.unknownext123")

        assert url.content_type == "application/octet-stream"

    def test_content_type_no_extension(self):
        """Test content type for URL without extension."""
        url = FileUrl(url="https://example.com/file")

        assert url.content_type == "application/octet-stream"

    def test_read_fetches_content(self):
        """Test that read() fetches content from URL."""
        url = FileUrl(url="https://example.com/image.png")
        mock_response = MagicMock()
        mock_response.content = b"fake image content"
        mock_response.headers = {"content-type": "image/png"}

        with patch("httpx.get", return_value=mock_response) as mock_get:
            content = url.read()

        mock_get.assert_called_once_with(
            "https://example.com/image.png", follow_redirects=True
        )
        assert content == b"fake image content"

    def test_read_caches_content(self):
        """Test that read() caches content."""
        url = FileUrl(url="https://example.com/image.png")
        mock_response = MagicMock()
        mock_response.content = b"fake content"
        mock_response.headers = {}

        with patch("httpx.get", return_value=mock_response) as mock_get:
            content1 = url.read()
            content2 = url.read()

        mock_get.assert_called_once()
        assert content1 == content2

    def test_read_updates_content_type_from_response(self):
        """Test that read() updates content type from response headers."""
        url = FileUrl(url="https://example.com/file")
        mock_response = MagicMock()
        mock_response.content = b"fake content"
        mock_response.headers = {"content-type": "image/webp; charset=utf-8"}

        with patch("httpx.get", return_value=mock_response):
            url.read()

        assert url.content_type == "image/webp"

    @pytest.mark.asyncio
    async def test_aread_fetches_content(self):
        """Test that aread() fetches content from URL asynchronously."""
        url = FileUrl(url="https://example.com/image.png")
        mock_response = MagicMock()
        mock_response.content = b"async fake content"
        mock_response.headers = {"content-type": "image/png"}
        mock_response.raise_for_status = MagicMock()

        mock_client = MagicMock()
        mock_client.get = AsyncMock(return_value=mock_response)
        mock_client.__aenter__ = AsyncMock(return_value=mock_client)
        mock_client.__aexit__ = AsyncMock(return_value=None)

        with patch("httpx.AsyncClient", return_value=mock_client):
            content = await url.aread()

        assert content == b"async fake content"

    @pytest.mark.asyncio
    async def test_aread_caches_content(self):
        """Test that aread() caches content."""
        url = FileUrl(url="https://example.com/image.png")
        mock_response = MagicMock()
        mock_response.content = b"cached content"
        mock_response.headers = {}
        mock_response.raise_for_status = MagicMock()

        mock_client = MagicMock()
        mock_client.get = AsyncMock(return_value=mock_response)
        mock_client.__aenter__ = AsyncMock(return_value=mock_client)
        mock_client.__aexit__ = AsyncMock(return_value=None)

        with patch("httpx.AsyncClient", return_value=mock_client):
            content1 = await url.aread()
            content2 = await url.aread()

        mock_client.get.assert_called_once()
        assert content1 == content2


class TestNormalizeSource:
    """Tests for _normalize_source with URL detection."""

    def test_normalize_url_string(self):
        """Test that URL strings are converted to FileUrl."""
        result = _normalize_source("https://example.com/image.png")

        assert isinstance(result, FileUrl)
        assert result.url == "https://example.com/image.png"

    def test_normalize_http_url_string(self):
        """Test that HTTP URL strings are converted to FileUrl."""
        result = _normalize_source("http://example.com/file.pdf")

        assert isinstance(result, FileUrl)
        assert result.url == "http://example.com/file.pdf"

    def test_normalize_file_path_string(self, tmp_path):
        """Test that file path strings are converted to FilePath."""
        test_file = tmp_path / "test.png"
        test_file.write_bytes(b"test content")

        result = _normalize_source(str(test_file))

        assert isinstance(result, FilePath)

    def test_normalize_relative_path_is_not_url(self):
        """Test that relative path strings are not treated as URLs."""
        result = _normalize_source("https://example.com/file.png")

        assert isinstance(result, FileUrl)
        assert not isinstance(result, FilePath)

    def test_normalize_file_url_passthrough(self):
        """Test that FileUrl instances pass through unchanged."""
        original = FileUrl(url="https://example.com/image.png")
        result = _normalize_source(original)

        assert result is original


class TestResolverUrlHandling:
    """Tests for FileResolver URL handling."""

    def test_resolve_url_source_for_supported_provider(self):
        """Test URL source resolves to UrlReference for supported providers."""
        resolver = FileResolver()
        file = ImageFile(source=FileUrl(url="https://example.com/image.png"))

        resolved = resolver.resolve(file, "anthropic")

        assert isinstance(resolved, UrlReference)
        assert resolved.url == "https://example.com/image.png"
        assert resolved.content_type == "image/png"

    def test_resolve_url_source_openai(self):
        """Test URL source resolves to UrlReference for OpenAI."""
        resolver = FileResolver()
        file = ImageFile(source=FileUrl(url="https://example.com/photo.jpg"))

        resolved = resolver.resolve(file, "openai")

        assert isinstance(resolved, UrlReference)
        assert resolved.url == "https://example.com/photo.jpg"

    def test_resolve_url_source_gemini(self):
        """Test URL source resolves to UrlReference for Gemini."""
        resolver = FileResolver()
        file = ImageFile(source=FileUrl(url="https://example.com/image.webp"))

        resolved = resolver.resolve(file, "gemini")

        assert isinstance(resolved, UrlReference)
        assert resolved.url == "https://example.com/image.webp"

    def test_resolve_url_source_azure(self):
        """Test URL source resolves to UrlReference for Azure."""
        resolver = FileResolver()
        file = ImageFile(source=FileUrl(url="https://example.com/image.gif"))

        resolved = resolver.resolve(file, "azure")

        assert isinstance(resolved, UrlReference)
        assert resolved.url == "https://example.com/image.gif"

    def test_resolve_url_source_bedrock_fetches_content(self):
        """Test URL source fetches content for Bedrock (unsupported URLs)."""
        resolver = FileResolver()
        file_url = FileUrl(url="https://example.com/image.png")
        file = ImageFile(source=file_url)

        mock_response = MagicMock()
        mock_response.content = b"\x89PNG\r\n\x1a\n" + b"\x00" * 50
        mock_response.headers = {"content-type": "image/png"}

        with patch("httpx.get", return_value=mock_response):
            resolved = resolver.resolve(file, "bedrock")

        assert not isinstance(resolved, UrlReference)

    def test_resolve_bytes_source_still_works(self):
        """Test that bytes source still resolves normally."""
        resolver = FileResolver()
        minimal_png = (
            b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x08\x00\x00\x00\x08"
            b"\x01\x00\x00\x00\x00\xf9Y\xab\xcd\x00\x00\x00\nIDATx\x9cc`\x00\x00"
            b"\x00\x02\x00\x01\xe2!\xbc3\x00\x00\x00\x00IEND\xaeB`\x82"
        )
        file = ImageFile(source=FileBytes(data=minimal_png, filename="test.png"))

        resolved = resolver.resolve(file, "anthropic")

        assert isinstance(resolved, InlineBase64)

    @pytest.mark.asyncio
    async def test_aresolve_url_source(self):
        """Test async URL resolution for supported provider."""
        resolver = FileResolver()
        file = ImageFile(source=FileUrl(url="https://example.com/image.png"))

        resolved = await resolver.aresolve(file, "anthropic")

        assert isinstance(resolved, UrlReference)
        assert resolved.url == "https://example.com/image.png"


class TestImageFileWithUrl:
    """Tests for creating ImageFile with URL source."""

    def test_image_file_from_url_string(self):
        """Test creating ImageFile from URL string."""
        file = ImageFile(source="https://example.com/image.png")

        assert isinstance(file.source, FileUrl)
        assert file.source.url == "https://example.com/image.png"

    def test_image_file_from_file_url(self):
        """Test creating ImageFile from FileUrl instance."""
        url = FileUrl(url="https://example.com/photo.jpg")
        file = ImageFile(source=url)

        assert file.source is url
        assert file.content_type == "image/jpeg"
@@ -1,134 +0,0 @@
"""Tests for resolved file types."""

from datetime import datetime, timezone

from crewai_files.core.resolved import (
    FileReference,
    InlineBase64,
    InlineBytes,
    ResolvedFile,
    UrlReference,
)
import pytest


class TestInlineBase64:
    """Tests for InlineBase64 resolved type."""

    def test_create_inline_base64(self):
        """Test creating InlineBase64 instance."""
        resolved = InlineBase64(
            content_type="image/png",
            data="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==",
        )

        assert resolved.content_type == "image/png"
        assert len(resolved.data) > 0

    def test_inline_base64_is_resolved_file(self):
        """Test InlineBase64 is a ResolvedFile."""
        resolved = InlineBase64(content_type="image/png", data="abc123")

        assert isinstance(resolved, ResolvedFile)

    def test_inline_base64_frozen(self):
        """Test InlineBase64 is immutable."""
        resolved = InlineBase64(content_type="image/png", data="abc123")

        with pytest.raises(Exception):
            resolved.data = "xyz789"


class TestInlineBytes:
    """Tests for InlineBytes resolved type."""

    def test_create_inline_bytes(self):
        """Test creating InlineBytes instance."""
        data = b"\x89PNG\r\n\x1a\n"
        resolved = InlineBytes(
            content_type="image/png",
            data=data,
        )

        assert resolved.content_type == "image/png"
        assert resolved.data == data

    def test_inline_bytes_is_resolved_file(self):
        """Test InlineBytes is a ResolvedFile."""
        resolved = InlineBytes(content_type="image/png", data=b"test")

        assert isinstance(resolved, ResolvedFile)


class TestFileReference:
    """Tests for FileReference resolved type."""

    def test_create_file_reference(self):
        """Test creating FileReference instance."""
        resolved = FileReference(
            content_type="image/png",
            file_id="file-abc123",
            provider="gemini",
        )

        assert resolved.content_type == "image/png"
        assert resolved.file_id == "file-abc123"
        assert resolved.provider == "gemini"
        assert resolved.expires_at is None
        assert resolved.file_uri is None

    def test_file_reference_with_expiry(self):
        """Test FileReference with expiry time."""
        expiry = datetime.now(timezone.utc)
        resolved = FileReference(
            content_type="application/pdf",
            file_id="file-xyz789",
            provider="gemini",
            expires_at=expiry,
        )

        assert resolved.expires_at == expiry

    def test_file_reference_with_uri(self):
        """Test FileReference with URI."""
        resolved = FileReference(
            content_type="video/mp4",
            file_id="file-video123",
            provider="gemini",
            file_uri="https://generativelanguage.googleapis.com/v1/files/file-video123",
        )

        assert resolved.file_uri is not None

    def test_file_reference_is_resolved_file(self):
        """Test FileReference is a ResolvedFile."""
        resolved = FileReference(
            content_type="image/png",
            file_id="file-123",
            provider="anthropic",
        )

        assert isinstance(resolved, ResolvedFile)


class TestUrlReference:
    """Tests for UrlReference resolved type."""

    def test_create_url_reference(self):
        """Test creating UrlReference instance."""
        resolved = UrlReference(
            content_type="image/png",
            url="https://storage.googleapis.com/bucket/image.png",
        )

        assert resolved.content_type == "image/png"
        assert resolved.url == "https://storage.googleapis.com/bucket/image.png"

    def test_url_reference_is_resolved_file(self):
        """Test UrlReference is a ResolvedFile."""
        resolved = UrlReference(
            content_type="image/jpeg",
            url="https://example.com/photo.jpg",
        )

        assert isinstance(resolved, ResolvedFile)
@@ -1,176 +0,0 @@
"""Tests for FileResolver."""

from crewai_files import FileBytes, ImageFile
from crewai_files.cache.upload_cache import UploadCache
from crewai_files.core.resolved import InlineBase64, InlineBytes
from crewai_files.resolution.resolver import (
    FileResolver,
    FileResolverConfig,
    create_resolver,
)


# Minimal valid PNG
MINIMAL_PNG = (
    b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x08\x00\x00\x00\x08"
    b"\x01\x00\x00\x00\x00\xf9Y\xab\xcd\x00\x00\x00\nIDATx\x9cc`\x00\x00"
    b"\x00\x02\x00\x01\xe2!\xbc3\x00\x00\x00\x00IEND\xaeB`\x82"
)


class TestFileResolverConfig:
    """Tests for FileResolverConfig."""

    def test_default_config(self):
        """Test default configuration values."""
        config = FileResolverConfig()

        assert config.prefer_upload is False
        assert config.upload_threshold_bytes is None
        assert config.use_bytes_for_bedrock is True

    def test_custom_config(self):
        """Test custom configuration values."""
        config = FileResolverConfig(
            prefer_upload=True,
            upload_threshold_bytes=1024 * 1024,
            use_bytes_for_bedrock=False,
        )

        assert config.prefer_upload is True
        assert config.upload_threshold_bytes == 1024 * 1024
        assert config.use_bytes_for_bedrock is False


class TestFileResolver:
    """Tests for FileResolver class."""

    def test_resolve_inline_base64(self):
        """Test resolving file as inline base64."""
        resolver = FileResolver()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        resolved = resolver.resolve(file, "openai")

        assert isinstance(resolved, InlineBase64)
        assert resolved.content_type == "image/png"
        assert len(resolved.data) > 0

    def test_resolve_inline_bytes_for_bedrock(self):
        """Test resolving file as inline bytes for Bedrock."""
        config = FileResolverConfig(use_bytes_for_bedrock=True)
        resolver = FileResolver(config=config)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        resolved = resolver.resolve(file, "bedrock")

        assert isinstance(resolved, InlineBytes)
        assert resolved.content_type == "image/png"
        assert resolved.data == MINIMAL_PNG

    def test_resolve_files_multiple(self):
        """Test resolving multiple files."""
        resolver = FileResolver()
        files = {
            "image1": ImageFile(
                source=FileBytes(data=MINIMAL_PNG, filename="test1.png")
            ),
            "image2": ImageFile(
                source=FileBytes(data=MINIMAL_PNG, filename="test2.png")
            ),
        }

        resolved = resolver.resolve_files(files, "openai")

        assert len(resolved) == 2
        assert "image1" in resolved
        assert "image2" in resolved
        assert all(isinstance(r, InlineBase64) for r in resolved.values())

    def test_resolve_with_cache(self):
        """Test resolver uses cache."""
        cache = UploadCache()
        resolver = FileResolver(upload_cache=cache)
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        # First resolution
        resolved1 = resolver.resolve(file, "openai")
        # Second resolution (should use same base64 encoding)
        resolved2 = resolver.resolve(file, "openai")

        assert isinstance(resolved1, InlineBase64)
        assert isinstance(resolved2, InlineBase64)
        # Data should be identical
        assert resolved1.data == resolved2.data

    def test_clear_cache(self):
        """Test clearing resolver cache."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        # Add something to cache manually
        cache.set(file=file, provider="gemini", file_id="test")

        resolver = FileResolver(upload_cache=cache)
        resolver.clear_cache()

        assert len(cache) == 0

    def test_get_cached_uploads(self):
        """Test getting cached uploads from resolver."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(file=file, provider="gemini", file_id="test-1")
        cache.set(file=file, provider="anthropic", file_id="test-2")

        resolver = FileResolver(upload_cache=cache)

        gemini_uploads = resolver.get_cached_uploads("gemini")
        anthropic_uploads = resolver.get_cached_uploads("anthropic")

        assert len(gemini_uploads) == 1
        assert len(anthropic_uploads) == 1

    def test_get_cached_uploads_empty(self):
        """Test getting cached uploads when no cache."""
        resolver = FileResolver()  # No cache

        uploads = resolver.get_cached_uploads("gemini")

        assert uploads == []


class TestCreateResolver:
    """Tests for create_resolver factory function."""

    def test_create_default_resolver(self):
        """Test creating resolver with default settings."""
        resolver = create_resolver()

        assert resolver.config.prefer_upload is False
        assert resolver.upload_cache is not None

    def test_create_resolver_with_options(self):
        """Test creating resolver with custom options."""
        resolver = create_resolver(
            prefer_upload=True,
            upload_threshold_bytes=5 * 1024 * 1024,
            enable_cache=False,
        )

        assert resolver.config.prefer_upload is True
        assert resolver.config.upload_threshold_bytes == 5 * 1024 * 1024
        assert resolver.upload_cache is None

    def test_create_resolver_cache_enabled(self):
        """Test resolver has cache when enabled."""
        resolver = create_resolver(enable_cache=True)

        assert resolver.upload_cache is not None

    def test_create_resolver_cache_disabled(self):
        """Test resolver has no cache when disabled."""
        resolver = create_resolver(enable_cache=False)

        assert resolver.upload_cache is None
@@ -1,210 +0,0 @@
"""Tests for upload cache."""

from datetime import datetime, timedelta, timezone

from crewai_files import FileBytes, ImageFile
from crewai_files.cache.upload_cache import CachedUpload, UploadCache


# Minimal valid PNG
MINIMAL_PNG = (
    b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x08\x00\x00\x00\x08"
    b"\x01\x00\x00\x00\x00\xf9Y\xab\xcd\x00\x00\x00\nIDATx\x9cc`\x00\x00"
    b"\x00\x02\x00\x01\xe2!\xbc3\x00\x00\x00\x00IEND\xaeB`\x82"
)


class TestCachedUpload:
    """Tests for CachedUpload dataclass."""

    def test_cached_upload_creation(self):
        """Test creating a cached upload."""
        now = datetime.now(timezone.utc)
        cached = CachedUpload(
            file_id="file-123",
            provider="gemini",
            file_uri="files/file-123",
            content_type="image/png",
            uploaded_at=now,
            expires_at=now + timedelta(hours=48),
        )

        assert cached.file_id == "file-123"
        assert cached.provider == "gemini"
        assert cached.file_uri == "files/file-123"
        assert cached.content_type == "image/png"

    def test_is_expired_false(self):
        """Test is_expired returns False for non-expired upload."""
        future = datetime.now(timezone.utc) + timedelta(hours=24)
        cached = CachedUpload(
            file_id="file-123",
            provider="gemini",
            file_uri=None,
            content_type="image/png",
            uploaded_at=datetime.now(timezone.utc),
            expires_at=future,
        )

        assert cached.is_expired() is False

    def test_is_expired_true(self):
        """Test is_expired returns True for expired upload."""
        past = datetime.now(timezone.utc) - timedelta(hours=1)
        cached = CachedUpload(
            file_id="file-123",
            provider="gemini",
            file_uri=None,
            content_type="image/png",
            uploaded_at=datetime.now(timezone.utc) - timedelta(hours=2),
            expires_at=past,
        )

        assert cached.is_expired() is True

    def test_is_expired_no_expiry(self):
        """Test is_expired returns False when no expiry set."""
        cached = CachedUpload(
            file_id="file-123",
            provider="anthropic",
            file_uri=None,
            content_type="image/png",
            uploaded_at=datetime.now(timezone.utc),
            expires_at=None,
        )

        assert cached.is_expired() is False


class TestUploadCache:
    """Tests for UploadCache class."""

    def test_cache_creation(self):
        """Test creating an empty cache."""
        cache = UploadCache()

        assert len(cache) == 0

    def test_set_and_get(self):
        """Test setting and getting cached uploads."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(
            file=file,
            provider="gemini",
            file_id="file-123",
            file_uri="files/file-123",
        )

        result = cache.get(file, "gemini")

        assert result is not None
        assert result.file_id == "file-123"
        assert result.provider == "gemini"

    def test_get_missing(self):
        """Test getting non-existent entry returns None."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        result = cache.get(file, "gemini")

        assert result is None

    def test_get_different_provider(self):
        """Test getting with different provider returns None."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(file=file, provider="gemini", file_id="file-123")

        result = cache.get(file, "anthropic")  # Different provider

        assert result is None

    def test_remove(self):
        """Test removing cached entry."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(file=file, provider="gemini", file_id="file-123")
        removed = cache.remove(file, "gemini")

        assert removed is True
        assert cache.get(file, "gemini") is None

    def test_remove_missing(self):
        """Test removing non-existent entry returns False."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        removed = cache.remove(file, "gemini")

        assert removed is False

    def test_remove_by_file_id(self):
        """Test removing by file ID."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(file=file, provider="gemini", file_id="file-123")
        removed = cache.remove_by_file_id("file-123", "gemini")

        assert removed is True
        assert len(cache) == 0

    def test_clear_expired(self):
        """Test clearing expired entries."""
        cache = UploadCache()
        file1 = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test1.png"))
        file2 = ImageFile(
            source=FileBytes(data=MINIMAL_PNG + b"x", filename="test2.png")
        )

        # Add one expired and one valid entry
        past = datetime.now(timezone.utc) - timedelta(hours=1)
        future = datetime.now(timezone.utc) + timedelta(hours=24)

        cache.set(file=file1, provider="gemini", file_id="expired", expires_at=past)
        cache.set(file=file2, provider="gemini", file_id="valid", expires_at=future)

        removed = cache.clear_expired()

        assert removed == 1
        assert len(cache) == 1
        assert cache.get(file2, "gemini") is not None

    def test_clear(self):
        """Test clearing all entries."""
        cache = UploadCache()
        file = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test.png"))

        cache.set(file=file, provider="gemini", file_id="file-123")
        cache.set(file=file, provider="anthropic", file_id="file-456")

        cleared = cache.clear()

        assert cleared == 2
        assert len(cache) == 0

    def test_get_all_for_provider(self):
        """Test getting all cached uploads for a provider."""
        cache = UploadCache()
        file1 = ImageFile(source=FileBytes(data=MINIMAL_PNG, filename="test1.png"))
        file2 = ImageFile(
            source=FileBytes(data=MINIMAL_PNG + b"x", filename="test2.png")
        )
        file3 = ImageFile(
            source=FileBytes(data=MINIMAL_PNG + b"xx", filename="test3.png")
        )

        cache.set(file=file1, provider="gemini", file_id="file-1")
        cache.set(file=file2, provider="gemini", file_id="file-2")
        cache.set(file=file3, provider="anthropic", file_id="file-3")

        gemini_uploads = cache.get_all_for_provider("gemini")
        anthropic_uploads = cache.get_all_for_provider("anthropic")

        assert len(gemini_uploads) == 2
        assert len(anthropic_uploads) == 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14"
dependencies = [
    # Core Dependencies
    "pydantic~=2.11.9",
    "openai~=1.83.0",
    "openai>=1.83.0,<2",
    "instructor>=1.3.3",
    # Text Processing
    "pdfplumber~=0.11.4",
@@ -98,9 +98,6 @@ a2a = [
    "httpx-sse~=0.4.0",
    "aiocache[redis,memcached]~=0.12.3",
]
file-processing = [
    "crewai-files",
]


[project.scripts]
@@ -127,7 +124,6 @@ torchvision = [
    { index = "pytorch-nightly", marker = "python_version >= '3.13'" },
    { index = "pytorch", marker = "python_version < '3.13'" },
]
crewai-files = { workspace = true }


[build-system]

@@ -95,7 +95,6 @@ from crewai.utilities.training_handler import CrewTrainingHandler


if TYPE_CHECKING:
    from crewai_files import FileInput
    from crewai_tools import CodeInterpreterTool

    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
@@ -189,8 +188,7 @@ class Agent(BaseAgent):
    )
    multimodal: bool = Field(
        default=False,
        deprecated=True,
        description="[DEPRECATED, will be removed in v2.0 - pass files natively.] Whether the agent is multimodal.",
        description="Whether the agent is multimodal.",
    )
    inject_date: bool = Field(
        default=False,
@@ -1646,8 +1644,7 @@ class Agent(BaseAgent):
|
||||
self,
|
||||
messages: str | list[LLMMessage],
|
||||
response_format: type[Any] | None = None,
|
||||
input_files: dict[str, FileInput] | None = None,
|
||||
) -> tuple[AgentExecutor, dict[str, Any], dict[str, Any], list[CrewStructuredTool]]:
|
||||
) -> tuple[AgentExecutor, dict[str, str], dict[str, Any], list[CrewStructuredTool]]:
|
||||
"""Prepare common setup for kickoff execution.
|
||||
|
||||
This method handles all the common preparation logic shared between
|
||||
@@ -1657,7 +1654,6 @@ class Agent(BaseAgent):
|
||||
Args:
|
||||
messages: Either a string query or a list of message dictionaries.
|
||||
response_format: Optional Pydantic model for structured output.
|
||||
input_files: Optional dict of named files to attach to the message.
|
||||
|
||||
Returns:
|
||||
Tuple of (executor, inputs, agent_info, parsed_tools) ready for execution.
|
||||
@@ -1734,28 +1730,20 @@ class Agent(BaseAgent):
|
||||
i18n=self.i18n,
|
||||
)
|
||||
|
||||
all_files: dict[str, Any] = {}
|
||||
# Format messages
|
||||
if isinstance(messages, str):
|
||||
formatted_messages = messages
|
||||
else:
|
||||
formatted_messages = "\n".join(
|
||||
str(msg.get("content", "")) for msg in messages if msg.get("content")
|
||||
)
|
||||
for msg in messages:
|
||||
if msg.get("files"):
|
||||
all_files.update(msg["files"])
|
||||
|
||||
if input_files:
|
||||
all_files.update(input_files)
|
||||
|
||||
# Build the input dict for the executor
|
||||
inputs: dict[str, Any] = {
|
||||
inputs = {
|
||||
"input": formatted_messages,
|
||||
"tool_names": get_tool_names(parsed_tools),
|
||||
"tools": render_text_description_and_args(parsed_tools),
|
||||
}
|
||||
if all_files:
|
||||
inputs["files"] = all_files
|
||||
|
||||
return executor, inputs, agent_info, parsed_tools
|
||||
|
||||
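A standalone rerun of the merge logic above (plain dicts; names are illustrative): per-message files are collected first, then kickoff-level input_files override same-named entries.

messages = [
    {"role": "user", "content": "Summarize this", "files": {"doc": "v1.pdf"}},
]
input_files = {"doc": "v2.pdf", "logo": "logo.png"}

all_files: dict[str, str] = {}
for msg in messages:
    if msg.get("files"):
        all_files.update(msg["files"])
if input_files:
    all_files.update(input_files)

assert all_files == {"doc": "v2.pdf", "logo": "logo.png"}  # input_files wins on "doc"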
@@ -1763,12 +1751,12 @@ class Agent(BaseAgent):
        self,
        messages: str | list[LLMMessage],
        response_format: type[Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
        """Execute the agent with the given messages using the AgentExecutor.
        """
        Execute the agent with the given messages using the AgentExecutor.

        This method provides standalone agent execution without requiring a Crew.
        It supports tools, response formatting, guardrails, and file inputs.
        It supports tools, response formatting, and guardrails.

        When called from within a Flow (sync or async method), this automatically
        detects the event loop and returns a coroutine that the Flow framework
@@ -1778,10 +1766,7 @@ class Agent(BaseAgent):
            messages: Either a string query or a list of message dictionaries.
                If a string is provided, it will be converted to a user message.
                If a list is provided, each dict should have 'role' and 'content' keys.
                Messages can include a 'files' field with file inputs.
            response_format: Optional Pydantic model for structured output.
            input_files: Optional dict of named files to attach to the message.
                Files can be paths, bytes, or File objects from crewai_files.

        Returns:
            LiteAgentOutput: The result of the agent execution.
@@ -1793,10 +1778,10 @@ class Agent(BaseAgent):
        # Magic auto-async: if inside event loop (e.g., inside a Flow),
        # return coroutine for Flow to await
        if is_inside_event_loop():
            return self.kickoff_async(messages, response_format, input_files)
            return self.kickoff_async(messages, response_format)

        executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
            messages, response_format, input_files
            messages, response_format
        )

        try:
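A hedged sketch of the auto-async check above, using only the standard library (the crewai helper is assumed to work along these lines): asyncio.get_running_loop() raises RuntimeError when no loop is active, which lets a sync entrypoint decide whether to hand back a coroutine instead of blocking.

import asyncio

def is_inside_event_loop() -> bool:
    try:
        asyncio.get_running_loop()  # raises RuntimeError outside a loop
        return True
    except RuntimeError:
        return False

assert is_inside_event_loop() is False  # top-level, no loop running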
@@ -2042,9 +2027,9 @@ class Agent(BaseAgent):
        self,
        messages: str | list[LLMMessage],
        response_format: type[Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput:
        """Execute the agent asynchronously with the given messages.
        """
        Execute the agent asynchronously with the given messages.

        This is the async version of the kickoff method that uses native async
        execution. It is designed for use within async contexts, such as when
@@ -2054,16 +2039,13 @@ class Agent(BaseAgent):
            messages: Either a string query or a list of message dictionaries.
                If a string is provided, it will be converted to a user message.
                If a list is provided, each dict should have 'role' and 'content' keys.
                Messages can include a 'files' field with file inputs.
            response_format: Optional Pydantic model for structured output.
            input_files: Optional dict of named files to attach to the message.
                Files can be paths, bytes, or File objects from crewai_files.

        Returns:
            LiteAgentOutput: The result of the agent execution.
        """
        executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
            messages, response_format, input_files
            messages, response_format
        )

        try:
@@ -2108,24 +2090,6 @@ class Agent(BaseAgent):
            )
            raise

    async def akickoff(
        self,
        messages: str | list[LLMMessage],
        response_format: type[Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput:
        """Async version of kickoff. Alias for kickoff_async.

        Args:
            messages: Either a string query or a list of message dictionaries.
            response_format: Optional Pydantic model for structured output.
            input_files: Optional dict of named files to attach to the message.

        Returns:
            LiteAgentOutput: The result of the agent execution.
        """
        return await self.kickoff_async(messages, response_format, input_files)


# Rebuild Agent model to resolve A2A type forward references
try:
@@ -45,7 +45,6 @@ from crewai.utilities.agent_utils import (
    track_delegation_if_needed,
)
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.file_store import aget_all_files, get_all_files
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import sanitize_tool_name
@@ -192,8 +191,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
        self.messages.append(format_message_for_llm(user_prompt))

        self._inject_multimodal_files(inputs)

        self._show_start_logs()

        self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
@@ -218,66 +215,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            self._create_external_memory(formatted_answer)
        return {"output": formatted_answer.output}

    def _inject_multimodal_files(self, inputs: dict[str, Any] | None = None) -> None:
        """Attach files to the last user message for LLM-layer formatting.

        Merges files from crew/task store and inputs dict, then attaches them
        to the message's `files` field. Input files take precedence over
        crew/task files with the same name.

        Args:
            inputs: Optional inputs dict that may contain files.
        """
        files: dict[str, Any] = {}

        if self.crew and self.task:
            crew_files = get_all_files(self.crew.id, self.task.id)
            if crew_files:
                files.update(crew_files)

        if inputs and inputs.get("files"):
            files.update(inputs["files"])

        if not files:
            return

        for i in range(len(self.messages) - 1, -1, -1):
            msg = self.messages[i]
            if msg.get("role") == "user":
                msg["files"] = files
                break

    async def _ainject_multimodal_files(
        self, inputs: dict[str, Any] | None = None
    ) -> None:
        """Async attach files to the last user message for LLM-layer formatting.

        Merges files from crew/task store and inputs dict, then attaches them
        to the message's `files` field. Input files take precedence over
        crew/task files with the same name.

        Args:
            inputs: Optional inputs dict that may contain files.
        """
        files: dict[str, Any] = {}

        if self.crew and self.task:
            crew_files = await aget_all_files(self.crew.id, self.task.id)
            if crew_files:
                files.update(crew_files)

        if inputs and inputs.get("files"):
            files.update(inputs["files"])

        if not files:
            return

        for i in range(len(self.messages) - 1, -1, -1):
            msg = self.messages[i]
            if msg.get("role") == "user":
                msg["files"] = files
                break

    def _invoke_loop(self) -> AgentFinish:
        """Execute agent loop until completion.
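A standalone illustration of the injection walk above: scan messages from the end and attach the merged files dict to the most recent user turn, leaving earlier turns untouched.

messages = [
    {"role": "user", "content": "first question"},
    {"role": "assistant", "content": "first answer"},
    {"role": "user", "content": "describe the attachment"},
]
files = {"photo": "photo.png"}

for i in range(len(messages) - 1, -1, -1):
    if messages[i].get("role") == "user":
        messages[i]["files"] = files
        break

assert "files" in messages[2] and "files" not in messages[0]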
@@ -763,7 +700,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                    if (
                        original_tool
                        and hasattr(original_tool, "cache_function")
                        and callable(original_tool.cache_function)
                        and original_tool.cache_function
                    ):
                        should_cache = original_tool.cache_function(
                            args_dict, raw_result
@@ -794,7 +731,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                            error=e,
                        ),
                    )
            elif max_usage_reached and original_tool:
            elif max_usage_reached:
                # Return error message when max usage limit is reached
                result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."

@@ -873,8 +810,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
        self.messages.append(format_message_for_llm(user_prompt))

        await self._ainject_multimodal_files(inputs)

        self._show_start_logs()

        self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
@@ -8,7 +8,6 @@ from hashlib import md5
import json
import re
from typing import (
    TYPE_CHECKING,
    Any,
    cast,
)
@@ -32,10 +31,6 @@ from rich.console import Console
from rich.panel import Panel
from typing_extensions import Self


if TYPE_CHECKING:
    from crewai_files import FileInput

    from crewai.agent import Agent
    from crewai.agents.agent_builder.base_agent import BaseAgent
    from crewai.agents.cache.cache_handler import CacheHandler
@@ -85,7 +80,6 @@ from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.agent_tools.read_file_tool import ReadFileTool
from crewai.tools.base_tool import BaseTool
from crewai.types.streaming import CrewStreamingOutput
from crewai.types.usage_metrics import UsageMetrics
@@ -94,7 +88,6 @@ from crewai.utilities.crew.models import CrewContext
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.file_handler import FileHandler
from crewai.utilities.file_store import clear_files, get_all_files
from crewai.utilities.formatter import (
    aggregate_raw_outputs_from_task_outputs,
    aggregate_raw_outputs_from_tasks,
@@ -684,17 +677,7 @@ class Crew(FlowTrackable, BaseModel):
    def kickoff(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> CrewOutput | CrewStreamingOutput:
        """Execute the crew's workflow.

        Args:
            inputs: Optional input dictionary for task interpolation.
            input_files: Optional dict of named file inputs for the crew.

        Returns:
            CrewOutput or CrewStreamingOutput if streaming is enabled.
        """
        if self.stream:
            enable_agent_streaming(self.agents)
            ctx = StreamingContext()
@@ -703,7 +686,7 @@ class Crew(FlowTrackable, BaseModel):
                """Execute the crew and capture the result."""
                try:
                    self.stream = False
                    crew_result = self.kickoff(inputs=inputs, input_files=input_files)
                    crew_result = self.kickoff(inputs=inputs)
                    if isinstance(crew_result, CrewOutput):
                        ctx.result_holder.append(crew_result)
                except Exception as exc:
@@ -726,7 +709,7 @@ class Crew(FlowTrackable, BaseModel):
        token = attach(baggage_ctx)

        try:
            inputs = prepare_kickoff(self, inputs, input_files)
            inputs = prepare_kickoff(self, inputs)

            if self.process == Process.sequential:
                result = self._run_sequential_process()
@@ -750,23 +733,13 @@ class Crew(FlowTrackable, BaseModel):
            )
            raise
        finally:
            clear_files(self.id)
            detach(token)
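A hedged usage sketch of the signature being removed here (input_files exists only on the old side of this diff; the values are illustrative, not from the source):

# crew = Crew(agents=[...], tasks=[...])
# result = crew.kickoff(
#     inputs={"topic": "Q3 earnings"},          # interpolated into task prompts
#     input_files={"report": "q3_report.pdf"},  # old API: named file inputs
# )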
    def kickoff_for_each(
        self,
        inputs: list[dict[str, Any]],
        input_files: dict[str, FileInput] | None = None,
        self, inputs: list[dict[str, Any]]
    ) -> list[CrewOutput | CrewStreamingOutput]:
        """Executes the Crew's workflow for each input and aggregates results.

        Args:
            inputs: List of input dictionaries, one per execution.
            input_files: Optional dict of named file inputs shared across all executions.

        Returns:
            List of CrewOutput or CrewStreamingOutput objects.

            If stream=True, returns a list of CrewStreamingOutput objects that must
            each be iterated to get stream chunks and access results.
        """
@@ -777,7 +750,7 @@ class Crew(FlowTrackable, BaseModel):
        for input_data in inputs:
            crew = self.copy()

            output = crew.kickoff(inputs=input_data, input_files=input_files)
            output = crew.kickoff(inputs=input_data)

            if not self.stream and crew.usage_metrics:
                total_usage_metrics.add_usage_metrics(crew.usage_metrics)
@@ -790,19 +763,10 @@ class Crew(FlowTrackable, BaseModel):
        return results

    async def kickoff_async(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
        self, inputs: dict[str, Any] | None = None
    ) -> CrewOutput | CrewStreamingOutput:
        """Asynchronous kickoff method to start the crew execution.

        Args:
            inputs: Optional input dictionary for task interpolation.
            input_files: Optional dict of named file inputs for the crew.

        Returns:
            CrewOutput or CrewStreamingOutput if streaming is enabled.

            If stream=True, returns a CrewStreamingOutput that can be async-iterated
            to get stream chunks. After iteration completes, access the final result
            via .result.
@@ -816,7 +780,7 @@ class Crew(FlowTrackable, BaseModel):
            async def run_crew() -> None:
                try:
                    self.stream = False
                    result = await asyncio.to_thread(self.kickoff, inputs, input_files)
                    result = await asyncio.to_thread(self.kickoff, inputs)
                    if isinstance(result, CrewOutput):
                        ctx.result_holder.append(result)
                except Exception as e:
@@ -834,22 +798,13 @@ class Crew(FlowTrackable, BaseModel):

            return streaming_output

        return await asyncio.to_thread(self.kickoff, inputs, input_files)
        return await asyncio.to_thread(self.kickoff, inputs)

    async def kickoff_for_each_async(
        self,
        inputs: list[dict[str, Any]],
        input_files: dict[str, FileInput] | None = None,
        self, inputs: list[dict[str, Any]]
    ) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
        """Executes the Crew's workflow for each input asynchronously.

        Args:
            inputs: List of input dictionaries, one per execution.
            input_files: Optional dict of named file inputs shared across all executions.

        Returns:
            List of CrewOutput or CrewStreamingOutput objects.

            If stream=True, returns a single CrewStreamingOutput that yields chunks
            from all crews as they arrive. After iteration, access results via .results
            (list of CrewOutput).
@@ -858,27 +813,18 @@ class Crew(FlowTrackable, BaseModel):
        async def kickoff_fn(
            crew: Crew, input_data: dict[str, Any]
        ) -> CrewOutput | CrewStreamingOutput:
            return await crew.kickoff_async(inputs=input_data, input_files=input_files)
            return await crew.kickoff_async(inputs=input_data)

        return await run_for_each_async(self, inputs, kickoff_fn)
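A minimal sketch of the thread-offload pattern kickoff_async uses above: a blocking sync method is wrapped in asyncio.to_thread so it can be awaited without stalling the event loop (names here are illustrative).

import asyncio

def blocking_kickoff(inputs: dict) -> str:
    return f"ran with {inputs}"  # stands in for the sync Crew.kickoff

async def kickoff_async(inputs: dict) -> str:
    return await asyncio.to_thread(blocking_kickoff, inputs)

print(asyncio.run(kickoff_async({"topic": "demo"})))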
    async def akickoff(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
        self, inputs: dict[str, Any] | None = None
    ) -> CrewOutput | CrewStreamingOutput:
        """Native async kickoff method using async task execution throughout.

        Unlike kickoff_async which wraps sync kickoff in a thread, this method
        uses native async/await for all operations including task execution,
        memory operations, and knowledge queries.

        Args:
            inputs: Optional input dictionary for task interpolation.
            input_files: Optional dict of named file inputs for the crew.

        Returns:
            CrewOutput or CrewStreamingOutput if streaming is enabled.
        """
        if self.stream:
            enable_agent_streaming(self.agents)
@@ -887,7 +833,7 @@ class Crew(FlowTrackable, BaseModel):
            async def run_crew() -> None:
                try:
                    self.stream = False
                    inner_result = await self.akickoff(inputs, input_files)
                    inner_result = await self.akickoff(inputs)
                    if isinstance(inner_result, CrewOutput):
                        ctx.result_holder.append(inner_result)
                except Exception as exc:
@@ -911,7 +857,7 @@ class Crew(FlowTrackable, BaseModel):
        token = attach(baggage_ctx)

        try:
            inputs = prepare_kickoff(self, inputs, input_files)
            inputs = prepare_kickoff(self, inputs)

            if self.process == Process.sequential:
                result = await self._arun_sequential_process()
@@ -935,25 +881,14 @@ class Crew(FlowTrackable, BaseModel):
            )
            raise
        finally:
            clear_files(self.id)
            detach(token)

    async def akickoff_for_each(
        self,
        inputs: list[dict[str, Any]],
        input_files: dict[str, FileInput] | None = None,
        self, inputs: list[dict[str, Any]]
    ) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
        """Native async execution of the Crew's workflow for each input.

        Uses native async throughout rather than thread-based async.

        Args:
            inputs: List of input dictionaries, one per execution.
            input_files: Optional dict of named file inputs shared across all executions.

        Returns:
            List of CrewOutput or CrewStreamingOutput objects.

            If stream=True, returns a single CrewStreamingOutput that yields chunks
            from all crews as they arrive.
        """
@@ -961,7 +896,7 @@ class Crew(FlowTrackable, BaseModel):
        async def kickoff_fn(
            crew: Crew, input_data: dict[str, Any]
        ) -> CrewOutput | CrewStreamingOutput:
            return await crew.akickoff(inputs=input_data, input_files=input_files)
            return await crew.akickoff(inputs=input_data)

        return await run_for_each_async(self, inputs, kickoff_fn)

@@ -1281,8 +1216,7 @@ class Crew(FlowTrackable, BaseModel):
            and hasattr(agent, "multimodal")
            and getattr(agent, "multimodal", False)
        ):
            if not (agent.llm and agent.llm.supports_multimodal()):
                tools = self._add_multimodal_tools(agent, tools)
            tools = self._add_multimodal_tools(agent, tools)

        if agent and (hasattr(agent, "apps") and getattr(agent, "apps", None)):
            tools = self._add_platform_tools(task, tools)
@@ -1290,24 +1224,7 @@ class Crew(FlowTrackable, BaseModel):
        if agent and (hasattr(agent, "mcps") and getattr(agent, "mcps", None)):
            tools = self._add_mcp_tools(task, tools)

        files = get_all_files(self.id, task.id)
        if files:
            supported_types: list[str] = []
            if agent and agent.llm and agent.llm.supports_multimodal():
                supported_types = agent.llm.supported_multimodal_content_types()

            def is_auto_injected(content_type: str) -> bool:
                return any(content_type.startswith(t) for t in supported_types)

            # Only add read_file tool if there are files that need it
            files_needing_tool = {
                name: f
                for name, f in files.items()
                if not is_auto_injected(f.content_type)
            }
            if files_needing_tool:
                tools = self._add_file_tools(tools, files_needing_tool)

        # Return a list[BaseTool] compatible with Task.execute_sync and execute_async
        return tools

    def _get_agent_to_use(self, task: Task) -> BaseAgent | None:
@@ -1391,22 +1308,6 @@ class Crew(FlowTrackable, BaseModel):
            return self._merge_tools(tools, cast(list[BaseTool], code_tools))
        return tools

    def _add_file_tools(
        self, tools: list[BaseTool], files: dict[str, Any]
    ) -> list[BaseTool]:
        """Add file reading tool when input files are available.

        Args:
            tools: Current list of tools.
            files: Dictionary of input files.

        Returns:
            Updated list with file tool added.
        """
        read_file_tool = ReadFileTool()
        read_file_tool.set_files(files)
        return self._merge_tools(tools, [read_file_tool])

    def _add_delegation_tools(
        self, task: Task, tools: list[BaseTool]
    ) -> list[BaseTool]:
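A standalone rerun of the routing predicate above: files whose MIME type matches one of the LLM's supported prefixes are injected into the message directly; everything else gets routed to a read-file tool.

supported_types = ["image/", "application/pdf"]

def is_auto_injected(content_type: str) -> bool:
    return any(content_type.startswith(t) for t in supported_types)

files = {"chart": "image/png", "notes": "text/plain", "paper": "application/pdf"}
files_needing_tool = {n: t for n, t in files.items() if not is_auto_injected(t)}
assert files_needing_tool == {"notes": "text/plain"}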
@@ -3,16 +3,13 @@
from __future__ import annotations

import asyncio
from collections.abc import Callable, Coroutine, Iterable, Mapping
from collections.abc import Callable, Coroutine, Iterable
from typing import TYPE_CHECKING, Any

from opentelemetry import baggage

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crews.crew_output import CrewOutput
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput
from crewai.utilities.file_store import store_files
from crewai.utilities.streaming import (
    StreamingState,
    TaskInfo,
@@ -20,23 +17,7 @@ from crewai.utilities.streaming import (
)


try:
    from crewai_files import (
        AudioFile,
        ImageFile,
        PDFFile,
        TextFile,
        VideoFile,
    )

    _FILE_TYPES: tuple[type, ...] = (AudioFile, ImageFile, PDFFile, TextFile, VideoFile)
except ImportError:
    _FILE_TYPES = ()


if TYPE_CHECKING:
    from crewai_files import FileInput

    from crewai.crew import Crew


@@ -195,40 +176,7 @@ def check_conditional_skip(
    return None


def _extract_files_from_inputs(inputs: dict[str, Any]) -> dict[str, Any]:
    """Extract file objects from inputs dict.

    Scans inputs for FileInput objects (ImageFile, TextFile, etc.) and
    extracts them into a separate dict.

    Args:
        inputs: The inputs dictionary to scan.

    Returns:
        Dictionary of extracted file objects.
    """
    if not _FILE_TYPES:
        return {}

    files: dict[str, Any] = {}
    keys_to_remove: list[str] = []

    for key, value in inputs.items():
        if isinstance(value, _FILE_TYPES):
            files[key] = value
            keys_to_remove.append(key)

    for key in keys_to_remove:
        del inputs[key]

    return files

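A standalone sketch of the extraction above, with a stand-in marker class instead of the crewai_files types (hypothetical names):

class FakeFile:  # stands in for ImageFile / TextFile / ...
    def __init__(self, name: str) -> None:
        self.name = name

_FILE_TYPES: tuple[type, ...] = (FakeFile,)

def extract_files(inputs: dict) -> dict:
    files = {k: v for k, v in inputs.items() if isinstance(v, _FILE_TYPES)}
    for k in files:
        del inputs[k]
    return files

inputs = {"topic": "birds", "photo": FakeFile("photo.png")}
files = extract_files(inputs)
assert "photo" in files and "photo" not in inputs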
def prepare_kickoff(
    crew: Crew,
    inputs: dict[str, Any] | None,
    input_files: dict[str, FileInput] | None = None,
) -> dict[str, Any] | None:
def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any] | None:
    """Prepare crew for kickoff execution.

    Handles before callbacks, event emission, task handler reset, input
@@ -237,7 +185,6 @@ def prepare_kickoff(
    Args:
        crew: The crew instance to prepare.
        inputs: Optional input dictionary to pass to the crew.
        input_files: Optional dict of named file inputs for the crew.

    Returns:
        The potentially modified inputs dictionary after before callbacks.
@@ -251,23 +198,14 @@ def prepare_kickoff(
    reset_emission_counter()
    reset_last_event_id()

    # Normalize inputs to dict[str, Any] for internal processing
    normalized: dict[str, Any] | None = None
    if inputs is not None:
        if not isinstance(inputs, Mapping):
            raise TypeError(
                f"inputs must be a dict or Mapping, got {type(inputs).__name__}"
            )
        normalized = dict(inputs)

    for before_callback in crew.before_kickoff_callbacks:
        if normalized is None:
            normalized = {}
        normalized = before_callback(normalized)
        if inputs is None:
            inputs = {}
        inputs = before_callback(inputs)

    future = crewai_event_bus.emit(
        crew,
        CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
        CrewKickoffStartedEvent(crew_name=crew.name, inputs=inputs),
    )
    if future is not None:
        try:
@@ -278,26 +216,9 @@ def prepare_kickoff(
    crew._task_output_handler.reset()
    crew._logging_color = "bold_purple"

    # Check for flow input files in baggage context (inherited from parent Flow)
    _flow_files = baggage.get_baggage("flow_input_files")
    flow_files: dict[str, Any] = _flow_files if isinstance(_flow_files, dict) else {}

    if normalized is not None:
        # Extract file objects unpacked directly into inputs
        unpacked_files = _extract_files_from_inputs(normalized)

        # Merge files: flow_files < input_files < unpacked_files (later takes precedence)
        all_files = {**flow_files, **(input_files or {}), **unpacked_files}
        if all_files:
            store_files(crew.id, all_files)

        crew._inputs = normalized
        crew._interpolate_inputs(normalized)
    else:
        # No inputs dict provided
        all_files = {**flow_files, **(input_files or {})}
        if all_files:
            store_files(crew.id, all_files)
    if inputs is not None:
        crew._inputs = inputs
        crew._interpolate_inputs(inputs)
    crew._set_tasks_callbacks()
    crew._set_allow_crewai_trigger_context_for_first_task()

@@ -312,7 +233,7 @@ def prepare_kickoff(
    if crew.planning:
        crew._handle_crew_planning()

    return normalized
    return inputs


class StreamingContext:
@@ -767,9 +767,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
            return sanitize_tool_name(tool_call.name)
        if isinstance(tool_call, dict):
            func_info = tool_call.get("function", {})
            return sanitize_tool_name(
                func_info.get("name", "") or tool_call.get("name", "unknown")
            )
            return sanitize_tool_name(func_info.get("name", "") or tool_call.get("name", "unknown"))
        return "unknown"

    @router(execute_native_tool)
@@ -915,8 +913,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
        user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
        self.state.messages.append(format_message_for_llm(user_prompt))

        self._inject_files_from_inputs(inputs)

        self.state.ask_for_human_input = bool(
            inputs.get("ask_for_human_input", False)
        )
@@ -999,8 +995,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
        user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
        self.state.messages.append(format_message_for_llm(user_prompt))

        self._inject_files_from_inputs(inputs)

        self.state.ask_for_human_input = bool(
            inputs.get("ask_for_human_input", False)
        )
@@ -1039,10 +1033,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
        finally:
            self._is_executing = False

    async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Async version of invoke. Alias for invoke_async."""
        return await self.invoke_async(inputs)

    def _handle_agent_action(
        self, formatted_answer: AgentAction, tool_result: ToolResult
    ) -> AgentAction | AgentFinish:
@@ -1190,22 +1180,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
            training_data[agent_id] = agent_training_data
            training_handler.save(training_data)

    def _inject_files_from_inputs(self, inputs: dict[str, Any]) -> None:
        """Inject files from inputs into the last user message.

        Args:
            inputs: Input dictionary that may contain a 'files' key.
        """
        files = inputs.get("files")
        if not files:
            return

        for i in range(len(self.state.messages) - 1, -1, -1):
            msg = self.state.messages[i]
            if msg.get("role") == "user":
                msg["files"] = files
                break

    @staticmethod
    def _format_prompt(prompt: str, inputs: dict[str, str]) -> str:
        """Format prompt template with input values.
@@ -83,8 +83,6 @@ from crewai.flow.utils import (


if TYPE_CHECKING:
    from crewai_files import FileInput

    from crewai.flow.async_feedback.types import PendingFeedbackContext
    from crewai.flow.human_feedback import HumanFeedbackResult
    from crewai.llms.base_llm import BaseLLM
@@ -1414,21 +1412,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
        object.__setattr__(self._state, key, value)

    def kickoff(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
        self, inputs: dict[str, Any] | None = None
    ) -> Any | FlowStreamingOutput:
        """Start the flow execution in a synchronous context.
        """
        Start the flow execution in a synchronous context.

        This method wraps kickoff_async so that all state initialization and event
        emission is handled in the asynchronous method.

        Args:
            inputs: Optional dictionary containing input values and/or a state ID.
            input_files: Optional dict of named file inputs for the flow.

        Returns:
            The final output from the flow or FlowStreamingOutput if streaming.
        """
        if self.stream:
            result_holder: list[Any] = []
@@ -1448,7 +1438,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            def run_flow() -> None:
                try:
                    self.stream = False
                    result = self.kickoff(inputs=inputs, input_files=input_files)
                    result = self.kickoff(inputs=inputs)
                    result_holder.append(result)
                except Exception as e:
                    # HumanFeedbackPending is expected control flow, not an error
@@ -1470,16 +1460,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
            return streaming_output

        async def _run_flow() -> Any:
            return await self.kickoff_async(inputs, input_files)
            return await self.kickoff_async(inputs)

        return asyncio.run(_run_flow())

    async def kickoff_async(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
        self, inputs: dict[str, Any] | None = None
    ) -> Any | FlowStreamingOutput:
        """Start the flow execution asynchronously.
        """
        Start the flow execution asynchronously.

        This method performs state restoration (if an 'id' is provided and persistence is available)
        and updates the flow state with any additional inputs. It then emits the FlowStartedEvent,
@@ -1488,7 +1477,6 @@ class Flow(Generic[T], metaclass=FlowMeta):

        Args:
            inputs: Optional dictionary containing input values and/or a state ID for restoration.
            input_files: Optional dict of named file inputs for the flow.

        Returns:
            The final output from the flow, which is the result of the last executed method.
@@ -1511,9 +1499,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            async def run_flow() -> None:
                try:
                    self.stream = False
                    result = await self.kickoff_async(
                        inputs=inputs, input_files=input_files
                    )
                    result = await self.kickoff_async(inputs=inputs)
                    result_holder.append(result)
                except Exception as e:
                    # HumanFeedbackPending is expected control flow, not an error
@@ -1537,7 +1523,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
            return streaming_output

        ctx = baggage.set_baggage("flow_inputs", inputs or {})
        ctx = baggage.set_baggage("flow_input_files", input_files or {}, context=ctx)
        flow_token = attach(ctx)

        try:
@@ -1720,20 +1705,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
            detach(flow_token)

    async def akickoff(
        self,
        inputs: dict[str, Any] | None = None,
        input_files: dict[str, FileInput] | None = None,
        self, inputs: dict[str, Any] | None = None
    ) -> Any | FlowStreamingOutput:
        """Native async method to start the flow execution. Alias for kickoff_async.


        Args:
            inputs: Optional dictionary containing input values and/or a state ID for restoration.
            input_files: Optional dict of named file inputs for the flow.

        Returns:
            The final output from the flow, which is the result of the last executed method.
        """
        return await self.kickoff_async(inputs, input_files)
        return await self.kickoff_async(inputs)

    async def _execute_start_method(self, start_method_name: FlowMethodName) -> None:
        """Executes a flow's start method and its triggered listeners.
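A hedged sketch of the baggage propagation shown above: OpenTelemetry baggage lets a parent Flow stash values that nested crew kickoffs read back without explicit parameter passing (key names follow this diff; storing dicts in baggage is an in-process convenience).

from opentelemetry import baggage
from opentelemetry.context import attach, detach

ctx = baggage.set_baggage("flow_inputs", {"topic": "demo"})
ctx = baggage.set_baggage("flow_input_files", {"doc": "report.pdf"}, context=ctx)
token = attach(ctx)
try:
    assert baggage.get_baggage("flow_input_files") == {"doc": "report.pdf"}
finally:
    detach(token)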
@@ -1,11 +1,8 @@
from __future__ import annotations

import asyncio
from collections.abc import Callable
import inspect
import json
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    cast,
@@ -26,10 +23,6 @@ from pydantic import (
)
from typing_extensions import Self


if TYPE_CHECKING:
    from crewai_files import FileInput

    from crewai.agents.agent_builder.base_agent import BaseAgent
    from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
    from crewai.agents.cache.cache_handler import CacheHandler
@@ -303,9 +296,9 @@ class LiteAgent(FlowTrackable, BaseModel):
        self,
        messages: str | list[LLMMessage],
        response_format: type[BaseModel] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput:
        """Execute the agent with the given messages.
        """
        Execute the agent with the given messages.

        Args:
            messages: Either a string query or a list of message dictionaries.
@@ -313,8 +306,6 @@ class LiteAgent(FlowTrackable, BaseModel):
                If a list is provided, each dict should have 'role' and 'content' keys.
            response_format: Optional Pydantic model for structured output. If provided,
                overrides self.response_format for this execution.
            input_files: Optional dict of named files to attach to the message.
                Files can be paths, bytes, or File objects from crewai_files.

        Returns:
            LiteAgentOutput: The result of the agent execution.
@@ -336,7 +327,7 @@ class LiteAgent(FlowTrackable, BaseModel):

        # Format messages for the LLM
        self._messages = self._format_messages(
            messages, response_format=response_format, input_files=input_files
            messages, response_format=response_format
        )

        return self._execute_core(
@@ -473,45 +464,19 @@ class LiteAgent(FlowTrackable, BaseModel):

        return output

    async def kickoff_async(
        self,
        messages: str | list[LLMMessage],
        response_format: type[BaseModel] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput:
        """Execute the agent asynchronously with the given messages.
    async def kickoff_async(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
        """
        Execute the agent asynchronously with the given messages.

        Args:
            messages: Either a string query or a list of message dictionaries.
                If a string is provided, it will be converted to a user message.
                If a list is provided, each dict should have 'role' and 'content' keys.
            response_format: Optional Pydantic model for structured output.
            input_files: Optional dict of named files to attach to the message.

        Returns:
            LiteAgentOutput: The result of the agent execution.
        """
        return await asyncio.to_thread(
            self.kickoff, messages, response_format, input_files
        )

    async def akickoff(
        self,
        messages: str | list[LLMMessage],
        response_format: type[BaseModel] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> LiteAgentOutput:
        """Async version of kickoff. Alias for kickoff_async.

        Args:
            messages: Either a string query or a list of message dictionaries.
            response_format: Optional Pydantic model for structured output.
            input_files: Optional dict of named files to attach to the message.

        Returns:
            LiteAgentOutput: The result of the agent execution.
        """
        return await self.kickoff_async(messages, response_format, input_files)
        return await asyncio.to_thread(self.kickoff, messages)

    def _get_default_system_prompt(
        self, response_format: type[BaseModel] | None = None
@@ -555,14 +520,12 @@ class LiteAgent(FlowTrackable, BaseModel):
        self,
        messages: str | list[LLMMessage],
        response_format: type[BaseModel] | None = None,
        input_files: dict[str, FileInput] | None = None,
    ) -> list[LLMMessage]:
        """Format messages for the LLM.

        Args:
            messages: Input messages to format.
            response_format: Optional response format to use instead of self.response_format.
            input_files: Optional dict of named files to include with the messages.
            messages: Input messages to format
            response_format: Optional response format to use instead of self.response_format
        """
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
@@ -577,13 +540,6 @@ class LiteAgent(FlowTrackable, BaseModel):
        # Add the rest of the messages
        formatted_messages.extend(messages)

        # Attach files to the last user message if provided
        if input_files:
            for msg in reversed(formatted_messages):
                if msg.get("role") == "user":
                    msg["files"] = input_files
                    break

        return formatted_messages

    def _invoke_loop(self) -> AgentFinish:
@@ -53,14 +53,6 @@ from crewai.utilities.logger_utils import suppress_warnings
from crewai.utilities.string_utils import sanitize_tool_name


try:
    from crewai_files import aformat_multimodal_content, format_multimodal_content

    HAS_CREWAI_FILES = True
except ImportError:
    HAS_CREWAI_FILES = False


if TYPE_CHECKING:
    from litellm.exceptions import ContextWindowExceededError
    from litellm.litellm_core_utils.get_supported_openai_params import (
@@ -669,14 +661,12 @@ class LLM(BaseLLM):
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        skip_file_processing: bool = False,
    ) -> dict[str, Any]:
        """Prepare parameters for the completion call.

        Args:
            messages: Input messages for the LLM
            tools: Optional list of tool schemas
            skip_file_processing: Skip file processing (used when already done async)

        Returns:
            Dict[str, Any]: Parameters for the completion call
@@ -684,9 +674,6 @@ class LLM(BaseLLM):
        # --- 1) Format messages according to provider requirements
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        # --- 1a) Process any file attachments into multimodal content
        if not skip_file_processing:
            messages = self._process_message_files(messages)
        formatted_messages = self._format_messages_for_provider(messages)

        # --- 2) Prepare the parameters for the completion call
@@ -697,7 +684,7 @@ class LLM(BaseLLM):
            "temperature": self.temperature,
            "top_p": self.top_p,
            "n": self.n,
            "stop": self.stop or None,
            "stop": self.stop,
            "max_tokens": self.max_tokens or self.max_completion_tokens,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
@@ -1812,9 +1799,6 @@ class LLM(BaseLLM):
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]

        # Process file attachments asynchronously before preparing params
        messages = await self._aprocess_message_files(messages)

        if "o1" in self.model.lower():
            for message in messages:
                if message.get("role") == "system":
@@ -1825,9 +1809,7 @@ class LLM(BaseLLM):
        if callbacks and len(callbacks) > 0:
            self.set_callbacks(callbacks)
        try:
            params = self._prepare_completion_params(
                messages, tools, skip_file_processing=True
            )
            params = self._prepare_completion_params(messages, tools)

            if self.stream:
                return await self._ahandle_streaming_response(
@@ -1914,88 +1896,6 @@ class LLM(BaseLLM):
            ),
        )
    def _process_message_files(self, messages: list[LLMMessage]) -> list[LLMMessage]:
        """Process files attached to messages and format for provider.

        For each message with a `files` field, formats the files into
        provider-specific content blocks and updates the message content.

        Args:
            messages: List of messages that may contain file attachments.

        Returns:
            Messages with files formatted into content blocks.
        """
        if not HAS_CREWAI_FILES or not self.supports_multimodal():
            return messages

        provider = getattr(self, "provider", None) or self.model

        for msg in messages:
            files = msg.get("files")
            if not files:
                continue

            content_blocks = format_multimodal_content(files, provider)
            if not content_blocks:
                msg.pop("files", None)
                continue

            existing_content = msg.get("content", "")
            if isinstance(existing_content, str):
                msg["content"] = [
                    self.format_text_content(existing_content),
                    *content_blocks,
                ]
            elif isinstance(existing_content, list):
                msg["content"] = [*existing_content, *content_blocks]

            msg.pop("files", None)

        return messages

    async def _aprocess_message_files(
        self, messages: list[LLMMessage]
    ) -> list[LLMMessage]:
        """Async process files attached to messages and format for provider.

        For each message with a `files` field, formats the files into
        provider-specific content blocks and updates the message content.

        Args:
            messages: List of messages that may contain file attachments.

        Returns:
            Messages with files formatted into content blocks.
        """
        if not HAS_CREWAI_FILES or not self.supports_multimodal():
            return messages

        provider = getattr(self, "provider", None) or self.model

        for msg in messages:
            files = msg.get("files")
            if not files:
                continue

            content_blocks = await aformat_multimodal_content(files, provider)
            if not content_blocks:
                msg.pop("files", None)
                continue

            existing_content = msg.get("content", "")
            if isinstance(existing_content, str):
                msg["content"] = [
                    self.format_text_content(existing_content),
                    *content_blocks,
                ]
            elif isinstance(existing_content, list):
                msg["content"] = [*existing_content, *content_blocks]

            msg.pop("files", None)

        return messages
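A standalone illustration of the content rewrite above: a string content plus attached files becomes a list of content blocks (OpenAI-style shapes shown; the image block is a placeholder), and the transient "files" key is dropped.

msg = {"role": "user", "content": "What is in this image?", "files": {"img": "..."}}
content_blocks = [{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]

existing = msg.get("content", "")
if isinstance(existing, str):
    msg["content"] = [{"type": "text", "text": existing}, *content_blocks]
msg.pop("files", None)

assert isinstance(msg["content"], list) and msg["content"][0]["type"] == "text"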
    def _format_messages_for_provider(
        self, messages: list[LLMMessage]
    ) -> list[dict[str, str]]:
@@ -2320,44 +2220,3 @@ class LLM(BaseLLM):
            stop=copy.deepcopy(self.stop, memo) if self.stop else None,
            **filtered_params,
        )

    def supports_multimodal(self) -> bool:
        """Check if the model supports multimodal inputs.

        For litellm, check common vision-enabled model prefixes.

        Returns:
            True if the model likely supports images.
        """
        vision_prefixes = (
            "gpt-4o",
            "gpt-4-turbo",
            "gpt-4-vision",
            "gpt-4.1",
            "claude-3",
            "claude-4",
            "gemini",
        )
        model_lower = self.model.lower()
        return any(
            model_lower.startswith(p) or f"/{p}" in model_lower for p in vision_prefixes
        )

    def supported_multimodal_content_types(self) -> list[str]:
        """Get content types supported for multimodal input.

        Determines supported types based on the underlying model.

        Returns:
            List of supported MIME type prefixes.
        """
        if not self.supports_multimodal():
            return []

        model_lower = self.model.lower()

        if "gemini" in model_lower:
            return ["image/", "audio/", "video/", "application/pdf", "text/"]
        if "claude-3" in model_lower or "claude-4" in model_lower:
            return ["image/", "application/pdf"]
        return ["image/"]
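A quick standalone check of the prefix heuristic above (prefix list taken from this diff; model ids are examples):

vision_prefixes = ("gpt-4o", "gpt-4-turbo", "gpt-4-vision", "gpt-4.1", "claude-3", "claude-4", "gemini")

def supports_multimodal(model: str) -> bool:
    m = model.lower()
    return any(m.startswith(p) or f"/{p}" in m for p in vision_prefixes)

assert supports_multimodal("gpt-4o-mini")
assert supports_multimodal("openrouter/gemini-2.0-flash")  # provider-prefixed id
assert not supports_multimodal("gpt-3.5-turbo")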
@@ -280,47 +280,6 @@ class BaseLLM(ABC):
        # Default implementation - subclasses should override with model-specific values
        return DEFAULT_CONTEXT_WINDOW_SIZE

    def supports_multimodal(self) -> bool:
        """Check if the LLM supports multimodal inputs.

        Returns:
            True if the LLM supports images, PDFs, audio, or video.
        """
        return False

    def supported_multimodal_content_types(self) -> list[str]:
        """Get the content types supported by this LLM for multimodal input.

        Returns:
            List of supported MIME type prefixes (e.g., ["image/", "application/pdf"]).
        """
        return []

    def format_text_content(self, text: str) -> dict[str, Any]:
        """Format text as a content block for the LLM.

        Default implementation uses OpenAI/Anthropic format.
        Subclasses should override for provider-specific formatting.

        Args:
            text: The text content to format.

        Returns:
            A content block in the provider's expected format.
        """
        return {"type": "text", "text": text}

    def get_file_uploader(self) -> Any:
        """Get a file uploader configured with this LLM's client.

        Returns an uploader instance that reuses this LLM's authenticated client,
        avoiding the need to create a new connection for file uploads.

        Returns:
            A FileUploader instance, or None if not supported by this provider.
        """
        return None

    # Common helper methods for native SDK implementations

    def _emit_call_started_event(
@@ -31,32 +31,6 @@ except ImportError:
    ) from None


ANTHROPIC_FILES_API_BETA = "files-api-2025-04-14"


def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool:
    """Check if any message content contains a file_id reference.

    Anthropic's Files API is in beta and requires a special header when
    file_id references are used in content blocks.

    Args:
        messages: List of message dicts to check.

    Returns:
        True if any content block contains a file_id reference.
    """
    for message in messages:
        content = message.get("content")
        if isinstance(content, list):
            for block in content:
                if isinstance(block, dict):
                    source = block.get("source", {})
                    if isinstance(source, dict) and source.get("type") == "file":
                        return True
    return False
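Example input for the predicate above: a content block whose source refers to a previously uploaded file (the file id is a placeholder). This is the shape that triggers the Files API beta header.

messages = [
    {
        "role": "user",
        "content": [
            {"type": "document", "source": {"type": "file", "file_id": "file_abc123"}},
            {"type": "text", "text": "Summarize the attached PDF."},
        ],
    }
]
# _contains_file_id_reference(messages) returns True for this input.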
class AnthropicThinkingConfig(BaseModel):
    type: Literal["enabled", "disabled"]
    budget_tokens: int | None = None
@@ -575,14 +549,8 @@ class AnthropicCompletion(BaseLLM):
            params["tools"] = [structured_tool]
            params["tool_choice"] = {"type": "tool", "name": "structured_output"}

        uses_file_api = _contains_file_id_reference(params.get("messages", []))

        try:
            if uses_file_api:
                params["betas"] = [ANTHROPIC_FILES_API_BETA]
                response = self.client.beta.messages.create(**params)
            else:
                response = self.client.messages.create(**params)
            response: Message = self.client.messages.create(**params)

        except Exception as e:
            if is_context_length_exceeded(e):
@@ -1005,14 +973,8 @@ class AnthropicCompletion(BaseLLM):
            params["tools"] = [structured_tool]
            params["tool_choice"] = {"type": "tool", "name": "structured_output"}

        uses_file_api = _contains_file_id_reference(params.get("messages", []))

        try:
            if uses_file_api:
                params["betas"] = [ANTHROPIC_FILES_API_BETA]
                response = await self.async_client.beta.messages.create(**params)
            else:
                response = await self.async_client.messages.create(**params)
            response: Message = await self.async_client.messages.create(**params)

        except Exception as e:
            if is_context_length_exceeded(e):
@@ -1354,39 +1316,3 @@ class AnthropicCompletion(BaseLLM):
                "total_tokens": input_tokens + output_tokens,
            }
        return {"total_tokens": 0}

    def supports_multimodal(self) -> bool:
        """Check if the model supports multimodal inputs.

        All Claude 3+ models support vision and PDFs.

        Returns:
            True if the model supports images and PDFs.
        """
        return "claude-3" in self.model.lower() or "claude-4" in self.model.lower()

    def supported_multimodal_content_types(self) -> list[str]:
        """Get content types supported by Anthropic for multimodal input.

        Returns:
            List of supported MIME type prefixes.
        """
        if not self.supports_multimodal():
            return []
        return ["image/", "application/pdf"]

    def get_file_uploader(self) -> Any:
        """Get an Anthropic file uploader using this LLM's clients.

        Returns:
            AnthropicFileUploader instance with pre-configured sync and async clients.
        """
        try:
            from crewai_files.uploaders.anthropic import AnthropicFileUploader

            return AnthropicFileUploader(
                client=self.client,
                async_client=self.async_client,
            )
        except ImportError:
            return None
@@ -1073,24 +1073,3 @@ class AzureCompletion(BaseLLM):
    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        """Async context manager exit."""
        await self.aclose()

    def supports_multimodal(self) -> bool:
        """Check if the model supports multimodal inputs.

        Azure OpenAI vision-enabled models include GPT-4o and GPT-4 Turbo with Vision.

        Returns:
            True if the model supports images.
        """
        vision_models = ("gpt-4o", "gpt-4-turbo", "gpt-4-vision", "gpt-4v")
        return any(self.model.lower().startswith(m) for m in vision_models)

    def supported_multimodal_content_types(self) -> list[str]:
        """Get content types supported by Azure for multimodal input.

        Returns:
            List of supported MIME type prefixes.
        """
        if not self.supports_multimodal():
            return []
        return ["image/"]
@@ -1360,15 +1360,11 @@ class BedrockCompletion(BaseLLM):
|
||||
)
|
||||
else:
|
||||
# Convert to Converse API format with proper content structure
|
||||
if isinstance(content, list):
|
||||
# Already formatted as multimodal content blocks
|
||||
converse_messages.append({"role": role, "content": content})
|
||||
else:
|
||||
# String content - wrap in text block
|
||||
text_content = content if content else ""
|
||||
converse_messages.append(
|
||||
{"role": role, "content": [{"text": text_content}]}
|
||||
)
|
||||
# Ensure content is not None
|
||||
text_content = content if content else ""
|
||||
converse_messages.append(
|
||||
{"role": role, "content": [{"text": text_content}]}
|
||||
)
|
||||
|
||||
# CRITICAL: Handle model-specific conversation requirements
|
||||
# Cohere and some other models require conversation to end with user message
|
||||
@@ -1595,156 +1591,3 @@ class BedrockCompletion(BaseLLM):

        # Default context window size
        return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)

    def supports_multimodal(self) -> bool:
        """Check if the model supports multimodal inputs.

        Claude 3+ and Nova Lite/Pro/Premier on Bedrock support vision.

        Returns:
            True if the model supports images.
        """
        model_lower = self.model.lower()
        vision_models = (
            "anthropic.claude-3",
            "amazon.nova-lite",
            "amazon.nova-pro",
            "amazon.nova-premier",
            "us.amazon.nova-lite",
            "us.amazon.nova-pro",
            "us.amazon.nova-premier",
        )
        return any(model_lower.startswith(m) for m in vision_models)

    def _is_nova_model(self) -> bool:
        """Check if the model is an Amazon Nova model.

        Only Nova models support S3 links for multimedia.

        Returns:
            True if the model is a Nova model.
        """
        model_lower = self.model.lower()
        return "amazon.nova-" in model_lower

    def supported_multimodal_content_types(self) -> list[str]:
        """Get content types supported by Bedrock for multimodal input.

        Returns:
            List of supported MIME type prefixes.
        """
        if not self.supports_multimodal():
            return []

        types = ["image/png", "image/jpeg", "image/gif", "image/webp"]

        if self._is_nova_model():
            types.extend(
                [
                    "application/pdf",
                    "text/csv",
                    "text/plain",
                    "text/markdown",
                    "text/html",
                    "application/msword",
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                    "application/vnd.ms-excel",
                    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                    "video/mp4",
                    "video/quicktime",
                    "video/x-matroska",
                    "video/webm",
                    "video/x-flv",
                    "video/mpeg",
                    "video/x-ms-wmv",
                    "video/3gpp",
                ]
            )
        else:
            types.append("application/pdf")

        return types

    def get_file_uploader(self) -> Any:
        """Get a Bedrock S3 file uploader using this LLM's AWS credentials.

        Creates an S3 client using the same AWS credentials configured for
        this Bedrock LLM instance.

        Returns:
            BedrockFileUploader instance with pre-configured S3 client,
            or None if crewai_files is not installed.
        """
        try:
            import boto3
            from crewai_files.uploaders.bedrock import BedrockFileUploader

            s3_client = boto3.client(
                "s3",
                region_name=self.region_name,
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
                aws_session_token=self.aws_session_token,
            )
            return BedrockFileUploader(
                region=self.region_name,
                client=s3_client,
            )
        except ImportError:
            return None

    def _get_document_format(self, content_type: str) -> str | None:
        """Map content type to Bedrock document format.

        Args:
            content_type: MIME type of the document.

        Returns:
            Bedrock format string or None if unsupported.
        """
        format_map = {
            "application/pdf": "pdf",
            "text/csv": "csv",
            "text/plain": "txt",
            "text/markdown": "md",
            "text/html": "html",
            "application/msword": "doc",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
            "application/vnd.ms-excel": "xls",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
        }
        return format_map.get(content_type)

    def _get_video_format(self, content_type: str) -> str | None:
        """Map content type to Bedrock video format.

        Args:
            content_type: MIME type of the video.

        Returns:
            Bedrock format string or None if unsupported.
        """
        format_map = {
            "video/mp4": "mp4",
            "video/quicktime": "mov",
            "video/x-matroska": "mkv",
            "video/webm": "webm",
            "video/x-flv": "flv",
            "video/mpeg": "mpeg",
            "video/x-ms-wmv": "wmv",
            "video/3gpp": "three_gp",
        }
        return format_map.get(content_type)

    def format_text_content(self, text: str) -> dict[str, Any]:
        """Format text as a Bedrock content block.

        Bedrock uses {"text": "..."} format instead of {"type": "text", "text": "..."}.

        Args:
            text: The text content to format.

        Returns:
            A content block in Bedrock's expected format.
        """
        return {"text": text}

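Editorial aside: a hypothetical illustration of how the removed Bedrock helpers compose; `llm` stands for a BedrockCompletion instance and the Converse block shapes follow the format maps above.

def to_converse_block(llm, mime: str, name: str, data: bytes) -> dict | None:
    # Images go inline; documents and videos use the format maps above;
    # anything unmapped falls back to the read_file tool path.
    if mime.startswith("image/"):
        return {"image": {"format": mime.split("/", 1)[1], "source": {"bytes": data}}}
    if (fmt := llm._get_document_format(mime)) is not None:
        return {"document": {"format": fmt, "name": name, "source": {"bytes": data}}}
    if (fmt := llm._get_video_format(mime)) is not None:
        return {"video": {"format": fmt, "source": {"bytes": data}}}
    return None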
@@ -1,6 +1,5 @@
from __future__ import annotations

import base64
import json
import logging
import os
@@ -517,31 +516,17 @@ class GeminiCompletion(BaseLLM):
            role = message["role"]
            content = message["content"]

            # Build parts list from content
            parts: list[types.Part] = []
            # Convert content to string if it's a list
            if isinstance(content, list):
                for item in content:
                    if isinstance(item, dict):
                        if "text" in item:
                            parts.append(types.Part.from_text(text=str(item["text"])))
                        elif "inlineData" in item:
                            inline = item["inlineData"]
                            parts.append(
                                types.Part.from_bytes(
                                    data=base64.b64decode(inline["data"]),
                                    mime_type=inline["mimeType"],
                                )
                            )
                    else:
                        parts.append(types.Part.from_text(text=str(item)))
                text_content = " ".join(
                    str(item.get("text", "")) if isinstance(item, dict) else str(item)
                    for item in content
                )
            else:
                parts.append(types.Part.from_text(text=str(content) if content else ""))
                text_content = str(content) if content else ""

            if role == "system":
                # Extract system instruction - Gemini handles it separately
                text_content = " ".join(
                    p.text for p in parts if hasattr(p, "text") and p.text
                )
                if system_instruction:
                    system_instruction += f"\n\n{text_content}"
                else:
@@ -598,7 +583,9 @@ class GeminiCompletion(BaseLLM):
            gemini_role = "model" if role == "assistant" else "user"

            # Create Content object
            gemini_content = types.Content(role=gemini_role, parts=parts)
            gemini_content = types.Content(
                role=gemini_role, parts=[types.Part.from_text(text=text_content)]
            )
            contents.append(gemini_content)

        return contents, system_instruction
@@ -1190,47 +1177,3 @@ class GeminiCompletion(BaseLLM):
                )
            )
        return result

    def supports_multimodal(self) -> bool:
        """Check if the model supports multimodal inputs.

        Gemini models support images, audio, video, and PDFs.

        Returns:
            True if the model supports multimodal inputs.
        """
        return True

    def supported_multimodal_content_types(self) -> list[str]:
        """Get content types supported by Gemini for multimodal input.

        Returns:
            List of supported MIME type prefixes.
        """
        return ["image/", "audio/", "video/", "application/pdf", "text/"]

    def format_text_content(self, text: str) -> dict[str, Any]:
        """Format text as a Gemini content block.

        Gemini uses {"text": "..."} format instead of {"type": "text", "text": "..."}.

        Args:
            text: The text content to format.

        Returns:
            A content block in Gemini's expected format.
        """
        return {"text": text}

    def get_file_uploader(self) -> Any:
        """Get a Gemini file uploader using this LLM's client.

        Returns:
            GeminiFileUploader instance with pre-configured client.
        """
        try:
            from crewai_files.uploaders.gemini import GeminiFileUploader

            return GeminiFileUploader(client=self.client)
        except ImportError:
            return None

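Editorial aside: the shape of a message the removed Gemini branch consumed; the inlineData keys mirror the hunk above, the bytes are placeholders.

import base64

message = {
    "role": "user",
    "content": [
        {"text": "Describe this image."},
        {
            "inlineData": {
                "data": base64.b64encode(b"<png bytes>").decode("ascii"),
                "mimeType": "image/png",
            }
        },
    ],
}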
File diff suppressed because it is too large
@@ -44,20 +44,6 @@ from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.file_store import (
    clear_task_files,
    get_all_files,
    store_task_files,
)


try:
    from crewai_files import FileInput, FilePath

    HAS_CREWAI_FILES = True
except ImportError:
    FileInput = Any  # type: ignore[misc,assignment]
    HAS_CREWAI_FILES = False
from crewai.utilities.guardrail import (
    process_guardrail,
)
@@ -156,10 +142,6 @@ class Task(BaseModel):
        default_factory=list,
        description="Tools the agent is limited to use for this task.",
    )
    input_files: dict[str, FileInput] = Field(
        default_factory=dict,
        description="Named input files for this task. Keys are reference names, values are paths or File objects.",
    )
    security_config: SecurityConfig = Field(
        default_factory=SecurityConfig,
        description="Security configuration for the task.",
@@ -375,24 +357,6 @@ class Task(BaseModel):
            "may_not_set_field", "This field is not to be set by the user.", {}
        )

    @field_validator("input_files", mode="before")
    @classmethod
    def _normalize_input_files(cls, v: dict[str, Any]) -> dict[str, Any]:
        """Convert string paths to FilePath objects."""
        if not v:
            return v

        if not HAS_CREWAI_FILES:
            return v

        result = {}
        for key, value in v.items():
            if isinstance(value, str):
                result[key] = FilePath(path=Path(value))
            else:
                result[key] = value
        return result

    @field_validator("output_file")
    @classmethod
    def output_file_validation(cls, value: str | None) -> str | None:
@@ -531,10 +495,10 @@ class Task(BaseModel):
    ) -> None:
        """Execute the task asynchronously with context handling."""
        try:
            result = self._execute_core(agent, context, tools)
            future.set_result(result)
            result = self._execute_core(agent, context, tools)
            future.set_result(result)
        except Exception as e:
            future.set_exception(e)
            future.set_exception(e)

    async def aexecute_sync(
        self,
@@ -552,7 +516,6 @@ class Task(BaseModel):
        tools: list[Any] | None,
    ) -> TaskOutput:
        """Run the core execution logic of the task asynchronously."""
        self._store_input_files()
        try:
            agent = agent or self.agent
            self.agent = agent
@@ -637,8 +600,6 @@ class Task(BaseModel):
            self.end_time = datetime.datetime.now()
            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self))  # type: ignore[no-untyped-call]
            raise e  # Re-raise the exception after emitting the event
        finally:
            clear_task_files(self.id)

    def _execute_core(
        self,
@@ -647,7 +608,6 @@ class Task(BaseModel):
        tools: list[Any] | None,
    ) -> TaskOutput:
        """Run the core execution logic of the task."""
        self._store_input_files()
        try:
            agent = agent or self.agent
            self.agent = agent
@@ -733,8 +693,6 @@ class Task(BaseModel):
            self.end_time = datetime.datetime.now()
            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self))  # type: ignore[no-untyped-call]
            raise e  # Re-raise the exception after emitting the event
        finally:
            clear_task_files(self.id)

    def prompt(self) -> str:
        """Generates the task prompt with optional markdown formatting.
@@ -757,51 +715,6 @@
        if trigger_payload is not None:
            description += f"\n\nTrigger Payload: {trigger_payload}"

        if self.agent and self.agent.crew:
            files = get_all_files(self.agent.crew.id, self.id)
            if files:
                supported_types: list[str] = []
                if self.agent.llm and self.agent.llm.supports_multimodal():
                    supported_types = (
                        self.agent.llm.supported_multimodal_content_types()
                    )

                def is_auto_injected(content_type: str) -> bool:
                    return any(content_type.startswith(t) for t in supported_types)

                auto_injected_files = {
                    name: f_input
                    for name, f_input in files.items()
                    if is_auto_injected(f_input.content_type)
                }
                tool_files = {
                    name: f_input
                    for name, f_input in files.items()
                    if not is_auto_injected(f_input.content_type)
                }

                file_lines: list[str] = []

                if auto_injected_files:
                    file_lines.append(
                        "Input files (content already loaded in conversation):"
                    )
                    for name, file_input in auto_injected_files.items():
                        filename = file_input.filename or name
                        file_lines.append(f' - "{name}" ({filename})')

                if tool_files:
                    file_lines.append(
                        "Available input files (use the name in quotes with read_file tool):"
                    )
                    for name, file_input in tool_files.items():
                        filename = file_input.filename or name
                        content_type = file_input.content_type
                        file_lines.append(f' - "{name}" ({filename}, {content_type})')

                if file_lines:
                    description += "\n\n" + "\n".join(file_lines)

        tasks_slices = [description]

        output = self.i18n.slice("expected_output").format(
@@ -1035,13 +948,6 @@ Follow these guidelines:
            ) from e
        return

    def _store_input_files(self) -> None:
        """Store task input files in the file store."""
        if not HAS_CREWAI_FILES or not self.input_files:
            return

        store_task_files(self.id, self.input_files)

    def __repr__(self) -> str:
        return f"Task(description={self.description}, expected_output={self.expected_output})"


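Editorial aside: a minimal sketch of how the removed input_files field was populated; string values were normalized to FilePath objects by the validator above. The agent here is a placeholder.

task = Task(
    description="Summarize the attached report",
    expected_output="A one-paragraph summary",
    agent=analyst_agent,  # assumed: an existing Agent instance
    input_files={"report": "docs/report.pdf"},  # becomes FilePath(path=Path("docs/report.pdf"))
)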
@@ -1,78 +0,0 @@
"""Tool for reading input files provided to the crew."""

from __future__ import annotations

import base64
from typing import TYPE_CHECKING

from pydantic import BaseModel, Field, PrivateAttr

from crewai.tools.base_tool import BaseTool


if TYPE_CHECKING:
    from crewai_files import FileInput


class ReadFileToolSchema(BaseModel):
    """Schema for read file tool arguments."""

    file_name: str = Field(..., description="The name of the input file to read")


class ReadFileTool(BaseTool):
    """Tool for reading input files provided to the crew kickoff.

    Provides agents access to files passed via the `files` key in inputs.
    """

    name: str = "read_file"
    description: str = (
        "Read content from an input file by name. "
        "Returns file content as text for text files, or base64 for binary files."
    )
    args_schema: type[BaseModel] = ReadFileToolSchema

    _files: dict[str, FileInput] | None = PrivateAttr(default=None)

    def set_files(self, files: dict[str, FileInput] | None) -> None:
        """Set available input files.

        Args:
            files: Dictionary mapping file names to file inputs.
        """
        self._files = files

    def _run(self, file_name: str, **kwargs: object) -> str:
        """Read an input file by name.

        Args:
            file_name: The name of the file to read.

        Returns:
            File content as text for text files, or base64 encoded for binary.
        """
        if not self._files:
            return "No input files available."

        if file_name not in self._files:
            available = ", ".join(self._files.keys())
            return f"File '{file_name}' not found. Available files: {available}"

        file_input = self._files[file_name]
        content = file_input.read()
        content_type = file_input.content_type
        filename = file_input.filename or file_name

        text_types = (
            "text/",
            "application/json",
            "application/xml",
            "application/x-yaml",
        )

        if any(content_type.startswith(t) for t in text_types):
            return content.decode("utf-8")

        encoded = base64.b64encode(content).decode("ascii")
        return f"[Binary file: {filename} ({content_type})]\nBase64: {encoded}"
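Editorial aside: a hypothetical direct use of the removed tool; in the crew flow, set_files() received the stored inputs before the agent ran, and some_file_input stands for a crewai_files FileInput.

tool = ReadFileTool()
tool.set_files({"notes": some_file_input})
print(tool._run(file_name="notes"))  # text content, or a base64 stub for binary files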
@@ -613,23 +613,13 @@ def summarize_messages(
) -> None:
    """Summarize messages to fit within context window.

    Preserves any files attached to user messages and re-attaches them to
    the summarized message. Files from all user messages are merged.

    Args:
        messages: List of messages to summarize (modified in-place)
        messages: List of messages to summarize
        llm: LLM instance for summarization
        callbacks: List of callbacks for LLM
        i18n: I18N instance for messages
    """
    preserved_files: dict[str, Any] = {}
    for msg in messages:
        if msg.get("role") == "user" and msg.get("files"):
            preserved_files.update(msg["files"])

    messages_string = " ".join(
        [str(message.get("content", "")) for message in messages]
    )
    messages_string = " ".join([message["content"] for message in messages])  # type: ignore[misc]
    cut_size = llm.get_context_window_size()

    messages_groups = [
@@ -646,7 +636,7 @@ def summarize_messages(
            color="yellow",
        )

        summarization_messages = [
        messages = [
            format_message_for_llm(
                i18n.slice("summarizer_system_message"), role="system"
            ),
@@ -655,7 +645,7 @@
            ),
        ]
        summary = llm.call(
            summarization_messages,
            messages,
            callbacks=callbacks,
        )
        summarized_contents.append({"content": str(summary)})
@@ -663,12 +653,11 @@
    merged_summary = " ".join(content["content"] for content in summarized_contents)

    messages.clear()
    summary_message = format_message_for_llm(
        i18n.slice("summary").format(merged_summary=merged_summary)
    messages.append(
        format_message_for_llm(
            i18n.slice("summary").format(merged_summary=merged_summary)
        )
    )
    if preserved_files:
        summary_message["files"] = preserved_files
    messages.append(summary_message)


def show_agent_logs(
@@ -870,11 +859,7 @@ def extract_tool_call_info(
    if hasattr(tool_call, "function"):
        # OpenAI-style: has .function.name and .function.arguments
        call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
        return (
            call_id,
            sanitize_tool_name(tool_call.function.name),
            tool_call.function.arguments,
        )
        return call_id, sanitize_tool_name(tool_call.function.name), tool_call.function.arguments
    if hasattr(tool_call, "function_call") and tool_call.function_call:
        # Gemini-style: has .function_call.name and .function_call.args
        call_id = f"call_{id(tool_call)}"

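Editorial aside: the removed preservation behavior in summarize_messages, sketched with placeholder values (f1 stands for any FileInput; llm, callbacks, and i18n are assumed to exist).

messages = [
    {"role": "user", "content": "very long history ...", "files": {"img.png": f1}},
    {"role": "assistant", "content": "..."},
]
summarize_messages(messages, llm, callbacks, i18n)
# messages is now a single summary message that still carries {"img.png": f1}.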
@@ -1,239 +0,0 @@
"""Global file store for crew and task execution."""

from __future__ import annotations

import asyncio
from collections.abc import Coroutine
import concurrent.futures
from typing import TYPE_CHECKING, TypeVar
from uuid import UUID

from aiocache import Cache  # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]


if TYPE_CHECKING:
    from crewai_files import FileInput

_file_store = Cache(Cache.MEMORY, serializer=PickleSerializer())

T = TypeVar("T")


def _run_sync(coro: Coroutine[None, None, T]) -> T:
    """Run a coroutine synchronously, handling nested event loops.

    If called from within a running event loop, runs the coroutine in a
    separate thread to avoid "cannot run event loop while another is running".

    Args:
        coro: The coroutine to run.

    Returns:
        The result of the coroutine.
    """
    try:
        asyncio.get_running_loop()
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(asyncio.run, coro)
            return future.result()
    except RuntimeError:
        return asyncio.run(coro)


DEFAULT_TTL = 3600

_CREW_PREFIX = "crew:"
_TASK_PREFIX = "task:"


async def astore_files(
    execution_id: UUID,
    files: dict[str, FileInput],
    ttl: int = DEFAULT_TTL,
) -> None:
    """Store files for a crew execution asynchronously.

    Args:
        execution_id: Unique identifier for the crew execution.
        files: Dictionary mapping names to file inputs.
        ttl: Time-to-live in seconds.
    """
    await _file_store.set(f"{_CREW_PREFIX}{execution_id}", files, ttl=ttl)


async def aget_files(execution_id: UUID) -> dict[str, FileInput] | None:
    """Retrieve files for a crew execution asynchronously.

    Args:
        execution_id: Unique identifier for the crew execution.

    Returns:
        Dictionary of files or None if not found.
    """
    result: dict[str, FileInput] | None = await _file_store.get(
        f"{_CREW_PREFIX}{execution_id}"
    )
    return result


async def aclear_files(execution_id: UUID) -> None:
    """Clear files for a crew execution asynchronously.

    Args:
        execution_id: Unique identifier for the crew execution.
    """
    await _file_store.delete(f"{_CREW_PREFIX}{execution_id}")


async def astore_task_files(
    task_id: UUID,
    files: dict[str, FileInput],
    ttl: int = DEFAULT_TTL,
) -> None:
    """Store files for a task execution asynchronously.

    Args:
        task_id: Unique identifier for the task.
        files: Dictionary mapping names to file inputs.
        ttl: Time-to-live in seconds.
    """
    await _file_store.set(f"{_TASK_PREFIX}{task_id}", files, ttl=ttl)


async def aget_task_files(task_id: UUID) -> dict[str, FileInput] | None:
    """Retrieve files for a task execution asynchronously.

    Args:
        task_id: Unique identifier for the task.

    Returns:
        Dictionary of files or None if not found.
    """
    result: dict[str, FileInput] | None = await _file_store.get(
        f"{_TASK_PREFIX}{task_id}"
    )
    return result


async def aclear_task_files(task_id: UUID) -> None:
    """Clear files for a task execution asynchronously.

    Args:
        task_id: Unique identifier for the task.
    """
    await _file_store.delete(f"{_TASK_PREFIX}{task_id}")


async def aget_all_files(
    crew_id: UUID,
    task_id: UUID | None = None,
) -> dict[str, FileInput] | None:
    """Get merged crew and task files asynchronously.

    Task files override crew files with the same name.

    Args:
        crew_id: Unique identifier for the crew execution.
        task_id: Optional task identifier for task-scoped files.

    Returns:
        Merged dictionary of files or None if none found.
    """
    crew_files = await aget_files(crew_id) or {}
    task_files = await aget_task_files(task_id) if task_id else {}

    if not crew_files and not task_files:
        return None

    return {**crew_files, **(task_files or {})}


def store_files(
    execution_id: UUID,
    files: dict[str, FileInput],
    ttl: int = DEFAULT_TTL,
) -> None:
    """Store files for a crew execution.

    Args:
        execution_id: Unique identifier for the crew execution.
        files: Dictionary mapping names to file inputs.
        ttl: Time-to-live in seconds.
    """
    _run_sync(astore_files(execution_id, files, ttl))


def get_files(execution_id: UUID) -> dict[str, FileInput] | None:
    """Retrieve files for a crew execution.

    Args:
        execution_id: Unique identifier for the crew execution.

    Returns:
        Dictionary of files or None if not found.
    """
    return _run_sync(aget_files(execution_id))


def clear_files(execution_id: UUID) -> None:
    """Clear files for a crew execution.

    Args:
        execution_id: Unique identifier for the crew execution.
    """
    _run_sync(aclear_files(execution_id))


def store_task_files(
    task_id: UUID,
    files: dict[str, FileInput],
    ttl: int = DEFAULT_TTL,
) -> None:
    """Store files for a task execution.

    Args:
        task_id: Unique identifier for the task.
        files: Dictionary mapping names to file inputs.
        ttl: Time-to-live in seconds.
    """
    _run_sync(astore_task_files(task_id, files, ttl))


def get_task_files(task_id: UUID) -> dict[str, FileInput] | None:
    """Retrieve files for a task execution.

    Args:
        task_id: Unique identifier for the task.

    Returns:
        Dictionary of files or None if not found.
    """
    return _run_sync(aget_task_files(task_id))


def clear_task_files(task_id: UUID) -> None:
    """Clear files for a task execution.

    Args:
        task_id: Unique identifier for the task.
    """
    _run_sync(aclear_task_files(task_id))


def get_all_files(
    crew_id: UUID,
    task_id: UUID | None = None,
) -> dict[str, FileInput] | None:
    """Get merged crew and task files.

    Task files override crew files with the same name.

    Args:
        crew_id: Unique identifier for the crew execution.
        task_id: Optional task identifier for task-scoped files.

    Returns:
        Merged dictionary of files or None if none found.
    """
    return _run_sync(aget_all_files(crew_id, task_id))
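Editorial aside: the sync wrappers above delegate to the async API via _run_sync, so they work both inside and outside a running event loop. A minimal usage sketch with a placeholder file input:

from uuid import uuid4

crew_id = uuid4()
store_files(crew_id, {"report": some_file_input})  # some_file_input: a crewai_files FileInput
assert get_files(crew_id) is not None
clear_files(crew_id)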
@@ -1,18 +1,10 @@
"""Types for CrewAI utilities."""

from __future__ import annotations

from typing import Any, Literal

from typing_extensions import NotRequired, TypedDict


try:
    from crewai_files import FileInput
except ImportError:
    FileInput = Any  # type: ignore[misc,assignment]


class LLMMessage(TypedDict):
    """Type for formatted LLM messages.

@@ -26,4 +18,3 @@ class LLMMessage(TypedDict):
    tool_call_id: NotRequired[str]
    name: NotRequired[str]
    tool_calls: NotRequired[list[dict[str, Any]]]
    files: NotRequired[dict[str, FileInput]]

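Editorial aside: what the removed files key allowed on a message, with a placeholder file input:

msg: LLMMessage = {
    "role": "user",
    "content": "Analyze the attached file",
    "files": {"doc.pdf": some_file_input},  # some_file_input assumed
}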
@@ -829,178 +829,3 @@ def test_lite_agent_standalone_still_works():
    assert result is not None
    assert isinstance(result, LiteAgentOutput)
    assert result.raw is not None


def test_agent_kickoff_with_files_parameter():
    """Test that Agent.kickoff() accepts and passes files to the executor."""
    from unittest.mock import Mock, patch

    from crewai_files import File

    from crewai.types.usage_metrics import UsageMetrics

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Final Answer: I can see the file content."
    mock_llm.stop = []
    mock_llm.supports_stop_words.return_value = False
    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
        total_tokens=100,
        prompt_tokens=50,
        completion_tokens=50,
        cached_prompt_tokens=0,
        successful_requests=1,
    )

    agent = Agent(
        role="File Analyzer",
        goal="Analyze files",
        backstory="An agent that analyzes files",
        llm=mock_llm,
        verbose=False,
    )

    test_file = File(source=b"mock pdf content")
    files = {"document.pdf": test_file}

    with patch.object(
        agent, "_prepare_kickoff", wraps=agent._prepare_kickoff
    ) as mock_prepare:
        result = agent.kickoff(messages="Analyze the document", files=files)

        mock_prepare.assert_called_once()
        call_args = mock_prepare.call_args
        assert call_args.args[0] == "Analyze the document"
        called_files = call_args.kwargs.get("files") or call_args.args[2]
        assert "document.pdf" in called_files
        assert called_files["document.pdf"] is test_file

    assert result is not None


def test_prepare_kickoff_extracts_files_from_messages():
    """Test that _prepare_kickoff extracts files from messages."""
    from unittest.mock import Mock

    from crewai_files import File

    from crewai.types.usage_metrics import UsageMetrics

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Final Answer: Done."
    mock_llm.stop = []
    mock_llm.supports_stop_words.return_value = False
    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
        total_tokens=100,
        prompt_tokens=50,
        completion_tokens=50,
        cached_prompt_tokens=0,
        successful_requests=1,
    )

    agent = Agent(
        role="Test Agent",
        goal="Test files",
        backstory="Test backstory",
        llm=mock_llm,
        verbose=False,
    )

    test_file = File(source=b"mock image content")
    messages = [
        {"role": "user", "content": "Analyze this", "files": {"img.png": test_file}}
    ]

    executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(messages=messages)

    assert "files" in inputs
    assert "img.png" in inputs["files"]
    assert inputs["files"]["img.png"] is test_file


def test_prepare_kickoff_merges_files_from_messages_and_parameter():
    """Test that _prepare_kickoff merges files from messages and parameter."""
    from unittest.mock import Mock

    from crewai_files import File

    from crewai.types.usage_metrics import UsageMetrics

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Final Answer: Done."
    mock_llm.stop = []
    mock_llm.supports_stop_words.return_value = False
    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
        total_tokens=100,
        prompt_tokens=50,
        completion_tokens=50,
        cached_prompt_tokens=0,
        successful_requests=1,
    )

    agent = Agent(
        role="Test Agent",
        goal="Test files",
        backstory="Test backstory",
        llm=mock_llm,
        verbose=False,
    )

    msg_file = File(source=b"message file content")
    param_file = File(source=b"param file content")
    messages = [
        {"role": "user", "content": "Analyze these", "files": {"from_msg.png": msg_file}}
    ]
    files = {"from_param.pdf": param_file}

    executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(
        messages=messages, files=files
    )

    assert "files" in inputs
    assert "from_msg.png" in inputs["files"]
    assert "from_param.pdf" in inputs["files"]
    assert inputs["files"]["from_msg.png"] is msg_file
    assert inputs["files"]["from_param.pdf"] is param_file


def test_prepare_kickoff_param_files_override_message_files():
    """Test that files parameter overrides files from messages with same name."""
    from unittest.mock import Mock

    from crewai_files import File

    from crewai.types.usage_metrics import UsageMetrics

    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Final Answer: Done."
    mock_llm.stop = []
    mock_llm.supports_stop_words.return_value = False
    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
        total_tokens=100,
        prompt_tokens=50,
        completion_tokens=50,
        cached_prompt_tokens=0,
        successful_requests=1,
    )

    agent = Agent(
        role="Test Agent",
        goal="Test files",
        backstory="Test backstory",
        llm=mock_llm,
        verbose=False,
    )

    msg_file = File(source=b"message file content")
    param_file = File(source=b"param file content")
    messages = [
        {"role": "user", "content": "Analyze", "files": {"same.png": msg_file}}
    ]
    input_files = {"same.png": param_file}

    executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(
        messages=messages, input_files=input_files
    )

    assert "files" in inputs
    assert inputs["files"]["same.png"] is param_file  # param takes precedence

@@ -47,17 +47,14 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-AB7O2DR8lqTcngpTRMomIOR3MQjlP\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1727213366,\n \"model\": \"gpt-4o-2024-05-13\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"Thought: I now can\
\ give a great answer\\nFinal Answer: Hi!\",\n \"refusal\": null\n\
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n\
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 154,\n \"completion_tokens\"\
: 15,\n \"total_tokens\": 169,\n \"completion_tokens_details\": {\n\
\ \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"\
fp_e375328146\"\n}\n"
content: "{\n \"id\": \"chatcmpl-AB7O2DR8lqTcngpTRMomIOR3MQjlP\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213366,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: Hi!\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
154,\n \"completion_tokens\": 15,\n \"total_tokens\": 169,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -99,9 +96,8 @@ interactions:
- 0s
x-request-id:
- req_4243014b2ee70b9aabb42677ece6032c
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
@@ -159,19 +155,16 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-AB7O3atu0mC9020bT00tXGnRvVM9z\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1727213367,\n \"model\": \"gpt-4o-2024-05-13\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"Thought: I need to\
\ use the `get_final_answer` tool non-stop, without giving a final answer\
\ unless explicitly told otherwise. I will continue this until necessary.\\\
n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\": null\n\
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n\
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 314,\n \"completion_tokens\"\
: 43,\n \"total_tokens\": 357,\n \"completion_tokens_details\": {\n\
\ \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"\
fp_3537616b13\"\n}\n"
content: "{\n \"id\": \"chatcmpl-AB7O3atu0mC9020bT00tXGnRvVM9z\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213367,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I need to use the `get_final_answer`
tool non-stop, without giving a final answer unless explicitly told otherwise.
I will continue this until necessary.\\n\\nAction: get_final_answer\\nAction
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
314,\n \"completion_tokens\": 43,\n \"total_tokens\": 357,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_3537616b13\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -213,9 +206,8 @@ interactions:
- 0s
x-request-id:
- req_298d5f7666fc3164008a49aba8fc818d
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
@@ -279,17 +271,14 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-AB7O5g38Q7AaWaUCm4FUWmpYYPzrD\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1727213369,\n \"model\": \"gpt-4o-2024-05-13\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"I now know the final\
\ answer.\\nFinal Answer: 42\",\n \"refusal\": null\n },\n \
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n\
\ \"usage\": {\n \"prompt_tokens\": 398,\n \"completion_tokens\": 12,\n\
\ \"total_tokens\": 410,\n \"completion_tokens_details\": {\n \"\
reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_3537616b13\"\
\n}\n"
content: "{\n \"id\": \"chatcmpl-AB7O5g38Q7AaWaUCm4FUWmpYYPzrD\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213369,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now know the final answer.\\nFinal
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
398,\n \"completion_tokens\": 12,\n \"total_tokens\": 410,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_3537616b13\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -331,7 +320,6 @@ interactions:
- 0s
x-request-id:
- req_4cdf64282e6e639e6ad6fd7b74cea3f9
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
version: 1

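Editorial aside: the cassette hunks above and below migrate recordings from the old body.string / status.code layout to the newer content / status_code one. A hedged sketch of reading either layout (field names taken from the hunks; everything else assumed):

def response_text(interaction: dict) -> str:
    # Works for both old-style and new-style VCR cassette entries.
    resp = interaction["response"]
    body = resp.get("body")
    if isinstance(body, dict) and "string" in body:  # old-style cassette
        return body["string"]
    return resp.get("content", "")  # new-style cassette stores the body inline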
@@ -57,21 +57,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUIMCbxAr4MO0Ku8tDYBgJ30LGXi\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222714,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need\
\ more information to understand what specific query to search for.\\nAction:\
\ search_web\\nAction Input: {\\\"query\\\":\\\"Test query\\\"}\",\n \
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\"\
: null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n\
\ \"prompt_tokens\": 242,\n \"completion_tokens\": 31,\n \"total_tokens\"\
: 273,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \
\ \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\"\
: 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUIMCbxAr4MO0Ku8tDYBgJ30LGXi\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222714,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need more information
to understand what specific query to search for.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"Test query\\\"}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 242,\n \"completion_tokens\":
31,\n \"total_tokens\": 273,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc01f9bd96cf41-SJC
@@ -115,9 +113,8 @@ interactions:
- 0s
x-request-id:
- req_99e3ad4ee98371cc1c55a2f5c6ae3962
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
@@ -179,21 +176,18 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUINDYiGwrVyJU7wUoXCw3hft7yF\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now\
\ know the final answer\\nFinal Answer: This is a simulated search result\
\ for demonstration purposes.\\n```\",\n \"refusal\": null,\n \
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\"\
: \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 295,\n \
\ \"completion_tokens\": 26,\n \"total_tokens\": 321,\n \"prompt_tokens_details\"\
: {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"\
completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\"\
: 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\"\
: 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\"\
: \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUINDYiGwrVyJU7wUoXCw3hft7yF\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: This is a simulated search result for demonstration purposes.\\n```\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
295,\n \"completion_tokens\": 26,\n \"total_tokens\": 321,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc02003c9ecf41-SJC
@@ -237,9 +231,8 @@ interactions:
- 0s
x-request-id:
- req_dd9052c40d5d61ecc5eb141f49df3abe
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
@@ -300,22 +293,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUIN3xeM6JBgLjV5HQA8MTI2Uuem\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need\
\ to clarify what specific information or topic the test query is targeting.\\
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"What is the purpose\
\ of a test query in data retrieval?\\\"}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"
finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\"
: 288,\n \"completion_tokens\": 43,\n \"total_tokens\": 331,\n \"
prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\"
: 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"
: 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n
\ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUIN3xeM6JBgLjV5HQA8MTI2Uuem\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222715,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need to clarify what
specific information or topic the test query is targeting.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"What is the purpose of a test query in data retrieval?\\\"}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
288,\n \"completion_tokens\": 43,\n \"total_tokens\": 331,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc0204d91ccf41-SJC
@@ -359,9 +349,8 @@ interactions:
- 0s
x-request-id:
- req_e792e993009ddfe84cfbb503560d88cf
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
@@ -427,23 +416,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUIOqyLDCIZv6YIz1hlaW479SIzg\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222716,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now\
\ know the final answer\\nFinal Answer: {\\n \\\"test_field\\\": \\\"A test\
\ query is utilized to evaluate the functionality, performance, and accuracy\
\ of data retrieval systems, ensuring they return expected results.\\\"\\\
n}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n \
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n \
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 362,\n \"completion_tokens\"\
: 49,\n \"total_tokens\": 411,\n \"prompt_tokens_details\": {\n \
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
\ \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUIOqyLDCIZv6YIz1hlaW479SIzg\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222716,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
Answer: {\\n \\\"test_field\\\": \\\"A test query is utilized to evaluate the
functionality, performance, and accuracy of data retrieval systems, ensuring
they return expected results.\\\"\\n}\\n```\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 362,\n \"completion_tokens\":
49,\n \"total_tokens\": 411,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-RAY:
- 92dc020a3defcf41-SJC
@@ -487,9 +473,8 @@ interactions:
- 0s
x-request-id:
- req_3b6c80fd3066b9e0054d0d2280bc4c98
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"trace_id": "08371613-b242-4871-bffa-1d93f96f6ba9", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,

@@ -54,21 +54,19 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUI2djjAEPBitxovNZdlibsOnAh6\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222694,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need\
\ to understand what specific information or topic to search for.\\nAction:\
\ search_web\\nAction Input: {\\\"query\\\":\\\"Test query\\\"}\",\n \
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\"\
: null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n\
\ \"prompt_tokens\": 242,\n \"completion_tokens\": 31,\n \"total_tokens\"\
: 273,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \
\ \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\"\
: 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUI2djjAEPBitxovNZdlibsOnAh6\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222694,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need to understand what
specific information or topic to search for.\\nAction: search_web\\nAction Input:
{\\\"query\\\":\\\"Test query\\\"}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 242,\n \"completion_tokens\":
31,\n \"total_tokens\": 273,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -118,9 +116,8 @@ interactions:
- 0s
x-request-id:
- req_3edd4db0325fb674bada6768e82b8dc6
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
@@ -182,23 +179,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUI3cMLea2cs1wZznSDwEKIlNszH\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222695,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
\ simulated search results related to a test query. However, I need to clarify\
\ the specific topic or question to provide a more accurate answer.\\nAction:\
\ search_web\\nAction Input: {\\\"query\\\":\\\"What is the purpose and significance\
\ of a test query?\\\"}\",\n \"refusal\": null,\n \"annotations\"\
: []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\
\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 295,\n \"completion_tokens\"\
: 56,\n \"total_tokens\": 351,\n \"prompt_tokens_details\": {\n \
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
\ \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUI3cMLea2cs1wZznSDwEKIlNszH\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222695,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I have simulated search
results related to a test query. However, I need to clarify the specific topic
or question to provide a more accurate answer.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"What is the purpose and significance of a test query?\\\"}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
295,\n \"completion_tokens\": 56,\n \"total_tokens\": 351,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -242,9 +236,8 @@ interactions:
- 0s
x-request-id:
- req_a2022ae3f8c0553cd9c9f0ca3de3eea7
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
Ct8CCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkStgIKEgoQY3Jld2FpLnRl
@@ -349,23 +342,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUI5apzxz891mmkVpae1FIcj5bog\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222697,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
\ some simulated search results regarding the purpose and significance of\
\ a test query but still need clearer context to provide a meaningful answer.\\
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"examples of test queries\
\ in various contexts\\\"}\",\n \"refusal\": null,\n \"annotations\"\
: []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\
\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 381,\n \"completion_tokens\"\
: 49,\n \"total_tokens\": 430,\n \"prompt_tokens_details\": {\n \
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
\ \"fp_44added55e\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUI5apzxz891mmkVpae1FIcj5bog\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222697,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I have some simulated
search results regarding the purpose and significance of a test query but still
need clearer context to provide a meaningful answer.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"examples of test queries in various contexts\\\"}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
381,\n \"completion_tokens\": 49,\n \"total_tokens\": 430,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_44added55e\"\n}\n"
headers:
CF-RAY:
- 92dc01919a73cf41-SJC
@@ -409,9 +399,8 @@ interactions:
- 0s
x-request-id:
- req_e9af3cd9a5cb0440a452c95861ab82d0
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
@@ -484,23 +473,20 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-BKUI6HbKAVI6BU8OX4Zh6yr7BXwRo\",\n \"object\"\
: \"chat.completion\",\n \"created\": 1744222698,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I still\
\ have only simulated results and not specific information that can lead to\
\ a final answer. I need to refine the search for more relevant information.\\
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"test query examples\
\ in technology and software development\\\"}\",\n \"refusal\": null,\n\
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"
finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\"\
: 457,\n \"completion_tokens\": 53,\n \"total_tokens\": 510,\n \"
prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\"\
: 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"\
: 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n\
\ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
content: "{\n \"id\": \"chatcmpl-BKUI6HbKAVI6BU8OX4Zh6yr7BXwRo\",\n \"object\":
\"chat.completion\",\n \"created\": 1744222698,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I still have only simulated
results and not specific information that can lead to a final answer. I need
to refine the search for more relevant information.\\nAction: search_web\\nAction
Input: {\\\"query\\\":\\\"test query examples in technology and software development\\\"}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
457,\n \"completion_tokens\": 53,\n \"total_tokens\": 510,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
@@ -544,9 +530,8 @@ interactions:
- 0s
x-request-id:
- req_aab13cf3c930591d23ce6990b0bcd5c8
status:
code: 200
message: OK
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -625,23 +610,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUI8M2rjDrol5uVG9EQz1OGXUC8H\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222700,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ gathered simulated search results about test query examples in technology\
|
||||
\ and software development, but they are not precise enough to formulate a\
|
||||
\ final answer.\\nAction: search_web\\nAction Input: {\\\"query\\\":\\\"importance\
|
||||
\ of test queries in software testing\\\"} \",\n \"refusal\": null,\n\
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"\
|
||||
finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\"\
|
||||
: 538,\n \"completion_tokens\": 52,\n \"total_tokens\": 590,\n \"\
|
||||
prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\"\
|
||||
: 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"\
|
||||
: 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n\
|
||||
\ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
|
||||
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUI8M2rjDrol5uVG9EQz1OGXUC8H\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222700,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have gathered simulated
|
||||
search results about test query examples in technology and software development,
|
||||
but they are not precise enough to formulate a final answer.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"importance of test queries in software testing\\\"}
|
||||
\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
538,\n \"completion_tokens\": 52,\n \"total_tokens\": 590,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc019f0893cf41-SJC
|
||||
@@ -685,9 +667,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c8f2ae1b33dff9b6f88c9ab541c16c91
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -772,23 +753,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUI9HXHFAlkT7hKyE5JAuzg4KlWY\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222701,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ gathered simulated search results regarding the importance of test queries\
|
||||
\ in software testing, but I still need a concrete answer about test queries.\\\
|
||||
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"how to create effective\
|
||||
\ test queries\\\"} \",\n \"refusal\": null,\n \"annotations\"\
|
||||
: []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\
|
||||
\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 617,\n \"completion_tokens\"\
|
||||
: 50,\n \"total_tokens\": 667,\n \"prompt_tokens_details\": {\n \
|
||||
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUI9HXHFAlkT7hKyE5JAuzg4KlWY\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222701,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have gathered simulated
|
||||
search results regarding the importance of test queries in software testing,
|
||||
but I still need a concrete answer about test queries.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"how to create effective test queries\\\"} \",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 617,\n \"completion_tokens\":
|
||||
50,\n \"total_tokens\": 667,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01abdf49cf41-SJC
|
||||
@@ -832,9 +810,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_62026ef4db09d92b72d81dd96115b3e8
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CoEFCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS2AQKEgoQY3Jld2FpLnRl
|
||||
@@ -967,24 +944,21 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIB21skPx3AsqMYyDsUC4tQcJFG\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222703,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ gathered simulated search results on how to create effective test queries,\
|
||||
\ but I am still not reaching a definitive conclusion that addresses a specific\
|
||||
\ question about test queries.\\nAction: search_web\\nAction Input: {\\\"\
|
||||
query\\\":\\\"common practices for test queries in software development\\\"\
|
||||
}\",\n \"refusal\": null,\n \"annotations\": []\n },\n\
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n\
|
||||
\ \"usage\": {\n \"prompt_tokens\": 693,\n \"completion_tokens\": 56,\n\
|
||||
\ \"total_tokens\": 749,\n \"prompt_tokens_details\": {\n \"cached_tokens\"\
|
||||
: 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_44added55e\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIB21skPx3AsqMYyDsUC4tQcJFG\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222703,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have gathered simulated
|
||||
search results on how to create effective test queries, but I am still not reaching
|
||||
a definitive conclusion that addresses a specific question about test queries.\\nAction:
|
||||
search_web\\nAction Input: {\\\"query\\\":\\\"common practices for test queries
|
||||
in software development\\\"}\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 693,\n \"completion_tokens\":
|
||||
56,\n \"total_tokens\": 749,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_44added55e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01b38829cf41-SJC
|
||||
@@ -1028,9 +1002,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_23f7394cdd9e642f926101c1b3c4ce4c
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -1127,23 +1100,21 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUICFEqtNDypc1b9oOWmYRc7AsD8\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222704,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ retrieved simulated search results about common practices for test queries\
|
||||
\ in software development, but they still do not lead to a clear understanding\
|
||||
\ or conclusion.\\nAction: search_web\\nAction Input: {\\\"query\\\":\\\"\
|
||||
test queries definition and purpose in software testing\\\"}\",\n \"\
|
||||
refusal\": null,\n \"annotations\": []\n },\n \"logprobs\"\
|
||||
: null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n\
|
||||
\ \"prompt_tokens\": 777,\n \"completion_tokens\": 53,\n \"total_tokens\"\
|
||||
: 830,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \
|
||||
\ \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\"\
|
||||
: 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
|
||||
: \"default\",\n \"system_fingerprint\": \"fp_44added55e\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUICFEqtNDypc1b9oOWmYRc7AsD8\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222704,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have retrieved simulated
|
||||
search results about common practices for test queries in software development,
|
||||
but they still do not lead to a clear understanding or conclusion.\\nAction:
|
||||
search_web\\nAction Input: {\\\"query\\\":\\\"test queries definition and purpose
|
||||
in software testing\\\"}\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 777,\n \"completion_tokens\":
|
||||
53,\n \"total_tokens\": 830,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_44added55e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01bfbe03cf41-SJC
|
||||
@@ -1187,9 +1158,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3eb046791b1255574c32dcf8798618c3
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -1292,23 +1262,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIDeRfBofhIhyZITac402rRqpq4\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222705,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ obtained simulated search results on the definition and purpose of test\
|
||||
\ queries in software testing but have not reached a clear understanding of\
|
||||
\ the overall topic.\\nAction: search_web\\nAction Input: {\\\"query\\\":\\\
|
||||
\"best practices for writing test queries in programming\\\"}\",\n \
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\"\
|
||||
: null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n\
|
||||
\ \"prompt_tokens\": 858,\n \"completion_tokens\": 53,\n \"total_tokens\"\
|
||||
: 911,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \
|
||||
\ \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\"\
|
||||
: 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
|
||||
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIDeRfBofhIhyZITac402rRqpq4\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222705,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have obtained simulated
|
||||
search results on the definition and purpose of test queries in software testing
|
||||
but have not reached a clear understanding of the overall topic.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"best practices for writing test queries in programming\\\"}\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
858,\n \"completion_tokens\": 53,\n \"total_tokens\": 911,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01c6be56cf41-SJC
|
||||
@@ -1352,9 +1319,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6b4ab6ed4aa78f13539acd43f4ede325
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CvADCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSxwMKEgoQY3Jld2FpLnRl
|
||||
@@ -1502,23 +1468,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIFrC56yu0K1Kdj1JKa5ChC84RR\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222707,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ compiled some information regarding best practices for writing test queries\
|
||||
\ in programming, but it's still not yielding a direct answer or clear outcome.\\\
|
||||
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"typical structure of\
|
||||
\ test queries for SQL databases\\\"}\",\n \"refusal\": null,\n \
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\"\
|
||||
: \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 939,\n \
|
||||
\ \"completion_tokens\": 52,\n \"total_tokens\": 991,\n \"prompt_tokens_details\"\
|
||||
: {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"\
|
||||
completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\"\
|
||||
: 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\"\
|
||||
: 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\"\
|
||||
: \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIFrC56yu0K1Kdj1JKa5ChC84RR\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222707,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have compiled some information
|
||||
regarding best practices for writing test queries in programming, but it's still
|
||||
not yielding a direct answer or clear outcome.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"typical structure of test queries for SQL databases\\\"}\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
939,\n \"completion_tokens\": 52,\n \"total_tokens\": 991,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01ce6dd6cf41-SJC
|
||||
@@ -1562,9 +1525,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_74887821474e2c11eaf30624b239c1a0
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -1686,25 +1648,22 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIGFZys7kGg0T8X7mk0vRwllozu\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222708,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now\
|
||||
\ know the final answer\\nFinal Answer: Test queries are critical components\
|
||||
\ in software testing, often used as predefined statements to verify that\
|
||||
\ software behaves as expected. They typically consist of queries written\
|
||||
\ in a language suitable for the database or application being tested, and\
|
||||
\ they help ensure accuracy and reliability in data retrieval and manipulation\
|
||||
\ processes.\\n```\",\n \"refusal\": null,\n \"annotations\"\
|
||||
: []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\
|
||||
\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1131,\n \"completion_tokens\"\
|
||||
: 70,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \
|
||||
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_44added55e\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIGFZys7kGg0T8X7mk0vRwllozu\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222708,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||
Answer: Test queries are critical components in software testing, often used
|
||||
as predefined statements to verify that software behaves as expected. They typically
|
||||
consist of queries written in a language suitable for the database or application
|
||||
being tested, and they help ensure accuracy and reliability in data retrieval
|
||||
and manipulation processes.\\n```\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1131,\n \"completion_tokens\":
|
||||
70,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_44added55e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01d54d84cf41-SJC
|
||||
@@ -1748,9 +1707,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_7bf8e57839667548acfc526d23f26b8b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -1872,28 +1830,25 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIIKk71pZgTB8nANjAbcokqJQme\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222710,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I have\
|
||||
\ gathered some relevant information about the structure of test queries for\
|
||||
\ SQL databases, but I need to consolidate my findings to provide a well-rounded\
|
||||
\ answer.\\nFinal Answer: Test queries are structured to validate the behavior\
|
||||
\ of a database by retrieving or manipulating data, typically using SQL syntax.\
|
||||
\ They serve as a means to ensure that the database functions correctly and\
|
||||
\ meets specified requirements, and can include SELECT, INSERT, UPDATE, DELETE\
|
||||
\ statements. Common practices for writing effective test queries encompass\
|
||||
\ clarity, simplicity, and thoroughness to ensure comprehensive testing coverage.\\\
|
||||
n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n\
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n\
|
||||
\ \"usage\": {\n \"prompt_tokens\": 1131,\n \"completion_tokens\":\
|
||||
\ 111,\n \"total_tokens\": 1242,\n \"prompt_tokens_details\": {\n \
|
||||
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIIKk71pZgTB8nANjAbcokqJQme\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222710,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have gathered some relevant
|
||||
information about the structure of test queries for SQL databases, but I need
|
||||
to consolidate my findings to provide a well-rounded answer.\\nFinal Answer:
|
||||
Test queries are structured to validate the behavior of a database by retrieving
|
||||
or manipulating data, typically using SQL syntax. They serve as a means to ensure
|
||||
that the database functions correctly and meets specified requirements, and
|
||||
can include SELECT, INSERT, UPDATE, DELETE statements. Common practices for
|
||||
writing effective test queries encompass clarity, simplicity, and thoroughness
|
||||
to ensure comprehensive testing coverage.\\n```\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1131,\n \"completion_tokens\":
|
||||
111,\n \"total_tokens\": 1242,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
@@ -1937,9 +1892,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_b8f316509569a5b7f996865747bd7803
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
Cs4BCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpQEKEgoQY3Jld2FpLnRl
|
||||
@@ -2034,22 +1988,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUIK1cGWdTdCfXW97KnyTMDv1SD9\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222712,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need\
|
||||
\ to understand what specific information or topic the user is asking about.\\\
|
||||
nAction: search_web\\nAction Input: {\\\"query\\\":\\\"Test query\\\"}\",\n\
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"\
|
||||
usage\": {\n \"prompt_tokens\": 288,\n \"completion_tokens\": 33,\n\
|
||||
\ \"total_tokens\": 321,\n \"prompt_tokens_details\": {\n \"cached_tokens\"\
|
||||
: 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUIK1cGWdTdCfXW97KnyTMDv1SD9\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222712,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I need to understand what
|
||||
specific information or topic the user is asking about.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"Test query\\\"}\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 288,\n \"completion_tokens\":
|
||||
33,\n \"total_tokens\": 321,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01ee68c4cf41-SJC
|
||||
@@ -2093,9 +2044,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_e6bbe801ad40cf6cf543b8f61e91b697
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test Backstory\nYour
|
||||
personal goal is: Test Goal\n\nYou ONLY have access to the following tools,
|
||||
@@ -2160,22 +2110,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BKUILKUKNjoIxHwNlg5nnEk5nXZAq\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1744222713,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now\
|
||||
\ know the final answer\\nFinal Answer: {\\n \\\"test_field\\\": \\\"This\
|
||||
\ is a simulated search result for demonstration purposes.\\\"\\n}\\n```\"\
|
||||
,\n \"refusal\": null,\n \"annotations\": []\n },\n \
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n\
|
||||
\ \"usage\": {\n \"prompt_tokens\": 343,\n \"completion_tokens\": 34,\n\
|
||||
\ \"total_tokens\": 377,\n \"prompt_tokens_details\": {\n \"cached_tokens\"\
|
||||
: 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BKUILKUKNjoIxHwNlg5nnEk5nXZAq\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1744222713,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||
Answer: {\\n \\\"test_field\\\": \\\"This is a simulated search result for
|
||||
demonstration purposes.\\\"\\n}\\n```\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 343,\n \"completion_tokens\":
|
||||
34,\n \"total_tokens\": 377,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 92dc01f3ff6fcf41-SJC
|
||||
@@ -2219,9 +2166,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_f14d99a5f97f81331f62313a630e0f2c
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"trace_id": "28b6676f-156a-4c60-9164-3d8d71fd3d58", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
|
||||
@@ -58,22 +58,19 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BHEoYLbLcG8I0GR0JGYzy87op52A6\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1743448222,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need\
|
||||
\ to search for the latest information about the population of Tokyo.\\nAction:\
|
||||
\ search_web\\nAction Input: {\\\"query\\\":\\\"population of Tokyo\\\"}\\\
|
||||
n```\\n\",\n \"refusal\": null,\n \"annotations\": []\n \
|
||||
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n\
|
||||
\ ],\n \"usage\": {\n \"prompt_tokens\": 248,\n \"completion_tokens\"\
|
||||
: 36,\n \"total_tokens\": 284,\n \"prompt_tokens_details\": {\n \
|
||||
\ \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\"\
|
||||
: {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"\
|
||||
accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n\
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":\
|
||||
\ \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BHEoYLbLcG8I0GR0JGYzy87op52A6\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1743448222,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I need to search for the
|
||||
latest information about the population of Tokyo.\\nAction: search_web\\nAction
|
||||
Input: {\\\"query\\\":\\\"population of Tokyo\\\"}\\n```\\n\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 248,\n \"completion_tokens\":
|
||||
36,\n \"total_tokens\": 284,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
@@ -117,9 +114,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_77d393755080a9220633995272756327
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Research Assistant.
|
||||
You are a helpful research assistant who can search for information about the
|
||||
@@ -183,23 +179,20 @@ interactions:
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BHEoad9v9xvJUsnua1LAzxoEmoCHv\",\n \"object\"\
|
||||
: \"chat.completion\",\n \"created\": 1743448224,\n \"model\": \"gpt-4o-mini-2024-07-18\"\
|
||||
,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \
|
||||
\ \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now\
|
||||
\ know the final answer\\nFinal Answer: {\\n \\\"summary\\\": \\\"As of\
|
||||
\ 2023, the population of Tokyo is approximately 21 million people in the\
|
||||
\ city proper and around 37 million in the greater metropolitan area.\\\"\
|
||||
,\\n \\\"confidence\\\": \\\"high\\\"\\n}\\n```\",\n \"refusal\"\
|
||||
: null,\n \"annotations\": []\n },\n \"logprobs\": null,\n\
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\"\
|
||||
: 317,\n \"completion_tokens\": 61,\n \"total_tokens\": 378,\n \"\
|
||||
prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\"\
|
||||
: 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\"\
|
||||
: 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n\
|
||||
\ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
|
||||
: \"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
content: "{\n \"id\": \"chatcmpl-BHEoad9v9xvJUsnua1LAzxoEmoCHv\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1743448224,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||
Answer: {\\n \\\"summary\\\": \\\"As of 2023, the population of Tokyo is
|
||||
approximately 21 million people in the city proper and around 37 million in
|
||||
the greater metropolitan area.\\\",\\n \\\"confidence\\\": \\\"high\\\"\\n}\\n```\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
317,\n \"completion_tokens\": 61,\n \"total_tokens\": 378,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_b376dfbbd5\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 929225866a24eb2e-SJC
|
||||
@@ -243,9 +236,8 @@ interactions:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_7a97be879488ab0dffe069cf25539bf6
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"trace_id": "62d55ec4-458b-4b53-a165-7771758fc550", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,99 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
type of document is this? Answer in one word."},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '748'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.71.1
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01GsqBNcvf1u2Zg9ezjuAotu","type":"message","role":"assistant","content":[{"type":"text","text":"Invoice"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1626,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 23 Jan 2026 03:04:30 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-requests-limit:
- '4000'
anthropic-ratelimit-requests-remaining:
- '3999'
anthropic-ratelimit-requests-reset:
- '2026-01-23T03:04:29Z'
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '680'
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,84 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": [{"text": "What type of document
is this? Answer in one word."}, {"document": {"name": "document", "format":
"pdf", "source": {"bytes": "JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}}}]}],
"inferenceConfig": {}}'
headers:
Content-Length:
- '646'
Content-Type:
- !!binary |
YXBwbGljYXRpb24vanNvbg==
User-Agent:
- X-USER-AGENT-XXX
amz-sdk-invocation-id:
- AMZ-SDK-INVOCATION-ID-XXX
amz-sdk-request:
- !!binary |
YXR0ZW1wdD0x
authorization:
- AUTHORIZATION-XXX
x-amz-date:
- X-AMZ-DATE-XXX
method: POST
uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse
response:
body:
string: '{"metrics":{"latencyMs":867},"output":{"message":{"content":[{"text":"PDF"}],"role":"assistant"}},"stopReason":"end_turn","usage":{"inputTokens":57,"outputTokens":4,"serverToolUsage":{},"totalTokens":61}}'
headers:
Connection:
- keep-alive
Content-Length:
- '204'
Content-Type:
- application/json
Date:
- Fri, 23 Jan 2026 03:26:35 GMT
x-amzn-RequestId:
- X-AMZN-REQUESTID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": [{"text": "What type of document
is this? Answer in one word."}, {"document": {"name": "document", "format":
"pdf", "source": {"bytes": "JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}}}]}],
"inferenceConfig": {}}'
headers:
Content-Length:
- '646'
Content-Type:
- !!binary |
YXBwbGljYXRpb24vanNvbg==
User-Agent:
- X-USER-AGENT-XXX
amz-sdk-invocation-id:
- AMZ-SDK-INVOCATION-ID-XXX
amz-sdk-request:
- !!binary |
YXR0ZW1wdD0x
authorization:
- AUTHORIZATION-XXX
x-amz-date:
- X-AMZ-DATE-XXX
method: POST
uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse
response:
body:
string: '{"metrics":{"latencyMs":291},"output":{"message":{"content":[{"text":"Incomplete"}],"role":"assistant"}},"stopReason":"end_turn","usage":{"inputTokens":57,"outputTokens":5,"serverToolUsage":{},"totalTokens":62}}'
headers:
Connection:
- keep-alive
Content-Length:
- '211'
Content-Type:
- application/json
Date:
- Fri, 23 Jan 2026 06:02:32 GMT
x-amzn-RequestId:
- X-AMZN-REQUESTID-XXX
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,67 +0,0 @@
interactions:
- request:
body: '{"contents": [{"parts": [{"text": "Summarize what this text file says in
one sentence."}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
"mimeType": "text/plain"}}], "role": "user"}], "generationConfig": {}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '976'
content-type:
- application/json
host:
- generativelanguage.googleapis.com
x-goog-api-client:
- google-genai-sdk/1.49.0 gl-python/3.12.10
x-goog-api-key:
- X-GOOG-API-KEY-XXX
method: POST
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
[\n {\n \"text\": \"The text file outlines guidelines
for providing effective feedback, emphasizing clarity, specificity, a balance
of positive and constructive criticism, respect, objectivity, actionable suggestions,
and careful proofreading.\\n\"\n }\n ],\n \"role\":
\"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
-0.17109338442484537\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
136,\n \"candidatesTokenCount\": 36,\n \"totalTokenCount\": 172,\n \"promptTokensDetails\":
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 136\n
\ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
\"TEXT\",\n \"tokenCount\": 36\n }\n ]\n },\n \"modelVersion\":
\"gemini-2.0-flash\",\n \"responseId\": \"wxZzaYaiGYG2_uMPtMjFiAw\"\n}\n"
headers:
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Content-Type:
- application/json; charset=UTF-8
Date:
- Fri, 23 Jan 2026 06:35:48 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:
- gfet4t7; dur=675
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
X-Frame-Options:
- X-FRAME-OPTIONS-XXX
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,99 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
type of document is this? Answer in one word."},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '748'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.71.1
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01JTnNguizZK6JENnyGVQcca","type":"message","role":"assistant","content":[{"type":"text","text":"PDF"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1626,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard"}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 23 Jan 2026 03:04:27 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-requests-limit:
- '4000'
anthropic-ratelimit-requests-remaining:
- '3999'
anthropic-ratelimit-requests-reset:
- '2026-01-23T03:04:27Z'
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
x-envoy-upstream-service-time:
- '732'
status:
code: 200
message: OK
version: 1
@@ -1,67 +0,0 @@
interactions:
- request:
body: '{"contents": [{"parts": [{"text": "Summarize what this text says in one
sentence."}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
"mimeType": "text/plain"}}], "role": "user"}], "generationConfig": {}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '971'
content-type:
- application/json
host:
- generativelanguage.googleapis.com
x-goog-api-client:
- google-genai-sdk/1.49.0 gl-python/3.12.10
x-goog-api-key:
- X-GOOG-API-KEY-XXX
method: POST
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
[\n {\n \"text\": \"Effective review feedback should be
clear, specific, balanced, respectful, and constructive, focusing on behaviors
and outcomes with examples, objective criteria, and suggested next steps,
ensuring it is proofread for clarity.\\n\"\n }\n ],\n \"role\":
\"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
-0.35489303309743\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
135,\n \"candidatesTokenCount\": 41,\n \"totalTokenCount\": 176,\n \"promptTokensDetails\":
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 135\n
\ }\n ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
\"TEXT\",\n \"tokenCount\": 41\n }\n ]\n },\n \"modelVersion\":
\"gemini-2.0-flash\",\n \"responseId\": \"xBZzaY2tCsa9jrEP7JT1yAo\"\n}\n"
headers:
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Content-Type:
- application/json; charset=UTF-8
Date:
- Fri, 23 Jan 2026 06:35:48 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:
- gfet4t7; dur=732
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
X-Frame-Options:
- X-FRAME-OPTIONS-XXX
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff