Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-05-03 16:22:49 +00:00)
feat: native multimodal file handling; openai responses api
- add input_files parameter to Crew.kickoff(), Flow.kickoff(), Task, and Agent.kickoff()
- add provider-specific file uploaders for OpenAI, Anthropic, Gemini, and Bedrock
- add file type detection, constraint validation, and automatic format conversion
- add URL file source support for multimodal content
- add streaming uploads for large files
- add prompt caching support for Anthropic
- add OpenAI Responses API support
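For orientation, here is a minimal usage sketch of the new parameter, based on the API added in this commit. It assumes the optional extra declared below is installed (e.g. pip install "crewai[file-processing]") and that ImageFile/PDFFile are exported from crewai_files; their constructor arguments are assumptions, not confirmed by the diff.

    # Hypothetical usage sketch of the input_files parameter added in this commit.
    from crewai import Agent, Crew, Task
    from crewai_files import ImageFile, PDFFile  # provided by the crewai-files package

    analyst = Agent(
        role="Document analyst",
        goal="Summarize the attached material",
        backstory="Reads reports and screenshots.",
        llm="gpt-4o",  # vision-capable model, so files are injected natively
    )
    task = Task(
        description="Summarize the quarterly report and the dashboard screenshot.",
        expected_output="A short summary.",
        agent=analyst,
    )
    crew = Crew(agents=[analyst], tasks=[task])

    # Named files are stored per-crew and attached to the task's user message.
    result = crew.kickoff(
        input_files={
            "report": PDFFile(path="q3_report.pdf"),      # constructor args are assumptions
            "dashboard": ImageFile(path="dashboard.png"),
        }
    )
    print(result.raw)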
@@ -98,6 +98,9 @@ a2a = [
     "httpx-sse~=0.4.0",
     "aiocache[redis,memcached]~=0.12.3",
 ]
+file-processing = [
+    "crewai-files",
+]

 [project.scripts]

@@ -124,6 +127,7 @@ torchvision = [
     { index = "pytorch-nightly", marker = "python_version >= '3.13'" },
     { index = "pytorch", marker = "python_version < '3.13'" },
 ]
+crewai-files = { workspace = true }

 [build-system]
@@ -95,6 +95,7 @@ from crewai.utilities.training_handler import CrewTrainingHandler

 if TYPE_CHECKING:
+    from crewai_files import FileInput
     from crewai_tools import CodeInterpreterTool

     from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
@@ -188,7 +189,8 @@ class Agent(BaseAgent):
     )
     multimodal: bool = Field(
         default=False,
-        description="Whether the agent is multimodal.",
+        deprecated=True,
+        description="[DEPRECATED, will be removed in v2.0 - pass files natively.] Whether the agent is multimodal.",
     )
     inject_date: bool = Field(
         default=False,
@@ -1644,7 +1646,8 @@ class Agent(BaseAgent):
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
-    ) -> tuple[AgentExecutor, dict[str, str], dict[str, Any], list[CrewStructuredTool]]:
+        input_files: dict[str, FileInput] | None = None,
+    ) -> tuple[AgentExecutor, dict[str, Any], dict[str, Any], list[CrewStructuredTool]]:
         """Prepare common setup for kickoff execution.

         This method handles all the common preparation logic shared between
@@ -1654,6 +1657,7 @@ class Agent(BaseAgent):
         Args:
             messages: Either a string query or a list of message dictionaries.
             response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.

         Returns:
             Tuple of (executor, inputs, agent_info, parsed_tools) ready for execution.
@@ -1730,20 +1734,28 @@ class Agent(BaseAgent):
             i18n=self.i18n,
         )

         # Format messages
+        all_files: dict[str, Any] = {}
         if isinstance(messages, str):
             formatted_messages = messages
         else:
             formatted_messages = "\n".join(
                 str(msg.get("content", "")) for msg in messages if msg.get("content")
             )
+            for msg in messages:
+                if msg.get("files"):
+                    all_files.update(msg["files"])
+
+        if input_files:
+            all_files.update(input_files)

         # Build the input dict for the executor
-        inputs = {
+        inputs: dict[str, Any] = {
            "input": formatted_messages,
            "tool_names": get_tool_names(parsed_tools),
            "tools": render_text_description_and_args(parsed_tools),
         }
+        if all_files:
+            inputs["files"] = all_files

         return executor, inputs, agent_info, parsed_tools

@@ -1751,12 +1763,12 @@ class Agent(BaseAgent):
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput]:
-        """
-        Execute the agent with the given messages using the AgentExecutor.
+        """Execute the agent with the given messages using the AgentExecutor.

         This method provides standalone agent execution without requiring a Crew.
-        It supports tools, response formatting, and guardrails.
+        It supports tools, response formatting, guardrails, and file inputs.

         When called from within a Flow (sync or async method), this automatically
         detects the event loop and returns a coroutine that the Flow framework
@@ -1766,7 +1778,10 @@ class Agent(BaseAgent):
             messages: Either a string query or a list of message dictionaries.
                 If a string is provided, it will be converted to a user message.
                 If a list is provided, each dict should have 'role' and 'content' keys.
+                Messages can include a 'files' field with file inputs.
             response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.
+                Files can be paths, bytes, or File objects from crewai_files.

         Returns:
             LiteAgentOutput: The result of the agent execution.
@@ -1778,10 +1793,10 @@ class Agent(BaseAgent):
         # Magic auto-async: if inside event loop (e.g., inside a Flow),
         # return coroutine for Flow to await
         if is_inside_event_loop():
-            return self.kickoff_async(messages, response_format)
+            return self.kickoff_async(messages, response_format, input_files)

         executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
-            messages, response_format
+            messages, response_format, input_files
         )

         try:
@@ -2027,9 +2042,9 @@ class Agent(BaseAgent):
         self,
         messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> LiteAgentOutput:
-        """
-        Execute the agent asynchronously with the given messages.
+        """Execute the agent asynchronously with the given messages.

         This is the async version of the kickoff method that uses native async
         execution. It is designed for use within async contexts, such as when
@@ -2039,13 +2054,16 @@ class Agent(BaseAgent):
             messages: Either a string query or a list of message dictionaries.
                 If a string is provided, it will be converted to a user message.
                 If a list is provided, each dict should have 'role' and 'content' keys.
+                Messages can include a 'files' field with file inputs.
             response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.
+                Files can be paths, bytes, or File objects from crewai_files.

         Returns:
             LiteAgentOutput: The result of the agent execution.
         """
         executor, inputs, agent_info, parsed_tools = self._prepare_kickoff(
-            messages, response_format
+            messages, response_format, input_files
         )

         try:
@@ -2090,6 +2108,24 @@ class Agent(BaseAgent):
             )
             raise

+    async def akickoff(
+        self,
+        messages: str | list[LLMMessage],
+        response_format: type[Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
+    ) -> LiteAgentOutput:
+        """Async version of kickoff. Alias for kickoff_async.
+
+        Args:
+            messages: Either a string query or a list of message dictionaries.
+            response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.
+
+        Returns:
+            LiteAgentOutput: The result of the agent execution.
+        """
+        return await self.kickoff_async(messages, response_format, input_files)


 # Rebuild Agent model to resolve A2A type forward references
 try:
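A sketch of standalone agent execution with the new parameter, following the signatures above; the PDFFile constructor arguments are assumptions.

    from crewai import Agent
    from crewai_files import PDFFile  # assumes the crewai-files package is installed

    agent = Agent(
        role="Reviewer",
        goal="Review contracts",
        backstory="Legal reviewer",
        llm="gpt-4o",
    )

    # Option 1: pass named files alongside a plain string query.
    out = agent.kickoff(
        "List the termination clauses in the attached contract.",
        input_files={"contract": PDFFile(path="contract.pdf")},
    )

    # Option 2: attach files to a specific message dict via the new 'files' field.
    out = agent.kickoff(
        [{
            "role": "user",
            "content": "Compare these two contracts.",
            "files": {"a": PDFFile(path="a.pdf"), "b": PDFFile(path="b.pdf")},
        }]
    )
    print(out)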
@@ -45,6 +45,7 @@ from crewai.utilities.agent_utils import (
     track_delegation_if_needed,
 )
 from crewai.utilities.constants import TRAINING_DATA_FILE
+from crewai.utilities.file_store import aget_all_files, get_all_files
 from crewai.utilities.i18n import I18N, get_i18n
 from crewai.utilities.printer import Printer
 from crewai.utilities.string_utils import sanitize_tool_name
@@ -191,6 +192,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
         self.messages.append(format_message_for_llm(user_prompt))

+        self._inject_multimodal_files(inputs)
+
         self._show_start_logs()

         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
@@ -215,6 +218,66 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self._create_external_memory(formatted_answer)
         return {"output": formatted_answer.output}

+    def _inject_multimodal_files(self, inputs: dict[str, Any] | None = None) -> None:
+        """Attach files to the last user message for LLM-layer formatting.
+
+        Merges files from crew/task store and inputs dict, then attaches them
+        to the message's `files` field. Input files take precedence over
+        crew/task files with the same name.
+
+        Args:
+            inputs: Optional inputs dict that may contain files.
+        """
+        files: dict[str, Any] = {}
+
+        if self.crew and self.task:
+            crew_files = get_all_files(self.crew.id, self.task.id)
+            if crew_files:
+                files.update(crew_files)
+
+        if inputs and inputs.get("files"):
+            files.update(inputs["files"])
+
+        if not files:
+            return
+
+        for i in range(len(self.messages) - 1, -1, -1):
+            msg = self.messages[i]
+            if msg.get("role") == "user":
+                msg["files"] = files
+                break
+
+    async def _ainject_multimodal_files(
+        self, inputs: dict[str, Any] | None = None
+    ) -> None:
+        """Async attach files to the last user message for LLM-layer formatting.
+
+        Merges files from crew/task store and inputs dict, then attaches them
+        to the message's `files` field. Input files take precedence over
+        crew/task files with the same name.
+
+        Args:
+            inputs: Optional inputs dict that may contain files.
+        """
+        files: dict[str, Any] = {}
+
+        if self.crew and self.task:
+            crew_files = await aget_all_files(self.crew.id, self.task.id)
+            if crew_files:
+                files.update(crew_files)
+
+        if inputs and inputs.get("files"):
+            files.update(inputs["files"])
+
+        if not files:
+            return
+
+        for i in range(len(self.messages) - 1, -1, -1):
+            msg = self.messages[i]
+            if msg.get("role") == "user":
+                msg["files"] = files
+                break
+
     def _invoke_loop(self) -> AgentFinish:
         """Execute agent loop until completion.

@@ -700,7 +763,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             if (
                 original_tool
                 and hasattr(original_tool, "cache_function")
-                and original_tool.cache_function
+                and callable(original_tool.cache_function)
             ):
                 should_cache = original_tool.cache_function(
                     args_dict, raw_result
@@ -731,7 +794,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         error=e,
                     ),
                 )
-            elif max_usage_reached:
+            elif max_usage_reached and original_tool:
                 # Return error message when max usage limit is reached
                 result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."

@@ -810,6 +873,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
         self.messages.append(format_message_for_llm(user_prompt))

+        await self._ainject_multimodal_files(inputs)
+
         self._show_start_logs()

         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
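A standalone illustration of the injection strategy used by _inject_multimodal_files above: walk the conversation backwards and attach the merged file dict to the most recent user message, leaving provider-specific formatting to the LLM layer. This is a self-contained sketch, not the executor code itself.

    from typing import Any

    def attach_files_to_last_user_message(
        messages: list[dict[str, Any]], files: dict[str, Any]
    ) -> None:
        # No-op when there is nothing to attach.
        if not files:
            return
        # Walk backwards so the files land on the latest user turn.
        for msg in reversed(messages):
            if msg.get("role") == "user":
                msg["files"] = files
                break

    messages = [
        {"role": "system", "content": "You are helpful."},
        {"role": "user", "content": "Describe the attached image."},
    ]
    attach_files_to_last_user_message(messages, {"photo": "<ImageFile>"})
    assert "files" in messages[-1]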
@@ -8,6 +8,7 @@ from hashlib import md5
 import json
 import re
 from typing import (
     TYPE_CHECKING,
     Any,
     cast,
 )
@@ -31,6 +32,21 @@ from rich.console import Console
 from rich.panel import Panel
 from typing_extensions import Self


+if TYPE_CHECKING:
+    from crewai_files import FileInput
+
+try:
+    from crewai_files import get_supported_content_types
+
+    HAS_CREWAI_FILES = True
+except ImportError:
+    HAS_CREWAI_FILES = False
+
+    def get_supported_content_types(provider: str, api: str | None = None) -> list[str]:
+        return []
+
+
 from crewai.agent import Agent
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache.cache_handler import CacheHandler
@@ -80,6 +96,7 @@ from crewai.task import Task
 from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.task_output import TaskOutput
 from crewai.tools.agent_tools.agent_tools import AgentTools
+from crewai.tools.agent_tools.read_file_tool import ReadFileTool
 from crewai.tools.base_tool import BaseTool
 from crewai.types.streaming import CrewStreamingOutput
 from crewai.types.usage_metrics import UsageMetrics
@@ -88,6 +105,7 @@ from crewai.utilities.crew.models import CrewContext
 from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
 from crewai.utilities.file_handler import FileHandler
+from crewai.utilities.file_store import clear_files, get_all_files
 from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
     aggregate_raw_outputs_from_tasks,
@@ -677,7 +695,17 @@ class Crew(FlowTrackable, BaseModel):
     def kickoff(
         self,
         inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> CrewOutput | CrewStreamingOutput:
         """Execute the crew's workflow.

+        Args:
+            inputs: Optional input dictionary for task interpolation.
+            input_files: Optional dict of named file inputs for the crew.
+
         Returns:
             CrewOutput or CrewStreamingOutput if streaming is enabled.
         """
         if self.stream:
             enable_agent_streaming(self.agents)
             ctx = StreamingContext()
@@ -686,7 +714,7 @@ class Crew(FlowTrackable, BaseModel):
             """Execute the crew and capture the result."""
             try:
                 self.stream = False
-                crew_result = self.kickoff(inputs=inputs)
+                crew_result = self.kickoff(inputs=inputs, input_files=input_files)
                 if isinstance(crew_result, CrewOutput):
                     ctx.result_holder.append(crew_result)
             except Exception as exc:
@@ -709,7 +737,7 @@ class Crew(FlowTrackable, BaseModel):
         token = attach(baggage_ctx)

         try:
-            inputs = prepare_kickoff(self, inputs)
+            inputs = prepare_kickoff(self, inputs, input_files)

             if self.process == Process.sequential:
                 result = self._run_sequential_process()
@@ -733,13 +761,23 @@ class Crew(FlowTrackable, BaseModel):
             )
             raise
         finally:
+            clear_files(self.id)
             detach(token)

     def kickoff_for_each(
-        self, inputs: list[dict[str, Any]]
+        self,
+        inputs: list[dict[str, Any]],
+        input_files: dict[str, FileInput] | None = None,
     ) -> list[CrewOutput | CrewStreamingOutput]:
         """Executes the Crew's workflow for each input and aggregates results.

+        Args:
+            inputs: List of input dictionaries, one per execution.
+            input_files: Optional dict of named file inputs shared across all executions.
+
         Returns:
             List of CrewOutput or CrewStreamingOutput objects.

             If stream=True, returns a list of CrewStreamingOutput objects that must
             each be iterated to get stream chunks and access results.
         """
@@ -750,7 +788,7 @@ class Crew(FlowTrackable, BaseModel):
         for input_data in inputs:
             crew = self.copy()

-            output = crew.kickoff(inputs=input_data)
+            output = crew.kickoff(inputs=input_data, input_files=input_files)

             if not self.stream and crew.usage_metrics:
                 total_usage_metrics.add_usage_metrics(crew.usage_metrics)
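A short sketch of the per-input behaviour above: each copied crew receives the same shared input_files dict. It reuses the crew object from the first sketch; the ImageFile constructor arguments remain assumptions.

    from crewai_files import ImageFile  # assumes the crewai-files package

    outputs = crew.kickoff_for_each(
        inputs=[{"region": "EMEA"}, {"region": "APAC"}],
        input_files={"chart": ImageFile(path="revenue.png")},  # shared by every run
    )
    for output in outputs:
        print(output.raw)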
@@ -763,10 +801,19 @@ class Crew(FlowTrackable, BaseModel):
         return results

     async def kickoff_async(
-        self, inputs: dict[str, Any] | None = None
+        self,
+        inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> CrewOutput | CrewStreamingOutput:
         """Asynchronous kickoff method to start the crew execution.

+        Args:
+            inputs: Optional input dictionary for task interpolation.
+            input_files: Optional dict of named file inputs for the crew.
+
         Returns:
             CrewOutput or CrewStreamingOutput if streaming is enabled.

             If stream=True, returns a CrewStreamingOutput that can be async-iterated
             to get stream chunks. After iteration completes, access the final result
             via .result.
@@ -780,7 +827,7 @@ class Crew(FlowTrackable, BaseModel):
             async def run_crew() -> None:
                 try:
                     self.stream = False
-                    result = await asyncio.to_thread(self.kickoff, inputs)
+                    result = await asyncio.to_thread(self.kickoff, inputs, input_files)
                     if isinstance(result, CrewOutput):
                         ctx.result_holder.append(result)
                 except Exception as e:
@@ -798,13 +845,22 @@ class Crew(FlowTrackable, BaseModel):

             return streaming_output

-        return await asyncio.to_thread(self.kickoff, inputs)
+        return await asyncio.to_thread(self.kickoff, inputs, input_files)

     async def kickoff_for_each_async(
-        self, inputs: list[dict[str, Any]]
+        self,
+        inputs: list[dict[str, Any]],
+        input_files: dict[str, FileInput] | None = None,
     ) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
         """Executes the Crew's workflow for each input asynchronously.

+        Args:
+            inputs: List of input dictionaries, one per execution.
+            input_files: Optional dict of named file inputs shared across all executions.
+
         Returns:
             List of CrewOutput or CrewStreamingOutput objects.

             If stream=True, returns a single CrewStreamingOutput that yields chunks
             from all crews as they arrive. After iteration, access results via .results
             (list of CrewOutput).
@@ -813,18 +869,27 @@ class Crew(FlowTrackable, BaseModel):
         async def kickoff_fn(
             crew: Crew, input_data: dict[str, Any]
         ) -> CrewOutput | CrewStreamingOutput:
-            return await crew.kickoff_async(inputs=input_data)
+            return await crew.kickoff_async(inputs=input_data, input_files=input_files)

         return await run_for_each_async(self, inputs, kickoff_fn)

     async def akickoff(
-        self, inputs: dict[str, Any] | None = None
+        self,
+        inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> CrewOutput | CrewStreamingOutput:
         """Native async kickoff method using async task execution throughout.

         Unlike kickoff_async which wraps sync kickoff in a thread, this method
         uses native async/await for all operations including task execution,
         memory operations, and knowledge queries.

         Args:
             inputs: Optional input dictionary for task interpolation.
+            input_files: Optional dict of named file inputs for the crew.

         Returns:
             CrewOutput or CrewStreamingOutput if streaming is enabled.
         """
         if self.stream:
             enable_agent_streaming(self.agents)
@@ -833,7 +898,7 @@ class Crew(FlowTrackable, BaseModel):
             async def run_crew() -> None:
                 try:
                     self.stream = False
-                    inner_result = await self.akickoff(inputs)
+                    inner_result = await self.akickoff(inputs, input_files)
                     if isinstance(inner_result, CrewOutput):
                         ctx.result_holder.append(inner_result)
                 except Exception as exc:
@@ -857,7 +922,7 @@ class Crew(FlowTrackable, BaseModel):
         token = attach(baggage_ctx)

         try:
-            inputs = prepare_kickoff(self, inputs)
+            inputs = prepare_kickoff(self, inputs, input_files)

             if self.process == Process.sequential:
                 result = await self._arun_sequential_process()
@@ -881,14 +946,25 @@ class Crew(FlowTrackable, BaseModel):
             )
             raise
         finally:
+            clear_files(self.id)
             detach(token)

     async def akickoff_for_each(
-        self, inputs: list[dict[str, Any]]
+        self,
+        inputs: list[dict[str, Any]],
+        input_files: dict[str, FileInput] | None = None,
     ) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
         """Native async execution of the Crew's workflow for each input.

         Uses native async throughout rather than thread-based async.

+        Args:
+            inputs: List of input dictionaries, one per execution.
+            input_files: Optional dict of named file inputs shared across all executions.
+
         Returns:
             List of CrewOutput or CrewStreamingOutput objects.

             If stream=True, returns a single CrewStreamingOutput that yields chunks
             from all crews as they arrive.
         """
@@ -896,7 +972,7 @@ class Crew(FlowTrackable, BaseModel):
         async def kickoff_fn(
             crew: Crew, input_data: dict[str, Any]
         ) -> CrewOutput | CrewStreamingOutput:
-            return await crew.akickoff(inputs=input_data)
+            return await crew.akickoff(inputs=input_data, input_files=input_files)

         return await run_for_each_async(self, inputs, kickoff_fn)

@@ -1216,7 +1292,8 @@ class Crew(FlowTrackable, BaseModel):
             and hasattr(agent, "multimodal")
             and getattr(agent, "multimodal", False)
         ):
-            tools = self._add_multimodal_tools(agent, tools)
+            if not (agent.llm and agent.llm.supports_multimodal()):
+                tools = self._add_multimodal_tools(agent, tools)

         if agent and (hasattr(agent, "apps") and getattr(agent, "apps", None)):
             tools = self._add_platform_tools(task, tools)
@@ -1224,7 +1301,28 @@ class Crew(FlowTrackable, BaseModel):
         if agent and (hasattr(agent, "mcps") and getattr(agent, "mcps", None)):
             tools = self._add_mcp_tools(task, tools)

         # Return a list[BaseTool] compatible with Task.execute_sync and execute_async
+        files = get_all_files(self.id, task.id)
+        if files:
+            supported_types: list[str] = []
+            if agent and agent.llm and agent.llm.supports_multimodal():
+                provider = getattr(agent.llm, "provider", None) or getattr(
+                    agent.llm, "model", "openai"
+                )
+                api = getattr(agent.llm, "api", None)
+                supported_types = get_supported_content_types(provider, api)
+
+            def is_auto_injected(content_type: str) -> bool:
+                return any(content_type.startswith(t) for t in supported_types)
+
+            # Only add read_file tool if there are files that need it
+            files_needing_tool = {
+                name: f
+                for name, f in files.items()
+                if not is_auto_injected(f.content_type)
+            }
+            if files_needing_tool:
+                tools = self._add_file_tools(tools, files_needing_tool)
+
         return tools

     def _get_agent_to_use(self, task: Task) -> BaseAgent | None:
@@ -1308,6 +1406,22 @@ class Crew(FlowTrackable, BaseModel):
             return self._merge_tools(tools, cast(list[BaseTool], code_tools))
         return tools

+    def _add_file_tools(
+        self, tools: list[BaseTool], files: dict[str, Any]
+    ) -> list[BaseTool]:
+        """Add file reading tool when input files are available.
+
+        Args:
+            tools: Current list of tools.
+            files: Dictionary of input files.
+
+        Returns:
+            Updated list with file tool added.
+        """
+        read_file_tool = ReadFileTool()
+        read_file_tool.set_files(files)
+        return self._merge_tools(tools, [read_file_tool])
+
     def _add_delegation_tools(
         self, task: Task, tools: list[BaseTool]
     ) -> list[BaseTool]:
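The routing decision in the task-tools hunk above, shown in isolation: files whose content type the model can ingest directly are injected into the prompt, anything else is exposed through the read_file tool. The content-type lists here are invented for the example.

    def split_files(files: dict[str, str], supported_types: list[str]) -> tuple[dict, dict]:
        # files maps a name to its content type, mirroring f.content_type above.
        auto, tool_only = {}, {}
        for name, content_type in files.items():
            target = auto if any(content_type.startswith(t) for t in supported_types) else tool_only
            target[name] = content_type
        return auto, tool_only

    files = {"photo": "image/png", "report": "application/pdf", "data": "text/csv"}
    auto, tool_only = split_files(files, supported_types=["image/", "application/pdf"])
    print(auto)       # {'photo': 'image/png', 'report': 'application/pdf'}
    print(tool_only)  # {'data': 'text/csv'}  -> served via the ReadFileTool instead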
@@ -3,13 +3,16 @@

 from __future__ import annotations

 import asyncio
-from collections.abc import Callable, Coroutine, Iterable
+from collections.abc import Callable, Coroutine, Iterable, Mapping
 from typing import TYPE_CHECKING, Any

+from opentelemetry import baggage
+
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.crews.crew_output import CrewOutput
 from crewai.rag.embeddings.types import EmbedderConfig
 from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput
+from crewai.utilities.file_store import store_files
 from crewai.utilities.streaming import (
     StreamingState,
     TaskInfo,
@@ -17,7 +20,23 @@ from crewai.utilities.streaming import (
 )


+try:
+    from crewai_files import (
+        AudioFile,
+        ImageFile,
+        PDFFile,
+        TextFile,
+        VideoFile,
+    )
+
+    _FILE_TYPES: tuple[type, ...] = (AudioFile, ImageFile, PDFFile, TextFile, VideoFile)
+except ImportError:
+    _FILE_TYPES = ()
+
+
 if TYPE_CHECKING:
+    from crewai_files import FileInput
+
     from crewai.crew import Crew


@@ -176,7 +195,40 @@ def check_conditional_skip(
     return None


-def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any] | None:
+def _extract_files_from_inputs(inputs: dict[str, Any]) -> dict[str, Any]:
+    """Extract file objects from inputs dict.
+
+    Scans inputs for FileInput objects (ImageFile, TextFile, etc.) and
+    extracts them into a separate dict.
+
+    Args:
+        inputs: The inputs dictionary to scan.
+
+    Returns:
+        Dictionary of extracted file objects.
+    """
+    if not _FILE_TYPES:
+        return {}
+
+    files: dict[str, Any] = {}
+    keys_to_remove: list[str] = []
+
+    for key, value in inputs.items():
+        if isinstance(value, _FILE_TYPES):
+            files[key] = value
+            keys_to_remove.append(key)
+
+    for key in keys_to_remove:
+        del inputs[key]
+
+    return files
+
+
+def prepare_kickoff(
+    crew: Crew,
+    inputs: dict[str, Any] | None,
+    input_files: dict[str, FileInput] | None = None,
+) -> dict[str, Any] | None:
     """Prepare crew for kickoff execution.

     Handles before callbacks, event emission, task handler reset, input
@@ -185,6 +237,7 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
     Args:
         crew: The crew instance to prepare.
         inputs: Optional input dictionary to pass to the crew.
+        input_files: Optional dict of named file inputs for the crew.

     Returns:
         The potentially modified inputs dictionary after before callbacks.
@@ -198,14 +251,23 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
     reset_emission_counter()
     reset_last_event_id()

+    # Normalize inputs to dict[str, Any] for internal processing
+    normalized: dict[str, Any] | None = None
+    if inputs is not None:
+        if not isinstance(inputs, Mapping):
+            raise TypeError(
+                f"inputs must be a dict or Mapping, got {type(inputs).__name__}"
+            )
+        normalized = dict(inputs)
+
     for before_callback in crew.before_kickoff_callbacks:
-        if inputs is None:
-            inputs = {}
-        inputs = before_callback(inputs)
+        if normalized is None:
+            normalized = {}
+        normalized = before_callback(normalized)

     future = crewai_event_bus.emit(
         crew,
-        CrewKickoffStartedEvent(crew_name=crew.name, inputs=inputs),
+        CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
     )
     if future is not None:
         try:
@@ -216,9 +278,26 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
     crew._task_output_handler.reset()
     crew._logging_color = "bold_purple"

-    if inputs is not None:
-        crew._inputs = inputs
-        crew._interpolate_inputs(inputs)
+    # Check for flow input files in baggage context (inherited from parent Flow)
+    _flow_files = baggage.get_baggage("flow_input_files")
+    flow_files: dict[str, Any] = _flow_files if isinstance(_flow_files, dict) else {}
+
+    if normalized is not None:
+        # Extract file objects unpacked directly into inputs
+        unpacked_files = _extract_files_from_inputs(normalized)
+
+        # Merge files: flow_files < input_files < unpacked_files (later takes precedence)
+        all_files = {**flow_files, **(input_files or {}), **unpacked_files}
+        if all_files:
+            store_files(crew.id, all_files)
+
+        crew._inputs = normalized
+        crew._interpolate_inputs(normalized)
+    else:
+        # No inputs dict provided
+        all_files = {**flow_files, **(input_files or {})}
+        if all_files:
+            store_files(crew.id, all_files)
+
     crew._set_tasks_callbacks()
     crew._set_allow_crewai_trigger_context_for_first_task()

@@ -233,7 +312,7 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
     if crew.planning:
         crew._handle_crew_planning()

-    return inputs
+    return normalized


 class StreamingContext:
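The precedence rule from prepare_kickoff above, reduced to a runnable dict-merge demo: files unpacked from the inputs dict override the explicit input_files argument, which overrides files inherited from a parent Flow via baggage. The values here are placeholder strings standing in for file objects.

    flow_files = {"logo": "flow-level.png", "doc": "flow-level.pdf"}
    input_files = {"doc": "kickoff-arg.pdf"}
    unpacked_files = {"doc": "from-inputs.pdf"}

    # Later dicts win on key collisions, matching the comment in the diff.
    all_files = {**flow_files, **(input_files or {}), **unpacked_files}
    print(all_files)  # {'logo': 'flow-level.png', 'doc': 'from-inputs.pdf'}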
@@ -767,7 +767,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             return sanitize_tool_name(tool_call.name)
         if isinstance(tool_call, dict):
             func_info = tool_call.get("function", {})
-            return sanitize_tool_name(func_info.get("name", "") or tool_call.get("name", "unknown"))
+            return sanitize_tool_name(
+                func_info.get("name", "") or tool_call.get("name", "unknown")
+            )
         return "unknown"

     @router(execute_native_tool)
@@ -913,6 +915,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
         self.state.messages.append(format_message_for_llm(user_prompt))

+        self._inject_files_from_inputs(inputs)
+
         self.state.ask_for_human_input = bool(
             inputs.get("ask_for_human_input", False)
         )
@@ -995,6 +999,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
         self.state.messages.append(format_message_for_llm(user_prompt))

+        self._inject_files_from_inputs(inputs)
+
         self.state.ask_for_human_input = bool(
             inputs.get("ask_for_human_input", False)
         )
@@ -1033,6 +1039,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         finally:
             self._is_executing = False

+    async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
+        """Async version of invoke. Alias for invoke_async."""
+        return await self.invoke_async(inputs)
+
     def _handle_agent_action(
         self, formatted_answer: AgentAction, tool_result: ToolResult
     ) -> AgentAction | AgentFinish:
@@ -1180,6 +1190,22 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 training_data[agent_id] = agent_training_data
             training_handler.save(training_data)

+    def _inject_files_from_inputs(self, inputs: dict[str, Any]) -> None:
+        """Inject files from inputs into the last user message.
+
+        Args:
+            inputs: Input dictionary that may contain a 'files' key.
+        """
+        files = inputs.get("files")
+        if not files:
+            return
+
+        for i in range(len(self.state.messages) - 1, -1, -1):
+            msg = self.state.messages[i]
+            if msg.get("role") == "user":
+                msg["files"] = files
+                break
+
     @staticmethod
     def _format_prompt(prompt: str, inputs: dict[str, str]) -> str:
         """Format prompt template with input values.
@@ -83,6 +83,8 @@ from crewai.flow.utils import (


 if TYPE_CHECKING:
+    from crewai_files import FileInput
+
     from crewai.flow.async_feedback.types import PendingFeedbackContext
     from crewai.flow.human_feedback import HumanFeedbackResult
     from crewai.llms.base_llm import BaseLLM
@@ -1412,13 +1414,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
             object.__setattr__(self._state, key, value)

     def kickoff(
-        self, inputs: dict[str, Any] | None = None
+        self,
+        inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> Any | FlowStreamingOutput:
-        """
-        Start the flow execution in a synchronous context.
+        """Start the flow execution in a synchronous context.

         This method wraps kickoff_async so that all state initialization and event
         emission is handled in the asynchronous method.

         Args:
             inputs: Optional dictionary containing input values and/or a state ID.
+            input_files: Optional dict of named file inputs for the flow.

         Returns:
             The final output from the flow or FlowStreamingOutput if streaming.
         """
         if self.stream:
             result_holder: list[Any] = []
@@ -1438,7 +1448,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
             def run_flow() -> None:
                 try:
                     self.stream = False
-                    result = self.kickoff(inputs=inputs)
+                    result = self.kickoff(inputs=inputs, input_files=input_files)
                     result_holder.append(result)
                 except Exception as e:
                     # HumanFeedbackPending is expected control flow, not an error
@@ -1460,15 +1470,16 @@ class Flow(Generic[T], metaclass=FlowMeta):
             return streaming_output

         async def _run_flow() -> Any:
-            return await self.kickoff_async(inputs)
+            return await self.kickoff_async(inputs, input_files)

         return asyncio.run(_run_flow())

     async def kickoff_async(
-        self, inputs: dict[str, Any] | None = None
+        self,
+        inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> Any | FlowStreamingOutput:
-        """
-        Start the flow execution asynchronously.
+        """Start the flow execution asynchronously.

         This method performs state restoration (if an 'id' is provided and persistence is available)
         and updates the flow state with any additional inputs. It then emits the FlowStartedEvent,
@@ -1477,6 +1488,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

         Args:
             inputs: Optional dictionary containing input values and/or a state ID for restoration.
+            input_files: Optional dict of named file inputs for the flow.

         Returns:
             The final output from the flow, which is the result of the last executed method.
@@ -1499,7 +1511,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
             async def run_flow() -> None:
                 try:
                     self.stream = False
-                    result = await self.kickoff_async(inputs=inputs)
+                    result = await self.kickoff_async(
+                        inputs=inputs, input_files=input_files
+                    )
                     result_holder.append(result)
                 except Exception as e:
                     # HumanFeedbackPending is expected control flow, not an error
@@ -1523,6 +1537,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
             return streaming_output

         ctx = baggage.set_baggage("flow_inputs", inputs or {})
+        ctx = baggage.set_baggage("flow_input_files", input_files or {}, context=ctx)
         flow_token = attach(ctx)

         try:
@@ -1705,18 +1720,20 @@ class Flow(Generic[T], metaclass=FlowMeta):
             detach(flow_token)

     async def akickoff(
-        self, inputs: dict[str, Any] | None = None
+        self,
+        inputs: dict[str, Any] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> Any | FlowStreamingOutput:
         """Native async method to start the flow execution. Alias for kickoff_async.

         Args:
             inputs: Optional dictionary containing input values and/or a state ID for restoration.
+            input_files: Optional dict of named file inputs for the flow.

         Returns:
             The final output from the flow, which is the result of the last executed method.
         """
-        return await self.kickoff_async(inputs)
+        return await self.kickoff_async(inputs, input_files)

     async def _execute_start_method(self, start_method_name: FlowMethodName) -> None:
         """Executes a flow's start method and its triggered listeners.
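A sketch of the flow-level behaviour above: files handed to Flow.kickoff() are placed in OpenTelemetry baggage under "flow_input_files", so crews kicked off inside flow steps inherit them via prepare_kickoff. The class body and review_crew are illustrative, and the import path for Flow/start is assumed from current CrewAI conventions.

    from crewai.flow.flow import Flow, start
    from crewai_files import PDFFile  # assumes the crewai-files package

    class ReviewFlow(Flow):
        @start()
        def review(self):
            # review_crew is defined elsewhere; it picks up the flow's files
            # automatically through the baggage context.
            return review_crew.kickoff(inputs={"topic": "Q3"})

    flow = ReviewFlow()
    flow.kickoff(input_files={"report": PDFFile(path="q3_report.pdf")})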
@@ -1,8 +1,11 @@
 from __future__ import annotations

 import asyncio
 from collections.abc import Callable
 import inspect
 import json
 from typing import (
+    TYPE_CHECKING,
     Any,
     Literal,
     cast,
@@ -23,6 +26,10 @@ from pydantic import (
 )
 from typing_extensions import Self


+if TYPE_CHECKING:
+    from crewai_files import FileInput
+
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
 from crewai.agents.cache.cache_handler import CacheHandler
@@ -296,9 +303,9 @@ class LiteAgent(FlowTrackable, BaseModel):
         self,
         messages: str | list[LLMMessage],
         response_format: type[BaseModel] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> LiteAgentOutput:
-        """
-        Execute the agent with the given messages.
+        """Execute the agent with the given messages.

         Args:
             messages: Either a string query or a list of message dictionaries.
@@ -306,6 +313,8 @@ class LiteAgent(FlowTrackable, BaseModel):
                 If a list is provided, each dict should have 'role' and 'content' keys.
             response_format: Optional Pydantic model for structured output. If provided,
                 overrides self.response_format for this execution.
+            input_files: Optional dict of named files to attach to the message.
+                Files can be paths, bytes, or File objects from crewai_files.

         Returns:
             LiteAgentOutput: The result of the agent execution.
@@ -327,7 +336,7 @@ class LiteAgent(FlowTrackable, BaseModel):

         # Format messages for the LLM
         self._messages = self._format_messages(
-            messages, response_format=response_format
+            messages, response_format=response_format, input_files=input_files
         )

         return self._execute_core(
@@ -464,19 +473,45 @@ class LiteAgent(FlowTrackable, BaseModel):

         return output

-    async def kickoff_async(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
-        """
-        Execute the agent asynchronously with the given messages.
+    async def kickoff_async(
+        self,
+        messages: str | list[LLMMessage],
+        response_format: type[BaseModel] | None = None,
+        input_files: dict[str, FileInput] | None = None,
+    ) -> LiteAgentOutput:
+        """Execute the agent asynchronously with the given messages.

         Args:
             messages: Either a string query or a list of message dictionaries.
                 If a string is provided, it will be converted to a user message.
                 If a list is provided, each dict should have 'role' and 'content' keys.
+            response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.

         Returns:
             LiteAgentOutput: The result of the agent execution.
         """
-        return await asyncio.to_thread(self.kickoff, messages)
+        return await asyncio.to_thread(
+            self.kickoff, messages, response_format, input_files
+        )

+    async def akickoff(
+        self,
+        messages: str | list[LLMMessage],
+        response_format: type[BaseModel] | None = None,
+        input_files: dict[str, FileInput] | None = None,
+    ) -> LiteAgentOutput:
+        """Async version of kickoff. Alias for kickoff_async.
+
+        Args:
+            messages: Either a string query or a list of message dictionaries.
+            response_format: Optional Pydantic model for structured output.
+            input_files: Optional dict of named files to attach to the message.
+
+        Returns:
+            LiteAgentOutput: The result of the agent execution.
+        """
+        return await self.kickoff_async(messages, response_format, input_files)

     def _get_default_system_prompt(
         self, response_format: type[BaseModel] | None = None
@@ -520,12 +555,14 @@ class LiteAgent(FlowTrackable, BaseModel):
         self,
         messages: str | list[LLMMessage],
         response_format: type[BaseModel] | None = None,
+        input_files: dict[str, FileInput] | None = None,
     ) -> list[LLMMessage]:
         """Format messages for the LLM.

         Args:
-            messages: Input messages to format
-            response_format: Optional response format to use instead of self.response_format
+            messages: Input messages to format.
+            response_format: Optional response format to use instead of self.response_format.
+            input_files: Optional dict of named files to include with the messages.
         """
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
@@ -540,6 +577,13 @@ class LiteAgent(FlowTrackable, BaseModel):
         # Add the rest of the messages
         formatted_messages.extend(messages)

+        # Attach files to the last user message if provided
+        if input_files:
+            for msg in reversed(formatted_messages):
+                if msg.get("role") == "user":
+                    msg["files"] = input_files
+                    break
+
         return formatted_messages

     def _invoke_loop(self) -> AgentFinish:
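A short async usage sketch for the signatures above. It reuses the agent object from the earlier Agent sketch and the assumed ImageFile constructor; only the method names and parameters come from this diff.

    import asyncio
    from crewai_files import ImageFile

    async def main() -> None:
        out = await agent.akickoff(
            "What's in this screenshot?",
            input_files={"shot": ImageFile(path="screenshot.png")},
        )
        print(out)

    asyncio.run(main())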
@@ -53,6 +53,14 @@ from crewai.utilities.logger_utils import suppress_warnings
 from crewai.utilities.string_utils import sanitize_tool_name


+try:
+    from crewai_files import aformat_multimodal_content, format_multimodal_content
+
+    HAS_CREWAI_FILES = True
+except ImportError:
+    HAS_CREWAI_FILES = False
+
+
 if TYPE_CHECKING:
     from litellm.exceptions import ContextWindowExceededError
     from litellm.litellm_core_utils.get_supported_openai_params import (
@@ -588,6 +596,7 @@ class LLM(BaseLLM):
         stream: bool = False,
         interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
         thinking: AnthropicThinkingConfig | dict[str, Any] | None = None,
+        prefer_upload: bool = False,
         **kwargs: Any,
     ) -> None:
         """Initialize LLM instance.
@@ -624,6 +633,7 @@ class LLM(BaseLLM):
         self.callbacks = callbacks
         self.context_window_size = 0
         self.reasoning_effort = reasoning_effort
+        self.prefer_upload = prefer_upload
         self.additional_params = {
             k: v for k, v in kwargs.items() if k not in ("is_litellm", "provider")
         }
@@ -661,12 +671,14 @@ class LLM(BaseLLM):
         self,
         messages: str | list[LLMMessage],
         tools: list[dict[str, BaseTool]] | None = None,
+        skip_file_processing: bool = False,
     ) -> dict[str, Any]:
         """Prepare parameters for the completion call.

         Args:
             messages: Input messages for the LLM
             tools: Optional list of tool schemas
+            skip_file_processing: Skip file processing (used when already done async)

         Returns:
             Dict[str, Any]: Parameters for the completion call
@@ -674,6 +686,9 @@ class LLM(BaseLLM):
         # --- 1) Format messages according to provider requirements
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
+        # --- 1a) Process any file attachments into multimodal content
+        if not skip_file_processing:
+            messages = self._process_message_files(messages)
         formatted_messages = self._format_messages_for_provider(messages)

         # --- 2) Prepare the parameters for the completion call
@@ -684,7 +699,7 @@ class LLM(BaseLLM):
             "temperature": self.temperature,
             "top_p": self.top_p,
             "n": self.n,
-            "stop": self.stop,
+            "stop": self.stop or None,
             "max_tokens": self.max_tokens or self.max_completion_tokens,
             "presence_penalty": self.presence_penalty,
             "frequency_penalty": self.frequency_penalty,
@@ -1799,6 +1814,9 @@ class LLM(BaseLLM):
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]

+        # Process file attachments asynchronously before preparing params
+        messages = await self._aprocess_message_files(messages)
+
         if "o1" in self.model.lower():
             for message in messages:
                 if message.get("role") == "system":
@@ -1809,7 +1827,9 @@ class LLM(BaseLLM):
         if callbacks and len(callbacks) > 0:
             self.set_callbacks(callbacks)
         try:
-            params = self._prepare_completion_params(messages, tools)
+            params = self._prepare_completion_params(
+                messages, tools, skip_file_processing=True
+            )

             if self.stream:
                 return await self._ahandle_streaming_response(
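A minimal sketch of the new constructor flag introduced above: prefer_upload asks the file layer to push large files through the provider's Files API instead of inlining base64, when an uploader is available (see get_file_uploader further down). Only the constructor parameter and supports_multimodal come from this diff.

    from crewai import LLM

    llm = LLM(model="gpt-4o", prefer_upload=True)
    print(llm.supports_multimodal())  # True for vision-capable model prefixes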
@@ -1896,6 +1916,88 @@ class LLM(BaseLLM):
             ),
         )

+    def _process_message_files(self, messages: list[LLMMessage]) -> list[LLMMessage]:
+        """Process files attached to messages and format for provider.
+
+        For each message with a `files` field, formats the files into
+        provider-specific content blocks and updates the message content.
+
+        Args:
+            messages: List of messages that may contain file attachments.
+
+        Returns:
+            Messages with files formatted into content blocks.
+        """
+        if not HAS_CREWAI_FILES or not self.supports_multimodal():
+            return messages
+
+        provider = getattr(self, "provider", None) or self.model
+
+        for msg in messages:
+            files = msg.get("files")
+            if not files:
+                continue
+
+            content_blocks = format_multimodal_content(files, provider)
+            if not content_blocks:
+                msg.pop("files", None)
+                continue
+
+            existing_content = msg.get("content", "")
+            if isinstance(existing_content, str):
+                msg["content"] = [
+                    self.format_text_content(existing_content),
+                    *content_blocks,
+                ]
+            elif isinstance(existing_content, list):
+                msg["content"] = [*existing_content, *content_blocks]
+
+            msg.pop("files", None)
+
+        return messages
+
+    async def _aprocess_message_files(
+        self, messages: list[LLMMessage]
+    ) -> list[LLMMessage]:
+        """Async process files attached to messages and format for provider.
+
+        For each message with a `files` field, formats the files into
+        provider-specific content blocks and updates the message content.
+
+        Args:
+            messages: List of messages that may contain file attachments.
+
+        Returns:
+            Messages with files formatted into content blocks.
+        """
+        if not HAS_CREWAI_FILES or not self.supports_multimodal():
+            return messages
+
+        provider = getattr(self, "provider", None) or self.model
+
+        for msg in messages:
+            files = msg.get("files")
+            if not files:
+                continue
+
+            content_blocks = await aformat_multimodal_content(files, provider)
+            if not content_blocks:
+                msg.pop("files", None)
+                continue
+
+            existing_content = msg.get("content", "")
+            if isinstance(existing_content, str):
+                msg["content"] = [
+                    self.format_text_content(existing_content),
+                    *content_blocks,
+                ]
+            elif isinstance(existing_content, list):
+                msg["content"] = [*existing_content, *content_blocks]
+
+            msg.pop("files", None)
+
+        return messages
+
     def _format_messages_for_provider(
         self, messages: list[LLMMessage]
     ) -> list[dict[str, str]]:
@@ -2123,6 +2225,7 @@ class LLM(BaseLLM):
                 "reasoning_effort",
                 "stream",
                 "stop",
+                "prefer_upload",
             ]
         }

@@ -2150,6 +2253,7 @@ class LLM(BaseLLM):
             reasoning_effort=self.reasoning_effort,
             stream=self.stream,
             stop=self.stop,
+            prefer_upload=self.prefer_upload,
             **filtered_params,
         )

@@ -2185,6 +2289,7 @@ class LLM(BaseLLM):
                 "reasoning_effort",
                 "stream",
                 "stop",
+                "prefer_upload",
             ]
         }

@@ -2218,5 +2323,28 @@ class LLM(BaseLLM):
             reasoning_effort=self.reasoning_effort,
             stream=self.stream,
             stop=copy.deepcopy(self.stop, memo) if self.stop else None,
+            prefer_upload=self.prefer_upload,
             **filtered_params,
         )

+    def supports_multimodal(self) -> bool:
+        """Check if the model supports multimodal inputs.
+
+        For litellm, check common vision-enabled model prefixes.
+
+        Returns:
+            True if the model likely supports images.
+        """
+        vision_prefixes = (
+            "gpt-4o",
+            "gpt-4-turbo",
+            "gpt-4-vision",
+            "gpt-4.1",
+            "claude-3",
+            "claude-4",
+            "gemini",
+        )
+        model_lower = self.model.lower()
+        return any(
+            model_lower.startswith(p) or f"/{p}" in model_lower for p in vision_prefixes
+        )
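What _process_message_files produces, in spirit: a text block built by format_text_content plus provider-specific blocks for each file. The image block below uses the OpenAI-style shape purely as an illustration; the real blocks come from crewai_files.format_multimodal_content and vary by provider.

    message_before = {
        "role": "user",
        "content": "Describe the chart.",
        "files": {"chart": "<ImageFile revenue.png>"},
    }

    message_after = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the chart."},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
        ],
    }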
@@ -31,6 +31,14 @@ from crewai.events.types.tool_usage_events import (
 from crewai.types.usage_metrics import UsageMetrics


+try:
+    from crewai_files import format_multimodal_content
+
+    HAS_CREWAI_FILES = True
+except ImportError:
+    HAS_CREWAI_FILES = False
+
+
 if TYPE_CHECKING:
     from crewai.agent.core import Agent
     from crewai.task import Task
@@ -71,6 +79,7 @@ class BaseLLM(ABC):
         api_key: str | None = None,
         base_url: str | None = None,
         provider: str | None = None,
+        prefer_upload: bool = False,
         **kwargs: Any,
     ) -> None:
         """Initialize the BaseLLM with default attributes.
@@ -79,6 +88,7 @@ class BaseLLM(ABC):
             model: The model identifier/name.
             temperature: Optional temperature setting for response generation.
             stop: Optional list of stop sequences for generation.
+            prefer_upload: Whether to prefer file upload over inline base64.
             **kwargs: Additional provider-specific parameters.
         """
         if not model:
@@ -88,6 +98,7 @@ class BaseLLM(ABC):
         self.temperature = temperature
         self.api_key = api_key
         self.base_url = base_url
+        self.prefer_upload = prefer_upload
         # Store additional parameters for provider-specific use
         self.additional_params = kwargs
         self._provider = provider or "openai"
@@ -280,6 +291,39 @@ class BaseLLM(ABC):
         # Default implementation - subclasses should override with model-specific values
         return DEFAULT_CONTEXT_WINDOW_SIZE

+    def supports_multimodal(self) -> bool:
+        """Check if the LLM supports multimodal inputs.
+
+        Returns:
+            True if the LLM supports images, PDFs, audio, or video.
+        """
+        return False
+
+    def format_text_content(self, text: str) -> dict[str, Any]:
+        """Format text as a content block for the LLM.
+
+        Default implementation uses OpenAI/Anthropic format.
+        Subclasses should override for provider-specific formatting.
+
+        Args:
+            text: The text content to format.
+
+        Returns:
+            A content block in the provider's expected format.
+        """
+        return {"type": "text", "text": text}
+
+    def get_file_uploader(self) -> Any:
+        """Get a file uploader configured with this LLM's client.
+
+        Returns an uploader instance that reuses this LLM's authenticated client,
+        avoiding the need to create a new connection for file uploads.
+
+        Returns:
+            A FileUploader instance, or None if not supported by this provider.
+        """
+        return None
+
     # Common helper methods for native SDK implementations

     def _emit_call_started_event(
@@ -506,6 +550,48 @@ class BaseLLM(ABC):
                     f"Message at index {i} must have 'role' and 'content' keys"
                 )

+        return self._process_message_files(messages)
+
+    def _process_message_files(self, messages: list[LLMMessage]) -> list[LLMMessage]:
+        """Process files attached to messages and format for the provider.
+
+        For each message with a `files` field, formats the files into
+        provider-specific content blocks and updates the message content.
+
+        Args:
+            messages: List of messages that may contain file attachments.
+
+        Returns:
+            Messages with files formatted into content blocks.
+        """
+        if not HAS_CREWAI_FILES or not self.supports_multimodal():
+            return messages
+
+        provider = getattr(self, "provider", None) or getattr(self, "model", "openai")
+        api = getattr(self, "api", None)
+
+        for msg in messages:
+            files = msg.get("files")
+            if not files:
+                continue
+
+            existing_content = msg.get("content", "")
+            text = existing_content if isinstance(existing_content, str) else None
+
+            content_blocks = format_multimodal_content(
+                files, provider, api=api, prefer_upload=self.prefer_upload, text=text
+            )
+            if not content_blocks:
+                msg.pop("files", None)
+                continue
+
+            if isinstance(existing_content, list):
+                msg["content"] = [*existing_content, *content_blocks]
+            else:
+                msg["content"] = content_blocks
+
+            msg.pop("files", None)
+
+        return messages
+
     @staticmethod
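How a native provider implementation is expected to plug into these hooks, as a self-contained sketch; the class is illustrative and is not the real BaseLLM subclass interface.

    from typing import Any

    class ProviderLLMSketch:
        """Illustrative provider integration opting into the multimodal hooks."""

        def __init__(self, model: str, prefer_upload: bool = False) -> None:
            self.model = model
            self.prefer_upload = prefer_upload

        def supports_multimodal(self) -> bool:
            # Gate on known vision-capable model names for this provider.
            return self.model.startswith("my-vision-model")

        def format_text_content(self, text: str) -> dict[str, Any]:
            # Default OpenAI/Anthropic-style block; override if the provider differs.
            return {"type": "text", "text": text}

        def get_file_uploader(self) -> Any:
            # Return an uploader that reuses this LLM's authenticated client,
            # or None when the provider has no Files API.
            return None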
@@ -31,6 +31,32 @@ except ImportError:
|
||||
) from None
|
||||
|
||||
|
||||
ANTHROPIC_FILES_API_BETA = "files-api-2025-04-14"
|
||||
|
||||
|
||||
def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool:
|
||||
"""Check if any message content contains a file_id reference.
|
||||
|
||||
Anthropic's Files API is in beta and requires a special header when
|
||||
file_id references are used in content blocks.
|
||||
|
||||
Args:
|
||||
messages: List of message dicts to check.
|
||||
|
||||
Returns:
|
||||
True if any content block contains a file_id reference.
|
||||
"""
|
||||
for message in messages:
|
||||
content = message.get("content")
|
||||
if isinstance(content, list):
|
||||
for block in content:
|
||||
if isinstance(block, dict):
|
||||
source = block.get("source", {})
|
||||
if isinstance(source, dict) and source.get("type") == "file":
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class AnthropicThinkingConfig(BaseModel):
|
||||
type: Literal["enabled", "disabled"]
|
||||
budget_tokens: int | None = None
|
||||
@@ -549,8 +575,14 @@ class AnthropicCompletion(BaseLLM):
|
||||
params["tools"] = [structured_tool]
|
||||
params["tool_choice"] = {"type": "tool", "name": "structured_output"}
|
||||
|
||||
uses_file_api = _contains_file_id_reference(params.get("messages", []))
|
||||
|
||||
try:
|
||||
response: Message = self.client.messages.create(**params)
|
||||
if uses_file_api:
|
||||
params["betas"] = [ANTHROPIC_FILES_API_BETA]
|
||||
response = self.client.beta.messages.create(**params)
|
||||
else:
|
||||
response = self.client.messages.create(**params)
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
@@ -973,8 +1005,14 @@ class AnthropicCompletion(BaseLLM):
|
||||
params["tools"] = [structured_tool]
|
||||
params["tool_choice"] = {"type": "tool", "name": "structured_output"}
|
||||
|
||||
uses_file_api = _contains_file_id_reference(params.get("messages", []))
|
||||
|
||||
try:
if uses_file_api:
params["betas"] = [ANTHROPIC_FILES_API_BETA]
response: Message = await self.async_client.beta.messages.create(**params)
else:
response = await self.async_client.messages.create(**params)
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
@@ -1316,3 +1354,29 @@ class AnthropicCompletion(BaseLLM):
|
||||
"total_tokens": input_tokens + output_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
def supports_multimodal(self) -> bool:
|
||||
"""Check if the model supports multimodal inputs.
|
||||
|
||||
All Claude 3+ models support vision and PDFs.
|
||||
|
||||
Returns:
|
||||
True if the model supports images and PDFs.
|
||||
"""
|
||||
return "claude-3" in self.model.lower() or "claude-4" in self.model.lower()
|
||||
|
||||
def get_file_uploader(self) -> Any:
|
||||
"""Get an Anthropic file uploader using this LLM's clients.
|
||||
|
||||
Returns:
|
||||
AnthropicFileUploader instance with pre-configured sync and async clients.
|
||||
"""
|
||||
try:
|
||||
from crewai_files.uploaders.anthropic import AnthropicFileUploader
|
||||
|
||||
return AnthropicFileUploader(
|
||||
client=self.client,
|
||||
async_client=self.async_client,
|
||||
)
|
||||
except ImportError:
|
||||
return None
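Callers can treat a None uploader as "Files API unavailable"; a sketch of that caller-side pattern (assumed usage, not from this diff):

uploader = llm.get_file_uploader()
if uploader is None:
    # crewai-files is not installed; fall back to inline base64 content blocks.
    ...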
|
||||
|
||||
@@ -1073,3 +1073,14 @@ class AzureCompletion(BaseLLM):
|
||||
async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
|
||||
"""Async context manager exit."""
|
||||
await self.aclose()
|
||||
|
||||
def supports_multimodal(self) -> bool:
|
||||
"""Check if the model supports multimodal inputs.
|
||||
|
||||
Azure OpenAI vision-enabled models include GPT-4o and GPT-4 Turbo with Vision.
|
||||
|
||||
Returns:
|
||||
True if the model supports images.
|
||||
"""
|
||||
vision_models = ("gpt-4o", "gpt-4-turbo", "gpt-4-vision", "gpt-4v")
|
||||
return any(self.model.lower().startswith(m) for m in vision_models)
|
||||
|
||||
@@ -1360,11 +1360,15 @@ class BedrockCompletion(BaseLLM):
|
||||
)
|
||||
else:
# Convert to Converse API format with proper content structure
if isinstance(content, list):
# Already formatted as multimodal content blocks
converse_messages.append({"role": role, "content": content})
else:
# String content - wrap in text block
text_content = content if content else ""
converse_messages.append(
{"role": role, "content": [{"text": text_content}]}
)
|
||||
|
||||
# CRITICAL: Handle model-specific conversation requirements
|
||||
# Cohere and some other models require conversation to end with user message
|
||||
@@ -1591,3 +1595,118 @@ class BedrockCompletion(BaseLLM):
|
||||
|
||||
# Default context window size
|
||||
return int(8192 * CONTEXT_WINDOW_USAGE_RATIO)
|
||||
|
||||
def supports_multimodal(self) -> bool:
|
||||
"""Check if the model supports multimodal inputs.
|
||||
|
||||
Claude 3+ and Nova Lite/Pro/Premier on Bedrock support vision.
|
||||
|
||||
Returns:
|
||||
True if the model supports images.
|
||||
"""
|
||||
model_lower = self.model.lower()
|
||||
vision_models = (
|
||||
"anthropic.claude-3",
|
||||
"amazon.nova-lite",
|
||||
"amazon.nova-pro",
|
||||
"amazon.nova-premier",
|
||||
"us.amazon.nova-lite",
|
||||
"us.amazon.nova-pro",
|
||||
"us.amazon.nova-premier",
|
||||
)
|
||||
return any(model_lower.startswith(m) for m in vision_models)
|
||||
|
||||
def _is_nova_model(self) -> bool:
|
||||
"""Check if the model is an Amazon Nova model.
|
||||
|
||||
Only Nova models support S3 links for multimedia.
|
||||
|
||||
Returns:
|
||||
True if the model is a Nova model.
|
||||
"""
|
||||
model_lower = self.model.lower()
|
||||
return "amazon.nova-" in model_lower
|
||||
|
||||
def get_file_uploader(self) -> Any:
|
||||
"""Get a Bedrock S3 file uploader using this LLM's AWS credentials.
|
||||
|
||||
Creates an S3 client using the same AWS credentials configured for
|
||||
this Bedrock LLM instance.
|
||||
|
||||
Returns:
|
||||
BedrockFileUploader instance with pre-configured S3 client,
|
||||
or None if crewai_files is not installed.
|
||||
"""
|
||||
try:
|
||||
import boto3
|
||||
from crewai_files.uploaders.bedrock import BedrockFileUploader
|
||||
|
||||
s3_client = boto3.client(
|
||||
"s3",
|
||||
region_name=self.region_name,
|
||||
aws_access_key_id=self.aws_access_key_id,
|
||||
aws_secret_access_key=self.aws_secret_access_key,
|
||||
aws_session_token=self.aws_session_token,
|
||||
)
|
||||
return BedrockFileUploader(
|
||||
region=self.region_name,
|
||||
client=s3_client,
|
||||
)
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
def _get_document_format(self, content_type: str) -> str | None:
|
||||
"""Map content type to Bedrock document format.
|
||||
|
||||
Args:
|
||||
content_type: MIME type of the document.
|
||||
|
||||
Returns:
|
||||
Bedrock format string or None if unsupported.
|
||||
"""
|
||||
format_map = {
|
||||
"application/pdf": "pdf",
|
||||
"text/csv": "csv",
|
||||
"text/plain": "txt",
|
||||
"text/markdown": "md",
|
||||
"text/html": "html",
|
||||
"application/msword": "doc",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
|
||||
"application/vnd.ms-excel": "xls",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
|
||||
}
|
||||
return format_map.get(content_type)
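Illustrative lookups against the mapping above (values taken directly from the table; no I/O involved):

fmt = llm._get_document_format(
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
)
assert fmt == "docx"
assert llm._get_document_format("application/zip") is None  # unsupported, so skipped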
|
||||
|
||||
def _get_video_format(self, content_type: str) -> str | None:
|
||||
"""Map content type to Bedrock video format.
|
||||
|
||||
Args:
|
||||
content_type: MIME type of the video.
|
||||
|
||||
Returns:
|
||||
Bedrock format string or None if unsupported.
|
||||
"""
|
||||
format_map = {
|
||||
"video/mp4": "mp4",
|
||||
"video/quicktime": "mov",
|
||||
"video/x-matroska": "mkv",
|
||||
"video/webm": "webm",
|
||||
"video/x-flv": "flv",
|
||||
"video/mpeg": "mpeg",
|
||||
"video/x-ms-wmv": "wmv",
|
||||
"video/3gpp": "three_gp",
|
||||
}
|
||||
return format_map.get(content_type)
|
||||
|
||||
def format_text_content(self, text: str) -> dict[str, Any]:
|
||||
"""Format text as a Bedrock content block.
|
||||
|
||||
Bedrock uses {"text": "..."} format instead of {"type": "text", "text": "..."}.
|
||||
|
||||
Args:
|
||||
text: The text content to format.
|
||||
|
||||
Returns:
|
||||
A content block in Bedrock's expected format.
|
||||
"""
|
||||
return {"text": text}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
@@ -516,17 +517,31 @@ class GeminiCompletion(BaseLLM):
|
||||
role = message["role"]
|
||||
content = message["content"]
|
||||
|
||||
# Build parts list from content
parts: list[types.Part] = []
if isinstance(content, list):
for item in content:
if isinstance(item, dict):
if "text" in item:
parts.append(types.Part.from_text(text=str(item["text"])))
elif "inlineData" in item:
inline = item["inlineData"]
parts.append(
types.Part.from_bytes(
data=base64.b64decode(inline["data"]),
mime_type=inline["mimeType"],
)
)
else:
parts.append(types.Part.from_text(text=str(item)))
else:
parts.append(types.Part.from_text(text=str(content) if content else ""))
|
||||
|
||||
if role == "system":
|
||||
# Extract system instruction - Gemini handles it separately
|
||||
text_content = " ".join(
|
||||
p.text for p in parts if hasattr(p, "text") and p.text
|
||||
)
|
||||
if system_instruction:
|
||||
system_instruction += f"\n\n{text_content}"
|
||||
else:
|
||||
@@ -583,9 +598,7 @@ class GeminiCompletion(BaseLLM):
|
||||
gemini_role = "model" if role == "assistant" else "user"
|
||||
|
||||
# Create Content object
gemini_content = types.Content(role=gemini_role, parts=parts)
|
||||
contents.append(gemini_content)
|
||||
|
||||
return contents, system_instruction
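A hypothetical message list showing the two content shapes the conversion above handles (base64 payload shortened for readability; it decodes to "Notes: ship it."):

messages = [
    {"role": "system", "content": "You are a file analyst."},
    {
        "role": "user",
        "content": [
            {"text": "Summarize the attached notes."},
            {"inlineData": {"data": "Tm90ZXM6IHNoaXAgaXQu", "mimeType": "text/plain"}},
        ],
    },
]
# The system text becomes system_instruction; the user message becomes one
# Content(role="user") holding a text Part and a bytes Part.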
|
||||
@@ -1177,3 +1190,39 @@ class GeminiCompletion(BaseLLM):
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
def supports_multimodal(self) -> bool:
|
||||
"""Check if the model supports multimodal inputs.
|
||||
|
||||
Gemini models support images, audio, video, and PDFs.
|
||||
|
||||
Returns:
|
||||
True if the model supports multimodal inputs.
|
||||
"""
|
||||
return True
|
||||
|
||||
def format_text_content(self, text: str) -> dict[str, Any]:
|
||||
"""Format text as a Gemini content block.
|
||||
|
||||
Gemini uses {"text": "..."} format instead of {"type": "text", "text": "..."}.
|
||||
|
||||
Args:
|
||||
text: The text content to format.
|
||||
|
||||
Returns:
|
||||
A content block in Gemini's expected format.
|
||||
"""
|
||||
return {"text": text}
|
||||
|
||||
def get_file_uploader(self) -> Any:
|
||||
"""Get a Gemini file uploader using this LLM's client.
|
||||
|
||||
Returns:
|
||||
GeminiFileUploader instance with pre-configured client.
|
||||
"""
|
||||
try:
|
||||
from crewai_files.uploaders.gemini import GeminiFileUploader
|
||||
|
||||
return GeminiFileUploader(client=self.client)
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
File diff suppressed because it is too large
@@ -44,6 +44,25 @@ from crewai.tools.base_tool import BaseTool
|
||||
from crewai.utilities.config import process_config
|
||||
from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
|
||||
from crewai.utilities.converter import Converter, convert_to_model
|
||||
from crewai.utilities.file_store import (
|
||||
clear_task_files,
|
||||
get_all_files,
|
||||
store_task_files,
|
||||
)
|
||||
|
||||
|
||||
try:
|
||||
from crewai_files import FileInput, FilePath, get_supported_content_types
|
||||
|
||||
HAS_CREWAI_FILES = True
|
||||
except ImportError:
|
||||
FileInput = Any # type: ignore[misc,assignment]
|
||||
HAS_CREWAI_FILES = False
|
||||
|
||||
def get_supported_content_types(provider: str, api: str | None = None) -> list[str]:
|
||||
return []
|
||||
|
||||
|
||||
from crewai.utilities.guardrail import (
|
||||
process_guardrail,
|
||||
)
|
||||
@@ -142,6 +161,10 @@ class Task(BaseModel):
|
||||
default_factory=list,
|
||||
description="Tools the agent is limited to use for this task.",
|
||||
)
|
||||
input_files: dict[str, FileInput] = Field(
|
||||
default_factory=dict,
|
||||
description="Named input files for this task. Keys are reference names, values are paths or File objects.",
|
||||
)
|
||||
security_config: SecurityConfig = Field(
|
||||
default_factory=SecurityConfig,
|
||||
description="Security configuration for the task.",
|
||||
@@ -357,6 +380,24 @@ class Task(BaseModel):
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
|
||||
@field_validator("input_files", mode="before")
|
||||
@classmethod
|
||||
def _normalize_input_files(cls, v: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Convert string paths to FilePath objects."""
|
||||
if not v:
|
||||
return v
|
||||
|
||||
if not HAS_CREWAI_FILES:
|
||||
return v
|
||||
|
||||
result = {}
|
||||
for key, value in v.items():
|
||||
if isinstance(value, str):
|
||||
result[key] = FilePath(path=Path(value))
|
||||
else:
|
||||
result[key] = value
|
||||
return result
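A short sketch of the normalization in practice (the file name and path are hypothetical):

task = Task(
    description="Summarize the attached report",
    expected_output="A short summary",
    input_files={"report": "docs/q3_report.pdf"},
)
# task.input_files["report"] is now FilePath(path=Path("docs/q3_report.pdf")),
# while File and FilePath objects pass through unchanged.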
|
||||
|
||||
@field_validator("output_file")
|
||||
@classmethod
|
||||
def output_file_validation(cls, value: str | None) -> str | None:
|
||||
@@ -495,10 +536,10 @@ class Task(BaseModel):
|
||||
) -> None:
|
||||
"""Execute the task asynchronously with context handling."""
|
||||
try:
result = self._execute_core(agent, context, tools)
future.set_result(result)
except Exception as e:
future.set_exception(e)
|
||||
|
||||
async def aexecute_sync(
|
||||
self,
|
||||
@@ -516,6 +557,7 @@ class Task(BaseModel):
|
||||
tools: list[Any] | None,
|
||||
) -> TaskOutput:
|
||||
"""Run the core execution logic of the task asynchronously."""
|
||||
self._store_input_files()
|
||||
try:
|
||||
agent = agent or self.agent
|
||||
self.agent = agent
|
||||
@@ -600,6 +642,8 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self)) # type: ignore[no-untyped-call]
|
||||
raise e # Re-raise the exception after emitting the event
|
||||
finally:
|
||||
clear_task_files(self.id)
|
||||
|
||||
def _execute_core(
|
||||
self,
|
||||
@@ -608,6 +652,7 @@ class Task(BaseModel):
|
||||
tools: list[Any] | None,
|
||||
) -> TaskOutput:
|
||||
"""Run the core execution logic of the task."""
|
||||
self._store_input_files()
|
||||
try:
|
||||
agent = agent or self.agent
|
||||
self.agent = agent
|
||||
@@ -693,6 +738,8 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self)) # type: ignore[no-untyped-call]
|
||||
raise e # Re-raise the exception after emitting the event
|
||||
finally:
|
||||
clear_task_files(self.id)
|
||||
|
||||
def prompt(self) -> str:
|
||||
"""Generates the task prompt with optional markdown formatting.
|
||||
@@ -715,6 +762,53 @@ class Task(BaseModel):
|
||||
if trigger_payload is not None:
|
||||
description += f"\n\nTrigger Payload: {trigger_payload}"
|
||||
|
||||
if self.agent and self.agent.crew:
|
||||
files = get_all_files(self.agent.crew.id, self.id)
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
if self.agent.llm and self.agent.llm.supports_multimodal():
|
||||
provider = getattr(self.agent.llm, "provider", None) or getattr(
|
||||
self.agent.llm, "model", "openai"
|
||||
)
|
||||
api = getattr(self.agent.llm, "api", None)
|
||||
supported_types = get_supported_content_types(provider, api)
|
||||
|
||||
def is_auto_injected(content_type: str) -> bool:
|
||||
return any(content_type.startswith(t) for t in supported_types)
|
||||
|
||||
auto_injected_files = {
|
||||
name: f_input
|
||||
for name, f_input in files.items()
|
||||
if is_auto_injected(f_input.content_type)
|
||||
}
|
||||
tool_files = {
|
||||
name: f_input
|
||||
for name, f_input in files.items()
|
||||
if not is_auto_injected(f_input.content_type)
|
||||
}
|
||||
|
||||
file_lines: list[str] = []
|
||||
|
||||
if auto_injected_files:
|
||||
file_lines.append(
|
||||
"Input files (content already loaded in conversation):"
|
||||
)
|
||||
for name, file_input in auto_injected_files.items():
|
||||
filename = file_input.filename or name
|
||||
file_lines.append(f' - "{name}" ({filename})')
|
||||
|
||||
if tool_files:
|
||||
file_lines.append(
|
||||
"Available input files (use the name in quotes with read_file tool):"
|
||||
)
|
||||
for name, file_input in tool_files.items():
|
||||
filename = file_input.filename or name
|
||||
content_type = file_input.content_type
|
||||
file_lines.append(f' - "{name}" ({filename}, {content_type})')
|
||||
|
||||
if file_lines:
|
||||
description += "\n\n" + "\n".join(file_lines)
|
||||
|
||||
tasks_slices = [description]
|
||||
|
||||
output = self.i18n.slice("expected_output").format(
|
||||
@@ -948,6 +1042,13 @@ Follow these guidelines:
|
||||
) from e
|
||||
return
|
||||
|
||||
def _store_input_files(self) -> None:
|
||||
"""Store task input files in the file store."""
|
||||
if not HAS_CREWAI_FILES or not self.input_files:
|
||||
return
|
||||
|
||||
store_task_files(self.id, self.input_files)
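End to end, the feature is driven from the task definition; a minimal usage sketch assuming crewai-files is installed and the path exists:

from crewai import Agent, Crew, Task

analyst = Agent(
    role="File Analyst",
    goal="Analyze and describe files accurately",
    backstory="Expert at analyzing various file types.",
)
task = Task(
    description="What type of document is this?",
    expected_output="A short description of the document",
    agent=analyst,
    input_files={"document": "docs/report.pdf"},
)
crew = Crew(agents=[analyst], tasks=[task])
result = crew.kickoff()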
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Task(description={self.description}, expected_output={self.expected_output})"
|
||||
|
||||
|
||||
lib/crewai/src/crewai/tools/agent_tools/read_file_tool.py (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
"""Tool for reading input files provided to the crew."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import BaseModel, Field, PrivateAttr
|
||||
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
|
||||
|
||||
class ReadFileToolSchema(BaseModel):
|
||||
"""Schema for read file tool arguments."""
|
||||
|
||||
file_name: str = Field(..., description="The name of the input file to read")
|
||||
|
||||
|
||||
class ReadFileTool(BaseTool):
|
||||
"""Tool for reading input files provided to the crew kickoff.
|
||||
|
||||
Provides agents access to files passed via the `files` key in inputs.
|
||||
"""
|
||||
|
||||
name: str = "read_file"
|
||||
description: str = (
|
||||
"Read content from an input file by name. "
|
||||
"Returns file content as text for text files, or base64 for binary files."
|
||||
)
|
||||
args_schema: type[BaseModel] = ReadFileToolSchema
|
||||
|
||||
_files: dict[str, FileInput] | None = PrivateAttr(default=None)
|
||||
|
||||
def set_files(self, files: dict[str, FileInput] | None) -> None:
|
||||
"""Set available input files.
|
||||
|
||||
Args:
|
||||
files: Dictionary mapping file names to file inputs.
|
||||
"""
|
||||
self._files = files
|
||||
|
||||
def _run(self, file_name: str, **kwargs: object) -> str:
|
||||
"""Read an input file by name.
|
||||
|
||||
Args:
|
||||
file_name: The name of the file to read.
|
||||
|
||||
Returns:
|
||||
File content as text for text files, or base64 encoded for binary.
|
||||
"""
|
||||
if not self._files:
|
||||
return "No input files available."
|
||||
|
||||
if file_name not in self._files:
|
||||
available = ", ".join(self._files.keys())
|
||||
return f"File '{file_name}' not found. Available files: {available}"
|
||||
|
||||
file_input = self._files[file_name]
|
||||
content = file_input.read()
|
||||
content_type = file_input.content_type
|
||||
filename = file_input.filename or file_name
|
||||
|
||||
text_types = (
|
||||
"text/",
|
||||
"application/json",
|
||||
"application/xml",
|
||||
"application/x-yaml",
|
||||
)
|
||||
|
||||
if any(content_type.startswith(t) for t in text_types):
|
||||
return content.decode("utf-8")
|
||||
|
||||
encoded = base64.b64encode(content).decode("ascii")
|
||||
return f"[Binary file: {filename} ({content_type})]\nBase64: {encoded}"
|
||||
@@ -613,13 +613,23 @@ def summarize_messages(
|
||||
) -> None:
|
||||
"""Summarize messages to fit within context window.
|
||||
|
||||
Preserves any files attached to user messages and re-attaches them to
|
||||
the summarized message. Files from all user messages are merged.
|
||||
|
||||
Args:
|
||||
messages: List of messages to summarize (modified in-place)
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
"""
preserved_files: dict[str, Any] = {}
for msg in messages:
if msg.get("role") == "user" and msg.get("files"):
preserved_files.update(msg["files"])

messages_string = " ".join(
[str(message.get("content", "")) for message in messages]
)
|
||||
cut_size = llm.get_context_window_size()
|
||||
|
||||
messages_groups = [
|
||||
@@ -636,7 +646,7 @@ def summarize_messages(
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
summarization_messages = [
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarizer_system_message"), role="system"
|
||||
),
|
||||
@@ -645,7 +655,7 @@ def summarize_messages(
|
||||
),
|
||||
]
|
||||
summary = llm.call(
summarization_messages,
|
||||
callbacks=callbacks,
|
||||
)
|
||||
summarized_contents.append({"content": str(summary)})
|
||||
@@ -653,11 +663,12 @@ def summarize_messages(
|
||||
merged_summary = " ".join(content["content"] for content in summarized_contents)
|
||||
|
||||
messages.clear()
summary_message = format_message_for_llm(
i18n.slice("summary").format(merged_summary=merged_summary)
)
if preserved_files:
summary_message["files"] = preserved_files
messages.append(summary_message)
|
||||
|
||||
|
||||
def show_agent_logs(
|
||||
@@ -859,7 +870,11 @@ def extract_tool_call_info(
|
||||
if hasattr(tool_call, "function"):
|
||||
# OpenAI-style: has .function.name and .function.arguments
|
||||
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
|
||||
return (
call_id,
sanitize_tool_name(tool_call.function.name),
tool_call.function.arguments,
)
|
||||
if hasattr(tool_call, "function_call") and tool_call.function_call:
|
||||
# Gemini-style: has .function_call.name and .function_call.args
|
||||
call_id = f"call_{id(tool_call)}"
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""JSON encoder for handling CrewAI specific types."""
|
||||
|
||||
import base64
|
||||
from datetime import date, datetime
|
||||
from decimal import Decimal
|
||||
from enum import Enum
|
||||
@@ -30,6 +31,9 @@ class CrewJSONEncoder(json.JSONEncoder):
|
||||
if isinstance(obj, (datetime, date)):
|
||||
return obj.isoformat()
|
||||
|
||||
if isinstance(obj, bytes):
|
||||
return base64.b64encode(obj).decode("utf-8")
|
||||
|
||||
return super().default(obj)
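With this branch in place, raw bytes serialize as base64 text; a small illustration:

import json

encoded = json.dumps({"raw": b"hello"}, cls=CrewJSONEncoder)
# encoded == '{"raw": "aGVsbG8="}'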
|
||||
|
||||
@staticmethod
|
||||
|
||||
lib/crewai/src/crewai/utilities/file_store.py (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
"""Global file store for crew and task execution."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Coroutine
|
||||
import concurrent.futures
|
||||
from typing import TYPE_CHECKING, TypeVar
|
||||
from uuid import UUID
|
||||
|
||||
from aiocache import Cache # type: ignore[import-untyped]
|
||||
from aiocache.serializers import PickleSerializer # type: ignore[import-untyped]
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
|
||||
_file_store = Cache(Cache.MEMORY, serializer=PickleSerializer())
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def _run_sync(coro: Coroutine[None, None, T]) -> T:
|
||||
"""Run a coroutine synchronously, handling nested event loops.
|
||||
|
||||
If called from within a running event loop, runs the coroutine in a
|
||||
separate thread to avoid "cannot run event loop while another is running".
|
||||
|
||||
Args:
|
||||
coro: The coroutine to run.
|
||||
|
||||
Returns:
|
||||
The result of the coroutine.
|
||||
"""
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
|
||||
future = executor.submit(asyncio.run, coro)
|
||||
return future.result()
|
||||
except RuntimeError:
|
||||
return asyncio.run(coro)
|
||||
|
||||
|
||||
DEFAULT_TTL = 3600
|
||||
|
||||
_CREW_PREFIX = "crew:"
|
||||
_TASK_PREFIX = "task:"
|
||||
|
||||
|
||||
async def astore_files(
|
||||
execution_id: UUID,
|
||||
files: dict[str, FileInput],
|
||||
ttl: int = DEFAULT_TTL,
|
||||
) -> None:
|
||||
"""Store files for a crew execution asynchronously.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
files: Dictionary mapping names to file inputs.
|
||||
ttl: Time-to-live in seconds.
|
||||
"""
|
||||
await _file_store.set(f"{_CREW_PREFIX}{execution_id}", files, ttl=ttl)
|
||||
|
||||
|
||||
async def aget_files(execution_id: UUID) -> dict[str, FileInput] | None:
|
||||
"""Retrieve files for a crew execution asynchronously.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
|
||||
Returns:
|
||||
Dictionary of files or None if not found.
|
||||
"""
|
||||
result: dict[str, FileInput] | None = await _file_store.get(
|
||||
f"{_CREW_PREFIX}{execution_id}"
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
async def aclear_files(execution_id: UUID) -> None:
|
||||
"""Clear files for a crew execution asynchronously.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
"""
|
||||
await _file_store.delete(f"{_CREW_PREFIX}{execution_id}")
|
||||
|
||||
|
||||
async def astore_task_files(
|
||||
task_id: UUID,
|
||||
files: dict[str, FileInput],
|
||||
ttl: int = DEFAULT_TTL,
|
||||
) -> None:
|
||||
"""Store files for a task execution asynchronously.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
files: Dictionary mapping names to file inputs.
|
||||
ttl: Time-to-live in seconds.
|
||||
"""
|
||||
await _file_store.set(f"{_TASK_PREFIX}{task_id}", files, ttl=ttl)
|
||||
|
||||
|
||||
async def aget_task_files(task_id: UUID) -> dict[str, FileInput] | None:
|
||||
"""Retrieve files for a task execution asynchronously.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
|
||||
Returns:
|
||||
Dictionary of files or None if not found.
|
||||
"""
|
||||
result: dict[str, FileInput] | None = await _file_store.get(
|
||||
f"{_TASK_PREFIX}{task_id}"
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
async def aclear_task_files(task_id: UUID) -> None:
|
||||
"""Clear files for a task execution asynchronously.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
"""
|
||||
await _file_store.delete(f"{_TASK_PREFIX}{task_id}")
|
||||
|
||||
|
||||
async def aget_all_files(
|
||||
crew_id: UUID,
|
||||
task_id: UUID | None = None,
|
||||
) -> dict[str, FileInput] | None:
|
||||
"""Get merged crew and task files asynchronously.
|
||||
|
||||
Task files override crew files with the same name.
|
||||
|
||||
Args:
|
||||
crew_id: Unique identifier for the crew execution.
|
||||
task_id: Optional task identifier for task-scoped files.
|
||||
|
||||
Returns:
|
||||
Merged dictionary of files or None if none found.
|
||||
"""
|
||||
crew_files = await aget_files(crew_id) or {}
|
||||
task_files = await aget_task_files(task_id) if task_id else {}
|
||||
|
||||
if not crew_files and not task_files:
|
||||
return None
|
||||
|
||||
return {**crew_files, **(task_files or {})}
|
||||
|
||||
|
||||
def store_files(
|
||||
execution_id: UUID,
|
||||
files: dict[str, FileInput],
|
||||
ttl: int = DEFAULT_TTL,
|
||||
) -> None:
|
||||
"""Store files for a crew execution.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
files: Dictionary mapping names to file inputs.
|
||||
ttl: Time-to-live in seconds.
|
||||
"""
|
||||
_run_sync(astore_files(execution_id, files, ttl))
|
||||
|
||||
|
||||
def get_files(execution_id: UUID) -> dict[str, FileInput] | None:
|
||||
"""Retrieve files for a crew execution.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
|
||||
Returns:
|
||||
Dictionary of files or None if not found.
|
||||
"""
|
||||
return _run_sync(aget_files(execution_id))
|
||||
|
||||
|
||||
def clear_files(execution_id: UUID) -> None:
|
||||
"""Clear files for a crew execution.
|
||||
|
||||
Args:
|
||||
execution_id: Unique identifier for the crew execution.
|
||||
"""
|
||||
_run_sync(aclear_files(execution_id))
|
||||
|
||||
|
||||
def store_task_files(
|
||||
task_id: UUID,
|
||||
files: dict[str, FileInput],
|
||||
ttl: int = DEFAULT_TTL,
|
||||
) -> None:
|
||||
"""Store files for a task execution.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
files: Dictionary mapping names to file inputs.
|
||||
ttl: Time-to-live in seconds.
|
||||
"""
|
||||
_run_sync(astore_task_files(task_id, files, ttl))
|
||||
|
||||
|
||||
def get_task_files(task_id: UUID) -> dict[str, FileInput] | None:
|
||||
"""Retrieve files for a task execution.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
|
||||
Returns:
|
||||
Dictionary of files or None if not found.
|
||||
"""
|
||||
return _run_sync(aget_task_files(task_id))
|
||||
|
||||
|
||||
def clear_task_files(task_id: UUID) -> None:
|
||||
"""Clear files for a task execution.
|
||||
|
||||
Args:
|
||||
task_id: Unique identifier for the task.
|
||||
"""
|
||||
_run_sync(aclear_task_files(task_id))
|
||||
|
||||
|
||||
def get_all_files(
|
||||
crew_id: UUID,
|
||||
task_id: UUID | None = None,
|
||||
) -> dict[str, FileInput] | None:
|
||||
"""Get merged crew and task files.
|
||||
|
||||
Task files override crew files with the same name.
|
||||
|
||||
Args:
|
||||
crew_id: Unique identifier for the crew execution.
|
||||
task_id: Optional task identifier for task-scoped files.
|
||||
|
||||
Returns:
|
||||
Merged dictionary of files or None if none found.
|
||||
"""
|
||||
return _run_sync(aget_all_files(crew_id, task_id))
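A typical flow, sketched with made-up IDs and names: crew-level files are stored at kickoff, task-level files override them by name, and entries expire after DEFAULT_TTL seconds:

from uuid import uuid4

from crewai_files import File

crew_id, task_id = uuid4(), uuid4()
store_files(crew_id, {"report": File(source=b"crew-level bytes")})
store_task_files(task_id, {"report": File(source=b"task-level bytes")})

merged = get_all_files(crew_id, task_id)
# merged["report"] is the task-level file
clear_task_files(task_id)
clear_files(crew_id)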
|
||||
@@ -1,10 +1,18 @@
|
||||
"""Types for CrewAI utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Literal
|
||||
|
||||
from typing_extensions import NotRequired, TypedDict
|
||||
|
||||
|
||||
try:
|
||||
from crewai_files import FileInput
|
||||
except ImportError:
|
||||
FileInput = Any # type: ignore[misc,assignment]
|
||||
|
||||
|
||||
class LLMMessage(TypedDict):
|
||||
"""Type for formatted LLM messages.
|
||||
|
||||
@@ -18,3 +26,4 @@ class LLMMessage(TypedDict):
|
||||
tool_call_id: NotRequired[str]
|
||||
name: NotRequired[str]
|
||||
tool_calls: NotRequired[list[dict[str, Any]]]
|
||||
files: NotRequired[dict[str, FileInput]]
|
||||
|
||||
@@ -829,3 +829,178 @@ def test_lite_agent_standalone_still_works():
|
||||
assert result is not None
|
||||
assert isinstance(result, LiteAgentOutput)
|
||||
assert result.raw is not None
|
||||
|
||||
|
||||
def test_agent_kickoff_with_files_parameter():
|
||||
"""Test that Agent.kickoff() accepts and passes files to the executor."""
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from crewai_files import File
|
||||
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Final Answer: I can see the file content."
|
||||
mock_llm.stop = []
|
||||
mock_llm.supports_stop_words.return_value = False
|
||||
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
|
||||
total_tokens=100,
|
||||
prompt_tokens=50,
|
||||
completion_tokens=50,
|
||||
cached_prompt_tokens=0,
|
||||
successful_requests=1,
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="File Analyzer",
|
||||
goal="Analyze files",
|
||||
backstory="An agent that analyzes files",
|
||||
llm=mock_llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
test_file = File(source=b"mock pdf content")
|
||||
input_files = {"document.pdf": test_file}
|
||||
|
||||
with patch.object(
|
||||
agent, "_prepare_kickoff", wraps=agent._prepare_kickoff
|
||||
) as mock_prepare:
|
||||
result = agent.kickoff(messages="Analyze the document", input_files=input_files)
|
||||
|
||||
mock_prepare.assert_called_once()
|
||||
call_args = mock_prepare.call_args
|
||||
assert call_args.args[0] == "Analyze the document"
|
||||
called_files = call_args.kwargs.get("input_files") or call_args.args[2]
|
||||
assert "document.pdf" in called_files
|
||||
assert called_files["document.pdf"] is test_file
|
||||
|
||||
assert result is not None
|
||||
|
||||
|
||||
def test_prepare_kickoff_extracts_files_from_messages():
|
||||
"""Test that _prepare_kickoff extracts files from messages."""
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai_files import File
|
||||
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Final Answer: Done."
|
||||
mock_llm.stop = []
|
||||
mock_llm.supports_stop_words.return_value = False
|
||||
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
|
||||
total_tokens=100,
|
||||
prompt_tokens=50,
|
||||
completion_tokens=50,
|
||||
cached_prompt_tokens=0,
|
||||
successful_requests=1,
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test files",
|
||||
backstory="Test backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
test_file = File(source=b"mock image content")
|
||||
messages = [
|
||||
{"role": "user", "content": "Analyze this", "files": {"img.png": test_file}}
|
||||
]
|
||||
|
||||
executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(messages=messages)
|
||||
|
||||
assert "files" in inputs
|
||||
assert "img.png" in inputs["files"]
|
||||
assert inputs["files"]["img.png"] is test_file
|
||||
|
||||
|
||||
def test_prepare_kickoff_merges_files_from_messages_and_parameter():
|
||||
"""Test that _prepare_kickoff merges files from messages and parameter."""
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai_files import File
|
||||
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Final Answer: Done."
|
||||
mock_llm.stop = []
|
||||
mock_llm.supports_stop_words.return_value = False
|
||||
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
|
||||
total_tokens=100,
|
||||
prompt_tokens=50,
|
||||
completion_tokens=50,
|
||||
cached_prompt_tokens=0,
|
||||
successful_requests=1,
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test files",
|
||||
backstory="Test backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
msg_file = File(source=b"message file content")
|
||||
param_file = File(source=b"param file content")
|
||||
messages = [
|
||||
{"role": "user", "content": "Analyze these", "files": {"from_msg.png": msg_file}}
|
||||
]
|
||||
input_files = {"from_param.pdf": param_file}
|
||||
|
||||
executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(
|
||||
messages=messages, input_files=input_files
|
||||
)
|
||||
|
||||
assert "files" in inputs
|
||||
assert "from_msg.png" in inputs["files"]
|
||||
assert "from_param.pdf" in inputs["files"]
|
||||
assert inputs["files"]["from_msg.png"] is msg_file
|
||||
assert inputs["files"]["from_param.pdf"] is param_file
|
||||
|
||||
|
||||
def test_prepare_kickoff_param_files_override_message_files():
|
||||
"""Test that files parameter overrides files from messages with same name."""
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai_files import File
|
||||
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
mock_llm = Mock(spec=LLM)
|
||||
mock_llm.call.return_value = "Final Answer: Done."
|
||||
mock_llm.stop = []
|
||||
mock_llm.supports_stop_words.return_value = False
|
||||
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
|
||||
total_tokens=100,
|
||||
prompt_tokens=50,
|
||||
completion_tokens=50,
|
||||
cached_prompt_tokens=0,
|
||||
successful_requests=1,
|
||||
)
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test files",
|
||||
backstory="Test backstory",
|
||||
llm=mock_llm,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
msg_file = File(source=b"message file content")
|
||||
param_file = File(source=b"param file content")
|
||||
messages = [
|
||||
{"role": "user", "content": "Analyze", "files": {"same.png": msg_file}}
|
||||
]
|
||||
input_files = {"same.png": param_file}
|
||||
|
||||
executor, inputs, agent_info, parsed_tools = agent._prepare_kickoff(
|
||||
messages=messages, input_files=input_files
|
||||
)
|
||||
|
||||
assert "files" in inputs
|
||||
assert inputs["files"]["same.png"] is param_file # param takes precedence
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,113 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
|
||||
Task: What type of document is this?\n\nBegin! This is VERY important to you,
|
||||
use the tools available and give your best Final Answer, your job depends on
|
||||
it!\n\nThought:"},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1351'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01AcygCF93tRhc7A3bfXMqe7","type":"message","role":"assistant","content":[{"type":"text","text":"Thought:
|
||||
I can see this is a PDF document, but the image appears to be completely white
|
||||
or blank. Without any visible content, I cannot definitively determine the
|
||||
specific type of document.\n\nFinal Answer: The document is a PDF file, but
|
||||
the provided image shows a blank white page with no discernible content or
|
||||
text. More information or a clearer image would be needed to identify the
|
||||
precise type of document."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1750,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":89,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-23T19:08:01Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '2837'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,111 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
|
||||
Task: What is this document?\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1343'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01XwAhfdaMxwTNzTy7YhmA5e","type":"message","role":"assistant","content":[{"type":"text","text":"Thought:
|
||||
I can see this is a PDF document, but the image appears to be blank or completely
|
||||
white. Without any visible text or content, I cannot determine the specific
|
||||
type or purpose of this document.\n\nFinal Answer: The document appears to
|
||||
be a blank white PDF page with no discernible text, images, or content visible.
|
||||
It could be an empty document, a scanning error, or a placeholder file."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1748,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":88,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:19 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-23T19:08:16Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '3114'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,137 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
|
||||
Task: What is this document?\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"gpt-4o-mini","instructions":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1235'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/responses
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"resp_059d23bc71d450aa006973c72416788197bddcc99157e3a313\",\n
|
||||
\ \"object\": \"response\",\n \"created_at\": 1769195300,\n \"status\":
|
||||
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
|
||||
\"developer\"\n },\n \"completed_at\": 1769195307,\n \"error\": null,\n
|
||||
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
|
||||
\"You are File Analyst. Expert at analyzing various file types.\\nYour personal
|
||||
goal is: Analyze and describe files accurately\\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\\n\\nThought:
|
||||
I now can give a great answer\\nFinal Answer: Your final answer must be the
|
||||
great and the most complete as possible, it must be outcome described.\\n\\nI
|
||||
MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
|
||||
null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"output\": [\n {\n \"id\": \"msg_059d23bc71d450aa006973c724b1d881979787b0eeb53bdbd2\",\n
|
||||
\ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
|
||||
[\n {\n \"type\": \"output_text\",\n \"annotations\":
|
||||
[],\n \"logprobs\": [],\n \"text\": \"Thought: I now can
|
||||
give a great answer. \\nFinal Answer: Without access to a specific document
|
||||
or its contents, I cannot provide a detailed analysis. However, in general,
|
||||
important aspects of a document can include its format (such as PDF, DOCX,
|
||||
or TXT), purpose (such as legal, informative, or persuasive), and key elements
|
||||
like headings, text structure, and any embedded media (such as images or charts).
|
||||
For a thorough analysis, it's essential to understand the context, audience,
|
||||
and intended use of the document. If you can provide the document itself or
|
||||
more context about it, I would be able to give a complete assessment.\"\n
|
||||
\ }\n ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\":
|
||||
true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\":
|
||||
null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\":
|
||||
null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\":
|
||||
\"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n
|
||||
\ \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n
|
||||
\ },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\":
|
||||
0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\":
|
||||
137,\n \"input_tokens_details\": {\n \"cached_tokens\": 0\n },\n
|
||||
\ \"output_tokens\": 132,\n \"output_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n },\n \"total_tokens\": 269\n },\n \"user\": null,\n \"metadata\":
|
||||
{}\n}"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '7347'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '7350'
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,81 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Summarize this text.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
|
||||
"mimeType": "text/plain"}}], "role": "user"}], "systemInstruction": {"parts":
|
||||
[{"text": "You are File Analyst. Expert at analyzing various file types.\nYour
|
||||
personal goal is: Analyze and describe files accurately\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"}], "role": "user"}, "generationConfig":
|
||||
{"stopSequences": ["\nObservation:"]}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1619'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.12.10
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"text\": \"Thought: This text provides guidelines
|
||||
for giving effective feedback. I need to summarize these guidelines in a clear
|
||||
and concise manner.\\n\\nFinal Answer: The text outlines eight guidelines
|
||||
for providing effective feedback: be clear and concise, focus on behavior
|
||||
and outcomes, be specific with examples, balance positive aspects with areas
|
||||
for improvement, be respectful and constructive by offering solutions, use
|
||||
objective criteria, suggest actionable next steps, and proofread for tone,
|
||||
grammar, and clarity before submission. These guidelines aim to ensure feedback
|
||||
is easily understood, impactful, and geared towards positive growth.\\n\"\n
|
||||
\ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\":
|
||||
\"STOP\",\n \"avgLogprobs\": -0.24753604923282657\n }\n ],\n \"usageMetadata\":
|
||||
{\n \"promptTokenCount\": 252,\n \"candidatesTokenCount\": 111,\n \"totalTokenCount\":
|
||||
363,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 252\n }\n ],\n \"candidatesTokensDetails\":
|
||||
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 111\n
|
||||
\ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
|
||||
\"88lzae_VGaGOjMcPxNCokQI\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:20:20 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:
- gfet4t7; dur=1200
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
X-Frame-Options:
- X-FRAME-OPTIONS-XXX
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,78 @@
interactions:
- request:
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Summarize this text
briefly.\n\nBegin! This is VERY important to you, use the tools available and
give your best Final Answer, your job depends on it!\n\nThought:"}, {"inlineData":
{"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
"mimeType": "text/plain"}}], "role": "user"}], "systemInstruction": {"parts":
[{"text": "You are File Analyst. Expert at analyzing various file types.\nYour
personal goal is: Analyze and describe files accurately\nTo give my best complete
final answer to the task respond using the exact following format:\n\nThought:
I now can give a great answer\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\n\nI MUST use
these formats, my job depends on it!"}], "role": "user"}, "generationConfig":
{"stopSequences": ["\nObservation:"]}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '1627'
content-type:
- application/json
host:
- generativelanguage.googleapis.com
x-goog-api-client:
- google-genai-sdk/1.49.0 gl-python/3.12.10
x-goog-api-key:
- X-GOOG-API-KEY-XXX
method: POST
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
response:
body:
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
[\n {\n \"text\": \"Thought: The text provides guidelines
for giving effective feedback. I need to summarize these guidelines concisely.\\n\\nFinal
Answer: The provided text outlines eight guidelines for delivering effective
feedback, emphasizing clarity, focus on behavior and outcomes, specificity,
balanced perspective, respect, objectivity, actionable suggestions, and proofreading.\\n\"\n
\ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\":
\"STOP\",\n \"avgLogprobs\": -0.18550947507222493\n }\n ],\n \"usageMetadata\":
{\n \"promptTokenCount\": 253,\n \"candidatesTokenCount\": 60,\n \"totalTokenCount\":
313,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
\ \"tokenCount\": 253\n }\n ],\n \"candidatesTokensDetails\":
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 60\n
\ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
\"9MlzacewKpKMjMcPtu7joQI\"\n}\n"
headers:
Alt-Svc:
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Content-Type:
- application/json; charset=UTF-8
Date:
- Fri, 23 Jan 2026 19:20:21 GMT
Server:
- scaffolding on HTTPServer2
Server-Timing:
- gfet4t7; dur=890
Transfer-Encoding:
- chunked
Vary:
- Origin
- X-Origin
- Referer
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
X-Frame-Options:
- X-FRAME-OPTIONS-XXX
X-XSS-Protection:
- '0'
status:
code: 200
message: OK
version: 1
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,136 @@
interactions:
- request:
body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
Task: What type of document is this?\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"gpt-4o-mini","instructions":"You
are File Analyst. Expert at analyzing various file types.\nYour personal goal
is: Analyze and describe files accurately\nTo give my best complete final answer
to the task respond using the exact following format:\n\nThought: I now can
give a great answer\nFinal Answer: Your final answer must be the great and the
most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1243'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/responses
response:
body:
string: "{\n \"id\": \"resp_00f57987a2fb291d006973c701938081939b336e7a0cb669cf\",\n
\ \"object\": \"response\",\n \"created_at\": 1769195265,\n \"status\":
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
\"developer\"\n },\n \"completed_at\": 1769195269,\n \"error\": null,\n
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
\"You are File Analyst. Expert at analyzing various file types.\\nYour personal
goal is: Analyze and describe files accurately\\nTo give my best complete
final answer to the task respond using the exact following format:\\n\\nThought:
I now can give a great answer\\nFinal Answer: Your final answer must be the
great and the most complete as possible, it must be outcome described.\\n\\nI
MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"output\": [\n {\n \"id\": \"msg_00f57987a2fb291d006973c7029b34819381420e8260962019\",\n
\ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
[\n {\n \"type\": \"output_text\",\n \"annotations\":
[],\n \"logprobs\": [],\n \"text\": \"Thought: I now can
give a great answer. \\nFinal Answer: The document is an identifiable file
type based on its characteristics. If it contains structured content, it might
be a PDF, Word document, or Excel spreadsheet. If it's a text file, it could
be a .txt or .csv. If images are present, it may be a .jpg, .png, or .gif.
Additional metadata or content inspection can confirm its exact type. The
format and extension provide critical insights into its intended use and functionality
within various applications.\"\n }\n ],\n \"role\": \"assistant\"\n
\ }\n ],\n \"parallel_tool_calls\": true,\n \"presence_penalty\": 0.0,\n
\ \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\":
null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n
\ },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\":
true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\":
\"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\":
\"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\":
\"disabled\",\n \"usage\": {\n \"input_tokens\": 139,\n \"input_tokens_details\":
{\n \"cached_tokens\": 0\n },\n \"output_tokens\": 109,\n \"output_tokens_details\":
{\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 248\n },\n
\ \"user\": null,\n \"metadata\": {}\n}"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 23 Jan 2026 19:07:49 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '3854'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '3857'
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,133 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
|
||||
Task: What type of document is this?\n\nBegin! This is VERY important to you,
|
||||
use the tools available and give your best Final Answer, your job depends on
|
||||
it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"o4-mini","instructions":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1239'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/responses
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"resp_02b841f189494a24006973c705c84c81938ac9360927749cd2\",\n
|
||||
\ \"object\": \"response\",\n \"created_at\": 1769195269,\n \"status\":
|
||||
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
|
||||
\"developer\"\n },\n \"completed_at\": 1769195274,\n \"error\": null,\n
|
||||
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
|
||||
\"You are File Analyst. Expert at analyzing various file types.\\nYour personal
|
||||
goal is: Analyze and describe files accurately\\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\\n\\nThought:
|
||||
I now can give a great answer\\nFinal Answer: Your final answer must be the
|
||||
great and the most complete as possible, it must be outcome described.\\n\\nI
|
||||
MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
|
||||
null,\n \"max_tool_calls\": null,\n \"model\": \"o4-mini-2025-04-16\",\n
|
||||
\ \"output\": [\n {\n \"id\": \"rs_02b841f189494a24006973c70641dc81938955c83f790392bd\",\n
|
||||
\ \"type\": \"reasoning\",\n \"summary\": []\n },\n {\n \"id\":
|
||||
\"msg_02b841f189494a24006973c709f6d081938e358e108f27434e\",\n \"type\":
|
||||
\"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n
|
||||
\ \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\":
|
||||
[],\n \"text\": \"I\\u2019m sorry, but I don\\u2019t see a document
|
||||
to analyze. Please provide the file or its content so I can determine its
|
||||
type.\"\n }\n ],\n \"role\": \"assistant\"\n }\n ],\n
|
||||
\ \"parallel_tool_calls\": true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\":
|
||||
null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\": null,\n
|
||||
\ \"reasoning\": {\n \"effort\": \"medium\",\n \"summary\": null\n },\n
|
||||
\ \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\":
|
||||
true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\":
|
||||
\"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\":
|
||||
\"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\":
|
||||
\"disabled\",\n \"usage\": {\n \"input_tokens\": 138,\n \"input_tokens_details\":
|
||||
{\n \"cached_tokens\": 0\n },\n \"output_tokens\": 418,\n \"output_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 384\n },\n \"total_tokens\": 556\n },\n
|
||||
\ \"user\": null,\n \"metadata\": {}\n}"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:07:54 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '4864'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '4867'
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,112 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
|
||||
Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
|
||||
(content already loaded in conversation):\n - \"document\" (document)\n\nThis
|
||||
is the expected criteria for your final answer: A brief description of the file.\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1634'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01HmgCWyKHbC5kBYcPir41zf","type":"message","role":"assistant","content":[{"type":"text","text":"Thought:
|
||||
I can analyze the file type and content.\n\nFinal Answer: A blank white PDF
|
||||
document with no visible text or content."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1815,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":31,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-23T19:08:28Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '1444'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,210 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task:
|
||||
Describe the file(s) you see. Be brief, one sentence max.\n\nAvailable input
|
||||
files (use the name in quotes with read_file tool):\n - \"document\" (document,
|
||||
application/pdf)\n\nThis is the expected criteria for your final answer: A brief
|
||||
description of the file.\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nThis is VERY important to you, your job depends
|
||||
on it!"}],"model":"claude-sonnet-4-20250514","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately","tools":[{"name":"read_file","description":"Read
|
||||
content from an input file by name. Returns file content as text for text files,
|
||||
or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
|
||||
name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1035'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-20250514","id":"msg_01QQ1BGjRzaj6vneE9LNtCoz","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll
|
||||
read and analyze the PDF document file for you."},{"type":"tool_use","id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","name":"read_file","input":{"file_name":"document"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":545,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":68,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:52 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '2123'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task:
|
||||
Describe the file(s) you see. Be brief, one sentence max.\n\nAvailable input
|
||||
files (use the name in quotes with read_file tool):\n - \"document\" (document,
|
||||
application/pdf)\n\nThis is the expected criteria for your final answer: A brief
|
||||
description of the file.\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nThis is VERY important to you, your job depends
|
||||
on it!"},{"role":"assistant","content":[{"type":"tool_use","id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","name":"read_file","input":{"file_name":"document"}}]},{"role":"user","content":[{"type":"tool_result","tool_use_id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","content":"[Binary
|
||||
file: document (application/pdf)]\nBase64: JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]},{"role":"user","content":"Analyze
|
||||
the tool result. If requirements are met, provide the Final Answer. Otherwise,
|
||||
call the next tool. Deliver only the answer without meta-commentary."}],"model":"claude-sonnet-4-20250514","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately","tools":[{"name":"read_file","description":"Read
|
||||
content from an input file by name. Returns file content as text for text files,
|
||||
or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
|
||||
name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1960'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-20250514","id":"msg_01CjWBqSxyLeArjUhqUTbedN","type":"message","role":"assistant","content":[{"type":"text","text":"The
|
||||
document is a minimal PDF file with basic structure containing one empty page
|
||||
with standard letter dimensions (612x792 points).\n\nJVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1035,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":400,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '5453'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,57 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": [{"text": "\nCurrent Task: Describe
|
||||
the file(s) you see. Be brief, one sentence max.\n\nInput files (content already
|
||||
loaded in conversation):\n - \"document\" (document)\n\nThis is the expected
|
||||
criteria for your final answer: A brief description of the file.\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}, {"document": {"name": "document", "format":
|
||||
"pdf", "source": {"bytes": "JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}}}]}],
|
||||
"inferenceConfig": {"stopSequences": ["\nObservation:"]}, "system": [{"text":
|
||||
"You are File Analyst. Expert at analyzing various file types.\nYour personal
|
||||
goal is: Analyze and describe files accurately\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}]}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '1545'
|
||||
Content-Type:
|
||||
- !!binary |
|
||||
YXBwbGljYXRpb24vanNvbg==
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
amz-sdk-invocation-id:
|
||||
- AMZ-SDK-INVOCATION-ID-XXX
|
||||
amz-sdk-request:
|
||||
- !!binary |
|
||||
YXR0ZW1wdD0x
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
x-amz-date:
|
||||
- X-AMZ-DATE-XXX
|
||||
method: POST
|
||||
uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1%3A0/converse
|
||||
response:
|
||||
body:
|
||||
string: '{"metrics":{"latencyMs":958},"output":{"message":{"content":[{"text":"Thought:
|
||||
I have reviewed the provided documents and can now give a complete answer.\n\nFinal
|
||||
Answer: The file \"document.pdf\" is an empty document with no text or content
|
||||
visible."}],"role":"assistant"}},"stopReason":"end_turn","usage":{"inputTokens":245,"outputTokens":42,"serverToolUsage":{},"totalTokens":287}}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '384'
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:16:37 GMT
|
||||
x-amzn-RequestId:
|
||||
- X-AMZN-REQUESTID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,113 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
|
||||
Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
|
||||
(content already loaded in conversation):\n - \"document\" (document)\n\nThis
|
||||
is the expected criteria for your final answer: A brief description of the file.\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1634'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.71.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01LwGGQrbGqSCmuTdqnKPdia","type":"message","role":"assistant","content":[{"type":"text","text":"Thought:
|
||||
I notice the document is a blank or completely white PDF page with no visible
|
||||
content.\n\nFinal Answer: A blank white PDF document with no text or visual
|
||||
elements."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1815,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":39,"service_tier":"standard"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-23T19:08:22Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '1713'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,135 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
|
||||
Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
|
||||
(content already loaded in conversation):\n - \"document\" (document)\n\nThis
|
||||
is the expected criteria for your final answer: A brief description of the file.\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"gpt-4o-mini","instructions":"You
|
||||
are File Analyst. Expert at analyzing various file types.\nYour personal goal
|
||||
is: Analyze and describe files accurately\nTo give my best complete final answer
|
||||
to the task respond using the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: Your final answer must be the great and the
|
||||
most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1526'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/responses
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"resp_05471d1edb407d99006973c72804a481978be02af19b36ae3f\",\n
|
||||
\ \"object\": \"response\",\n \"created_at\": 1769195304,\n \"status\":
|
||||
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
|
||||
\"developer\"\n },\n \"completed_at\": 1769195305,\n \"error\": null,\n
|
||||
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
|
||||
\"You are File Analyst. Expert at analyzing various file types.\\nYour personal
|
||||
goal is: Analyze and describe files accurately\\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\\n\\nThought:
|
||||
I now can give a great answer\\nFinal Answer: Your final answer must be the
|
||||
great and the most complete as possible, it must be outcome described.\\n\\nI
|
||||
MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
|
||||
null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"output\": [\n {\n \"id\": \"msg_05471d1edb407d99006973c728d29c8197b79da007251f2f90\",\n
|
||||
\ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
|
||||
[\n {\n \"type\": \"output_text\",\n \"annotations\":
|
||||
[],\n \"logprobs\": [],\n \"text\": \"Thought: I now can
|
||||
give a great answer \\nFinal Answer: The file \\\"document\\\" contains textual
|
||||
content that may include a range of topics or information, but specific details
|
||||
are not available for analysis.\"\n }\n ],\n \"role\": \"assistant\"\n
|
||||
\ }\n ],\n \"parallel_tool_calls\": true,\n \"presence_penalty\": 0.0,\n
|
||||
\ \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\":
|
||||
null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n
|
||||
\ },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\":
|
||||
true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\":
|
||||
\"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\":
|
||||
\"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\":
|
||||
\"disabled\",\n \"usage\": {\n \"input_tokens\": 197,\n \"input_tokens_details\":
|
||||
{\n \"cached_tokens\": 0\n },\n \"output_tokens\": 41,\n \"output_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 238\n },\n
|
||||
\ \"user\": null,\n \"metadata\": {}\n}"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:08:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '1739'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1742'
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,87 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Describe the file(s)
|
||||
you see. Be brief, one sentence max.\n\nInput files (content already loaded
|
||||
in conversation):\n - \"readme\" (review_guidelines.txt)\n\nThis is the expected
|
||||
criteria for your final answer: A brief description of the file.\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
|
||||
"mimeType": "text/plain"}}], "role": "user"}], "systemInstruction": {"parts":
|
||||
[{"text": "You are File Analyst. Expert at analyzing various file types.\nYour
|
||||
personal goal is: Analyze and describe files accurately\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"}], "role": "user"}, "generationConfig":
|
||||
{"stopSequences": ["\nObservation:"]}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1923'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.12.10
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"text\": \"Thought:The file \\\"readme\\\" (review_guidelines.txt)
|
||||
contains guidelines for providing effective feedback, emphasizing clarity,
|
||||
specificity, respect, and actionable suggestions.\\nFinal Answer:Review Guidelines\\n\\n1.
|
||||
Be clear and concise: Write feedback that is easy to understand.\\n2. Focus
|
||||
on behavior and outcomes: Describe what happened and why it matters.\\n3.
|
||||
Be specific: Provide examples to support your points.\\n4. Balance positives
|
||||
and improvements: Highlight strengths and areas to grow.\\n5. Be respectful
|
||||
and constructive: Assume positive intent and offer solutions.\\n6. Use objective
|
||||
criteria: Reference goals, metrics, or expectations where possible.\\n7. Suggest
|
||||
next steps: Recommend actionable ways to improve.\\n8. Proofread: Check tone,
|
||||
grammar, and clarity before submitting.\\n\"\n }\n ],\n \"role\":
|
||||
\"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
|
||||
-0.038670154265415521\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
322,\n \"candidatesTokenCount\": 162,\n \"totalTokenCount\": 484,\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 322\n }\n ],\n \"candidatesTokensDetails\":
|
||||
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 162\n
|
||||
\ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
|
||||
\"-slzaZfmHO-OjMcPirbssQI\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Fri, 23 Jan 2026 19:20:27 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=1324
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -0,0 +1,86 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Describe the file(s)
|
||||
you see. Be brief, one sentence max.\n\nInput files (content already loaded
|
||||
      in conversation):\n - \"readme\" (review_guidelines.txt)\n\nThis is the expected
      criteria for your final answer: A brief description of the file.\nyou MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
      "mimeType": "text/plain"}}], "role": "user"}], "systemInstruction": {"parts":
      [{"text": "You are File Analyst. Expert at analyzing various file types.\nYour
      personal goal is: Analyze and describe files accurately\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"}], "role": "user"}, "generationConfig":
      {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '1923'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.12.10
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"text\": \"Thought:The file \\\"readme\\\" (review_guidelines.txt)
        contains guidelines for providing effective feedback.\\nFinal Answer:Review
        Guidelines\\n\\n1. Be clear and concise: Write feedback that is easy to understand.\\n2.
        Focus on behavior and outcomes: Describe what happened and why it matters.\\n3.
        Be specific: Provide examples to support your points.\\n4. Balance positives
        and improvements: Highlight strengths and areas to grow.\\n5. Be respectful
        and constructive: Assume positive intent and offer solutions.\\n6. Use objective
        criteria: Reference goals, metrics, or expectations where possible.\\n7. Suggest
        next steps: Recommend actionable ways to improve.\\n8. Proofread: Check tone,
        grammar, and clarity before submitting.\\n\"\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
        -0.010346503447223184\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
        322,\n \"candidatesTokenCount\": 151,\n \"totalTokenCount\": 473,\n
        \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
        \ \"tokenCount\": 322\n }\n ],\n \"candidatesTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 151\n
        \ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
        \"7slzab-SIrzFjMcP7faewA4\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 19:20:15 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=1210
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
version: 1
15 file diffs suppressed because one or more lines are too long
@@ -0,0 +1,134 @@
interactions:
- request:
    body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
      Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
      (content already loaded in conversation):\n - \"document\" (document)\n\nThis
      is the expected criteria for your final answer: A brief description of the file.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"gpt-4o-mini","instructions":"You
      are File Analyst. Expert at analyzing various file types.\nYour personal goal
      is: Analyze and describe files accurately\nTo give my best complete final answer
      to the task respond using the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: Your final answer must be the great and the
      most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1526'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/responses
  response:
    body:
      string: "{\n \"id\": \"resp_0ae34e7d3a963b0e006973c755f5648190baad1053ecbde4a2\",\n
        \ \"object\": \"response\",\n \"created_at\": 1769195350,\n \"status\":
        \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
        \"developer\"\n },\n \"completed_at\": 1769195351,\n \"error\": null,\n
        \ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
        \"You are File Analyst. Expert at analyzing various file types.\\nYour personal
        goal is: Analyze and describe files accurately\\nTo give my best complete
        final answer to the task respond using the exact following format:\\n\\nThought:
        I now can give a great answer\\nFinal Answer: Your final answer must be the
        great and the most complete as possible, it must be outcome described.\\n\\nI
        MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
        null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"output\": [\n {\n \"id\": \"msg_0ae34e7d3a963b0e006973c756702481909ce7960ef11dd6f6\",\n
        \ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
        [\n {\n \"type\": \"output_text\",\n \"annotations\":
        [],\n \"logprobs\": [],\n \"text\": \"Thought: I now can
        give a great answer \\nFinal Answer: The file is a document containing text-based
        content, specifics not provided.\"\n }\n ],\n \"role\": \"assistant\"\n
        \ }\n ],\n \"parallel_tool_calls\": true,\n \"presence_penalty\": 0.0,\n
        \ \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\":
        null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n
        \ },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\":
        true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\":
        \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\":
        \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\":
        \"disabled\",\n \"usage\": {\n \"input_tokens\": 197,\n \"input_tokens_details\":
        {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 28,\n \"output_tokens_details\":
        {\n \"reasoning_tokens\": 0\n },\n \"total_tokens\": 225\n },\n
        \ \"user\": null,\n \"metadata\": {}\n}"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:09:11 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '1337'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1340'
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
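The cassette above records the new Responses API path: the PDF travels inline as an "input_file" content part carrying a base64 data: URI, with the agent's system prompt passed through "instructions". For reference, a minimal client-side sketch that would produce a request of this shape; the prompt text and local file path are illustrative, not taken from the recording:

import base64
from pathlib import Path

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

pdf_path = Path("document.pdf")  # hypothetical local file
pdf_b64 = base64.b64encode(pdf_path.read_bytes()).decode("ascii")

response = client.responses.create(
    model="gpt-4o-mini",
    instructions="You are File Analyst. Expert at analyzing various file types.",
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "Describe the file(s) you see."},
                {
                    # "input_file" part with a base64 data: URI, matching the
                    # recorded request body above.
                    "type": "input_file",
                    "filename": pdf_path.name,
                    "file_data": f"data:application/pdf;base64,{pdf_b64}",
                },
            ],
        }
    ],
)
print(response.output_text)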
@@ -0,0 +1,136 @@
interactions:
- request:
    body: '{"input":[{"role":"user","content":[{"type":"input_text","text":"\nCurrent
      Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
      (content already loaded in conversation):\n - \"document\" (document)\n\nThis
      is the expected criteria for your final answer: A brief description of the file.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"},{"type":"input_file","filename":"document.pdf","file_data":"data:application/pdf;base64,JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]}],"model":"o4-mini","instructions":"You
      are File Analyst. Expert at analyzing various file types.\nYour personal goal
      is: Analyze and describe files accurately\nTo give my best complete final answer
      to the task respond using the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: Your final answer must be the great and the
      most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '1522'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/responses
  response:
    body:
      string: "{\n \"id\": \"resp_0f3ca1e8c567449a006973c75772f48196b33478c88491c2b2\",\n
        \ \"object\": \"response\",\n \"created_at\": 1769195351,\n \"status\":
        \"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
        \"developer\"\n },\n \"completed_at\": 1769195360,\n \"error\": null,\n
        \ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
        \"You are File Analyst. Expert at analyzing various file types.\\nYour personal
        goal is: Analyze and describe files accurately\\nTo give my best complete
        final answer to the task respond using the exact following format:\\n\\nThought:
        I now can give a great answer\\nFinal Answer: Your final answer must be the
        great and the most complete as possible, it must be outcome described.\\n\\nI
        MUST use these formats, my job depends on it!\",\n \"max_output_tokens\":
        null,\n \"max_tool_calls\": null,\n \"model\": \"o4-mini-2025-04-16\",\n
        \ \"output\": [\n {\n \"id\": \"rs_0f3ca1e8c567449a006973c757d264819686b5a624e57d0f3c\",\n
        \ \"type\": \"reasoning\",\n \"summary\": []\n },\n {\n \"id\":
        \"msg_0f3ca1e8c567449a006973c76063ac8196b09e99c8ddefb880\",\n \"type\":
        \"message\",\n \"status\": \"completed\",\n \"content\": [\n {\n
        \ \"type\": \"output_text\",\n \"annotations\": [],\n \"logprobs\":
        [],\n \"text\": \"Thought: I now can give a great answer \\nFinal
        Answer: The file named \\u201cdocument\\u201d is a multi-page PDF document
        containing formatted text and embedded images.\"\n }\n ],\n \"role\":
        \"assistant\"\n }\n ],\n \"parallel_tool_calls\": true,\n \"presence_penalty\":
        0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\": null,\n \"prompt_cache_retention\":
        null,\n \"reasoning\": {\n \"effort\": \"medium\",\n \"summary\": null\n
        \ },\n \"safety_identifier\": null,\n \"service_tier\": \"default\",\n \"store\":
        true,\n \"temperature\": 1.0,\n \"text\": {\n \"format\": {\n \"type\":
        \"text\"\n },\n \"verbosity\": \"medium\"\n },\n \"tool_choice\":
        \"auto\",\n \"tools\": [],\n \"top_logprobs\": 0,\n \"top_p\": 1.0,\n \"truncation\":
        \"disabled\",\n \"usage\": {\n \"input_tokens\": 196,\n \"input_tokens_details\":
        {\n \"cached_tokens\": 0\n },\n \"output_tokens\": 998,\n \"output_tokens_details\":
        {\n \"reasoning_tokens\": 960\n },\n \"total_tokens\": 1194\n },\n
        \ \"user\": null,\n \"metadata\": {}\n}"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:09:20 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '9524'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '9527'
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
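The o4-mini cassette above uses the identical request shape; the difference is in the response, which carries a "reasoning" output item and 960 reasoning_tokens in usage. If a caller wants to steer that behavior, the Responses API accepts a reasoning parameter; a sketch, with an illustrative effort value (the recording shows the default "medium" echoed back):

from openai import OpenAI

client = OpenAI()
response = client.responses.create(
    model="o4-mini",
    reasoning={"effort": "medium"},  # o-series models echo this in the response
    input="Describe the attached file.",  # illustrative text-only input
)
print(response.output_text)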
@@ -0,0 +1,216 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
      Task: What type of file is this? Just name the file type.\n\nAvailable input
      files (use the name in quotes with read_file tool):\n - \"audio\" (sample_audio.wav,
      audio/x-wav)\n\nThis is the expected criteria for your final answer: The file
      type.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nThis is VERY important to you, your job depends on it!"}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are File Analyst. Expert analyst.\nYour personal goal is: Analyze files","tools":[{"name":"read_file","description":"Read
      content from an input file by name. Returns file content as text for text files,
      or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
      name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '990'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01QU6yxsqjgEv42P6oiBunJt","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll
        help you determine the file type by reading the file."},{"type":"tool_use","id":"toolu_01SyxRH53DtusM3NbV1FbTxm","name":"read_file","input":{"file_name":"sample_audio.wav"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":488,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":73,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:09:06 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-requests-limit:
      - '4000'
      anthropic-ratelimit-requests-remaining:
      - '3999'
      anthropic-ratelimit-requests-reset:
      - '2026-01-23T19:09:04Z'
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '1456'
    status:
      code: 200
      message: OK
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
      Task: What type of file is this? Just name the file type.\n\nAvailable input
      files (use the name in quotes with read_file tool):\n - \"audio\" (sample_audio.wav,
      audio/x-wav)\n\nThis is the expected criteria for your final answer: The file
      type.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nThis is VERY important to you, your job depends on it!"}]},{"role":"assistant","content":[{"type":"tool_use","id":"toolu_01SyxRH53DtusM3NbV1FbTxm","name":"read_file","input":{"file_name":"sample_audio.wav"}}]},{"role":"user","content":[{"type":"tool_result","tool_use_id":"toolu_01SyxRH53DtusM3NbV1FbTxm","content":"File
      ''sample_audio.wav'' not found. Available files: audio"}]},{"role":"user","content":"Analyze
      the tool result. If requirements are met, provide the Final Answer. Otherwise,
      call the next tool. Deliver only the answer without meta-commentary."}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are File Analyst. Expert analyst.\nYour personal goal is: Analyze files","tools":[{"name":"read_file","description":"Read
      content from an input file by name. Returns file content as text for text files,
      or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
      name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '1493'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01DV3EtdNM2aejaez5csxZut","type":"message","role":"assistant","content":[{"type":"text","text":"wav"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":614,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:09:07 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-requests-limit:
      - '4000'
      anthropic-ratelimit-requests-remaining:
      - '3999'
      anthropic-ratelimit-requests-reset:
      - '2026-01-23T19:09:06Z'
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '1355'
    status:
      code: 200
      message: OK
version: 1
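The two interactions above record a complete tool-use round trip: Claude calls read_file with the wrong name ("sample_audio.wav" instead of the registered key "audio"), receives the error string as a tool_result, and the follow-up request yields the final answer "wav". A condensed sketch of the client side of that loop; the file registry and prompt text are illustrative stand-ins:

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

TOOLS = [{
    "name": "read_file",
    "description": "Read content from an input file by name.",
    "input_schema": {
        "type": "object",
        "properties": {"file_name": {"type": "string"}},
        "required": ["file_name"],
    },
}]

FILES = {"audio": "<wav bytes as base64>"}  # hypothetical registry

def read_file(name: str) -> str:
    # Mirror the fixture's behavior: return an error string on a miss.
    return FILES.get(name, f"File '{name}' not found. Available files: {', '.join(FILES)}")

messages = [{"role": "user", "content": 'What type of file is this? Available: "audio" (sample_audio.wav)'}]
reply = client.messages.create(
    model="claude-3-5-haiku-20241022", max_tokens=4096, tools=TOOLS, messages=messages
)

while reply.stop_reason == "tool_use":
    call = next(block for block in reply.content if block.type == "tool_use")
    # Echo the assistant turn back, then answer the tool call with a tool_result.
    messages.append({"role": "assistant", "content": reply.content})
    messages.append({
        "role": "user",
        "content": [{
            "type": "tool_result",
            "tool_use_id": call.id,
            "content": read_file(call.input["file_name"]),
        }],
    })
    reply = client.messages.create(
        model="claude-3-5-haiku-20241022", max_tokens=4096, tools=TOOLS, messages=messages
    )

print(reply.content[0].text)  # "wav" in the recording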
2 file diffs suppressed because one or more lines are too long
@@ -0,0 +1,112 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"\nCurrent
      Task: Describe the file(s) you see. Be brief, one sentence max.\n\nInput files
      (content already loaded in conversation):\n - \"document\" (document)\n\nThis
      is the expected criteria for your final answer: A brief description of the file.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"},{"type":"document","source":{"type":"base64","media_type":"application/pdf","data":"JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="},"cache_control":{"type":"ephemeral"}}]}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are File Analyst. Expert at analyzing various file types.\nYour personal goal
      is: Analyze and describe files accurately\nTo give my best complete final answer
      to the task respond using the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: Your final answer must be the great and the
      most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '1634'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01VjpjCRfkFSNtf4MRmmtmoy","type":"message","role":"assistant","content":[{"type":"text","text":"Thought:
        I see a PDF document with a blank white page.\n\nFinal Answer: A blank white
        PDF document with no visible content."}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1815,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":31,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:07:55 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-requests-limit:
      - '4000'
      anthropic-ratelimit-requests-remaining:
      - '3999'
      anthropic-ratelimit-requests-reset:
      - '2026-01-23T19:07:53Z'
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '1584'
    status:
      code: 200
      message: OK
version: 1
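This cassette covers the Anthropic document block plus prompt caching: the PDF is sent as a base64 "document" source with "cache_control": {"type": "ephemeral"} attached, and the response usage block accounts for it via the cache_creation/cache_read counters. A minimal sketch of a request with this shape; the local file path is illustrative:

import base64
from pathlib import Path

import anthropic

client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

pdf_b64 = base64.b64encode(Path("document.pdf").read_bytes()).decode("ascii")  # hypothetical path

message = client.messages.create(
    model="claude-3-5-haiku-20241022",
    max_tokens=4096,
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the file(s) you see."},
            {
                "type": "document",
                "source": {
                    "type": "base64",
                    "media_type": "application/pdf",
                    "data": pdf_b64,
                },
                # Mark the document as a prompt-cache breakpoint, as in the
                # recorded request above.
                "cache_control": {"type": "ephemeral"},
            },
        ],
    }],
)
print(message.content[0].text)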
3 file diffs suppressed because one or more lines are too long
@@ -0,0 +1,86 @@
interactions:
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Describe the file(s)
      you see. Be brief, one sentence max.\n\nInput files (content already loaded
      in conversation):\n - \"readme\" (review_guidelines.txt)\n\nThis is the expected
      criteria for your final answer: A brief description of the file.\nyou MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}, {"inlineData": {"data": "UmV2aWV3IEd1aWRlbGluZXMKCjEuIEJlIGNsZWFyIGFuZCBjb25jaXNlOiBXcml0ZSBmZWVkYmFjayB0aGF0IGlzIGVhc3kgdG8gdW5kZXJzdGFuZC4KMi4gRm9jdXMgb24gYmVoYXZpb3IgYW5kIG91dGNvbWVzOiBEZXNjcmliZSB3aGF0IGhhcHBlbmVkIGFuZCB3aHkgaXQgbWF0dGVycy4KMy4gQmUgc3BlY2lmaWM6IFByb3ZpZGUgZXhhbXBsZXMgdG8gc3VwcG9ydCB5b3VyIHBvaW50cy4KNC4gQmFsYW5jZSBwb3NpdGl2ZXMgYW5kIGltcHJvdmVtZW50czogSGlnaGxpZ2h0IHN0cmVuZ3RocyBhbmQgYXJlYXMgdG8gZ3Jvdy4KNS4gQmUgcmVzcGVjdGZ1bCBhbmQgY29uc3RydWN0aXZlOiBBc3N1bWUgcG9zaXRpdmUgaW50ZW50IGFuZCBvZmZlciBzb2x1dGlvbnMuCjYuIFVzZSBvYmplY3RpdmUgY3JpdGVyaWE6IFJlZmVyZW5jZSBnb2FscywgbWV0cmljcywgb3IgZXhwZWN0YXRpb25zIHdoZXJlIHBvc3NpYmxlLgo3LiBTdWdnZXN0IG5leHQgc3RlcHM6IFJlY29tbWVuZCBhY3Rpb25hYmxlIHdheXMgdG8gaW1wcm92ZS4KOC4gUHJvb2ZyZWFkOiBDaGVjayB0b25lLCBncmFtbWFyLCBhbmQgY2xhcml0eSBiZWZvcmUgc3VibWl0dGluZy4K",
      "mimeType": "text/plain"}}], "role": "user"}], "systemInstruction": {"parts":
      [{"text": "You are File Analyst. Expert at analyzing various file types.\nYour
      personal goal is: Analyze and describe files accurately\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"}], "role": "user"}, "generationConfig":
      {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '1923'
      content-type:
      - application/json
      host:
      - generativelanguage.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.49.0 gl-python/3.12.10
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
        [\n {\n \"text\": \"Thought:The file \\\"readme\\\" (review_guidelines.txt)
        contains a set of guidelines for providing effective feedback.\\nFinal Answer:Review
        Guidelines\\n\\n1. Be clear and concise: Write feedback that is easy to understand.\\n2.
        Focus on behavior and outcomes: Describe what happened and why it matters.\\n3.
        Be specific: Provide examples to support your points.\\n4. Balance positives
        and improvements: Highlight strengths and areas to grow.\\n5. Be respectful
        and constructive: Assume positive intent and offer solutions.\\n6. Use objective
        criteria: Reference goals, metrics, or expectations where possible.\\n7. Suggest
        next steps: Recommend actionable ways to improve.\\n8. Proofread: Check tone,
        grammar, and clarity before submitting.\\n\"\n }\n ],\n \"role\":
        \"model\"\n },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\":
        -0.01580470103722114\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
        322,\n \"candidatesTokenCount\": 154,\n \"totalTokenCount\": 476,\n
        \ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
        \ \"tokenCount\": 322\n }\n ],\n \"candidatesTokensDetails\":
        [\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 154\n
        \ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
        \"88lzadVvppOMxw_QtcepAg\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Fri, 23 Jan 2026 19:20:20 GMT
      Server:
      - scaffolding on HTTPServer2
      Server-Timing:
      - gfet4t7; dur=1211
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
version: 1
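For Gemini, the text file rides along as an "inlineData" part with raw base64 and a mimeType, the system prompt goes through "systemInstruction", and the stop sequence through "generationConfig". A sketch using the google-genai SDK the recording was made with (google-genai-sdk/1.49.0 per the request headers); the local file path is illustrative:

from pathlib import Path

from google import genai
from google.genai import types

client = genai.Client()  # reads GEMINI_API_KEY / GOOGLE_API_KEY from the environment

data = Path("review_guidelines.txt").read_bytes()  # hypothetical local file

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[
        "Describe the file(s) you see.",
        # Part.from_bytes serializes to the {"inlineData": {"data", "mimeType"}}
        # part seen in the recorded request body.
        types.Part.from_bytes(data=data, mime_type="text/plain"),
    ],
    config=types.GenerateContentConfig(
        system_instruction="You are File Analyst.",
        stop_sequences=["\nObservation:"],
    ),
)
print(response.text)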
3 file diffs suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff