mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-05-05 01:02:37 +00:00
Merge branch 'main' of github.com:crewAIInc/crewAI into lorenze/feat/file-discovery-tools
This commit is contained in:
@@ -33,8 +33,11 @@ def test_brave_tool_search(mock_get, brave_tool):
|
||||
mock_get.return_value.json.return_value = mock_response
|
||||
|
||||
result = brave_tool.run(query="test")
|
||||
assert "Test Title" in result
|
||||
assert "http://test.com" in result
|
||||
data = json.loads(result)
|
||||
assert isinstance(data, list)
|
||||
assert len(data) >= 1
|
||||
assert data[0]["title"] == "Test Title"
|
||||
assert data[0]["url"] == "http://test.com"
|
||||
|
||||
|
||||
@patch("requests.get")
|
||||
|
||||
@@ -14,7 +14,7 @@ dependencies = [
|
||||
"instructor>=1.3.3",
|
||||
# Text Processing
|
||||
"pdfplumber~=0.11.4",
|
||||
"regex~=2024.9.11",
|
||||
"regex~=2026.1.15",
|
||||
# Telemetry and Monitoring
|
||||
"opentelemetry-api~=1.34.0",
|
||||
"opentelemetry-sdk~=1.34.0",
|
||||
@@ -36,7 +36,7 @@ dependencies = [
|
||||
"json5~=0.10.0",
|
||||
"portalocker~=2.7.0",
|
||||
"pydantic-settings~=2.10.1",
|
||||
"mcp~=1.23.1",
|
||||
"mcp~=1.26.0",
|
||||
"uv~=0.9.13",
|
||||
"aiosqlite~=0.21.0",
|
||||
]
|
||||
|
||||
@@ -118,6 +118,8 @@ MCP_TOOL_EXECUTION_TIMEOUT: Final[int] = 30
|
||||
MCP_DISCOVERY_TIMEOUT: Final[int] = 15
|
||||
MCP_MAX_RETRIES: Final[int] = 3
|
||||
|
||||
_passthrough_exceptions: tuple[type[Exception], ...] = ()
|
||||
|
||||
# Simple in-memory cache for MCP tool schemas (duration: 5 minutes)
|
||||
_mcp_schema_cache: dict[str, Any] = {}
|
||||
_cache_ttl: Final[int] = 300 # 5 minutes
|
||||
@@ -479,6 +481,8 @@ class Agent(BaseAgent):
|
||||
),
|
||||
)
|
||||
raise e
|
||||
if isinstance(e, _passthrough_exceptions):
|
||||
raise
|
||||
self._times_executed += 1
|
||||
if self._times_executed > self.max_retry_limit:
|
||||
crewai_event_bus.emit(
|
||||
@@ -711,6 +715,8 @@ class Agent(BaseAgent):
|
||||
),
|
||||
)
|
||||
raise e
|
||||
if isinstance(e, _passthrough_exceptions):
|
||||
raise
|
||||
self._times_executed += 1
|
||||
if self._times_executed > self.max_retry_limit:
|
||||
crewai_event_bus.emit(
|
||||
|
||||
@@ -37,9 +37,10 @@ class BaseAgentAdapter(BaseAgent, ABC):
|
||||
tools: Optional list of BaseTool instances to be configured
|
||||
"""
|
||||
|
||||
def configure_structured_output(self, structured_output: Any) -> None:
|
||||
@abstractmethod
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
"""Configure the structured output for the specific agent implementation.
|
||||
|
||||
Args:
|
||||
structured_output: The structured output to be configured
|
||||
task: The task object containing output format specifications.
|
||||
"""
|
||||
|
||||
@@ -4,7 +4,6 @@ import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.agents.parser import AgentFinish
|
||||
from crewai.events.event_listener import event_listener
|
||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
|
||||
from crewai.utilities.converter import ConverterError
|
||||
@@ -138,52 +137,3 @@ class CrewAgentExecutorMixin:
|
||||
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
|
||||
color="bold_yellow",
|
||||
)
|
||||
|
||||
def _ask_human_input(self, final_answer: str) -> str:
|
||||
"""Prompt human input with mode-appropriate messaging.
|
||||
|
||||
Note: The final answer is already displayed via the AgentLogsExecutionEvent
|
||||
panel, so we only show the feedback prompt here.
|
||||
"""
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
formatter = event_listener.formatter
|
||||
formatter.pause_live_updates()
|
||||
|
||||
try:
|
||||
# Training mode prompt (single iteration)
|
||||
if self.crew and getattr(self.crew, "_train", False):
|
||||
prompt_text = (
|
||||
"TRAINING MODE: Provide feedback to improve the agent's performance.\n\n"
|
||||
"This will be used to train better versions of the agent.\n"
|
||||
"Please provide detailed feedback about the result quality and reasoning process."
|
||||
)
|
||||
title = "🎓 Training Feedback Required"
|
||||
# Regular human-in-the-loop prompt (multiple iterations)
|
||||
else:
|
||||
prompt_text = (
|
||||
"Provide feedback on the Final Result above.\n\n"
|
||||
"• If you are happy with the result, simply hit Enter without typing anything.\n"
|
||||
"• Otherwise, provide specific improvement requests.\n"
|
||||
"• You can provide multiple rounds of feedback until satisfied."
|
||||
)
|
||||
title = "💬 Human Feedback Required"
|
||||
|
||||
content = Text()
|
||||
content.append(prompt_text, style="yellow")
|
||||
|
||||
prompt_panel = Panel(
|
||||
content,
|
||||
title=title,
|
||||
border_style="yellow",
|
||||
padding=(1, 2),
|
||||
)
|
||||
formatter.console.print(prompt_panel)
|
||||
|
||||
response = input()
|
||||
if response.strip() != "":
|
||||
formatter.console.print("\n[cyan]Processing your feedback...[/cyan]")
|
||||
return response
|
||||
finally:
|
||||
formatter.resume_live_updates()
|
||||
|
||||
@@ -19,6 +19,7 @@ from crewai.agents.parser import (
|
||||
AgentFinish,
|
||||
OutputParserError,
|
||||
)
|
||||
from crewai.core.providers.human_input import ExecutorContext, get_provider
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.logging_events import (
|
||||
AgentLogsExecutionEvent,
|
||||
@@ -175,15 +176,16 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
"""
|
||||
return self.llm.supports_stop_words() if self.llm else False
|
||||
|
||||
def invoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Execute the agent with given inputs.
|
||||
def _setup_messages(self, inputs: dict[str, Any]) -> None:
|
||||
"""Set up messages for the agent execution.
|
||||
|
||||
Args:
|
||||
inputs: Input dictionary containing prompt variables.
|
||||
|
||||
Returns:
|
||||
Dictionary with agent output.
|
||||
"""
|
||||
provider = get_provider()
|
||||
if provider.setup_messages(cast(ExecutorContext, cast(object, self))):
|
||||
return
|
||||
|
||||
if "system" in self.prompt:
|
||||
system_prompt = self._format_prompt(
|
||||
cast(str, self.prompt.get("system", "")), inputs
|
||||
@@ -197,6 +199,19 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
|
||||
self.messages.append(format_message_for_llm(user_prompt))
|
||||
|
||||
provider.post_setup_messages(cast(ExecutorContext, cast(object, self)))
|
||||
|
||||
def invoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Execute the agent with given inputs.
|
||||
|
||||
Args:
|
||||
inputs: Input dictionary containing prompt variables.
|
||||
|
||||
Returns:
|
||||
Dictionary with agent output.
|
||||
"""
|
||||
self._setup_messages(inputs)
|
||||
|
||||
self._inject_multimodal_files(inputs)
|
||||
|
||||
self._show_start_logs()
|
||||
@@ -799,6 +814,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
agent_key=agent_key,
|
||||
),
|
||||
)
|
||||
error_event_emitted = False
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
@@ -881,6 +897,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
elif max_usage_reached and original_tool:
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
@@ -908,20 +925,20 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
color="red",
|
||||
)
|
||||
|
||||
# Emit tool usage finished event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
if not error_event_emitted:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
|
||||
# Append tool result message
|
||||
tool_message: LLMMessage = {
|
||||
@@ -970,18 +987,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
Returns:
|
||||
Dictionary with agent output.
|
||||
"""
|
||||
if "system" in self.prompt:
|
||||
system_prompt = self._format_prompt(
|
||||
cast(str, self.prompt.get("system", "")), inputs
|
||||
)
|
||||
user_prompt = self._format_prompt(
|
||||
cast(str, self.prompt.get("user", "")), inputs
|
||||
)
|
||||
self.messages.append(format_message_for_llm(system_prompt, role="system"))
|
||||
self.messages.append(format_message_for_llm(user_prompt))
|
||||
else:
|
||||
user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
|
||||
self.messages.append(format_message_for_llm(user_prompt))
|
||||
self._setup_messages(inputs)
|
||||
|
||||
await self._ainject_multimodal_files(inputs)
|
||||
|
||||
@@ -1003,7 +1009,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
raise
|
||||
|
||||
if self.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
formatted_answer = await self._ahandle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
@@ -1491,7 +1497,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
return prompt.replace("{tools}", inputs["tools"])
|
||||
|
||||
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
||||
"""Process human feedback.
|
||||
"""Process human feedback via the configured provider.
|
||||
|
||||
Args:
|
||||
formatted_answer: Initial agent result.
|
||||
@@ -1499,17 +1505,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
Returns:
|
||||
Final answer after feedback.
|
||||
"""
|
||||
output_str = (
|
||||
formatted_answer.output
|
||||
if isinstance(formatted_answer.output, str)
|
||||
else formatted_answer.output.model_dump_json()
|
||||
)
|
||||
human_feedback = self._ask_human_input(output_str)
|
||||
provider = get_provider()
|
||||
return provider.handle_feedback(formatted_answer, self)
|
||||
|
||||
if self._is_training_mode():
|
||||
return self._handle_training_feedback(formatted_answer, human_feedback)
|
||||
async def _ahandle_human_feedback(
|
||||
self, formatted_answer: AgentFinish
|
||||
) -> AgentFinish:
|
||||
"""Process human feedback asynchronously via the configured provider.
|
||||
|
||||
return self._handle_regular_feedback(formatted_answer, human_feedback)
|
||||
Args:
|
||||
formatted_answer: Initial agent result.
|
||||
|
||||
Returns:
|
||||
Final answer after feedback.
|
||||
"""
|
||||
provider = get_provider()
|
||||
return await provider.handle_feedback_async(formatted_answer, self)
|
||||
|
||||
def _is_training_mode(self) -> bool:
|
||||
"""Check if training mode is active.
|
||||
@@ -1519,74 +1530,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
"""
|
||||
return bool(self.crew and self.crew._train)
|
||||
|
||||
def _handle_training_feedback(
|
||||
self, initial_answer: AgentFinish, feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process training feedback.
|
||||
def _format_feedback_message(self, feedback: str) -> LLMMessage:
|
||||
"""Format feedback as a message for the LLM.
|
||||
|
||||
Args:
|
||||
initial_answer: Initial agent output.
|
||||
feedback: Training feedback.
|
||||
feedback: User feedback string.
|
||||
|
||||
Returns:
|
||||
Improved answer.
|
||||
Formatted message dict.
|
||||
"""
|
||||
self._handle_crew_training_output(initial_answer, feedback)
|
||||
self.messages.append(
|
||||
format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
return format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
improved_answer = self._invoke_loop()
|
||||
self._handle_crew_training_output(improved_answer)
|
||||
self.ask_for_human_input = False
|
||||
return improved_answer
|
||||
|
||||
def _handle_regular_feedback(
|
||||
self, current_answer: AgentFinish, initial_feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process regular feedback iteratively.
|
||||
|
||||
Args:
|
||||
current_answer: Current agent output.
|
||||
initial_feedback: Initial user feedback.
|
||||
|
||||
Returns:
|
||||
Final answer after iterations.
|
||||
"""
|
||||
feedback = initial_feedback
|
||||
answer = current_answer
|
||||
|
||||
while self.ask_for_human_input:
|
||||
# If the user provides a blank response, assume they are happy with the result
|
||||
if feedback.strip() == "":
|
||||
self.ask_for_human_input = False
|
||||
else:
|
||||
answer = self._process_feedback_iteration(feedback)
|
||||
output_str = (
|
||||
answer.output
|
||||
if isinstance(answer.output, str)
|
||||
else answer.output.model_dump_json()
|
||||
)
|
||||
feedback = self._ask_human_input(output_str)
|
||||
|
||||
return answer
|
||||
|
||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
|
||||
"""Process single feedback iteration.
|
||||
|
||||
Args:
|
||||
feedback: User feedback.
|
||||
|
||||
Returns:
|
||||
Updated agent response.
|
||||
"""
|
||||
self.messages.append(
|
||||
format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
return self._invoke_loop()
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
|
||||
@@ -143,6 +143,12 @@ def create_folder_structure(
|
||||
(folder_path / "src" / folder_name).mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "tools").mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "config").mkdir(parents=True)
|
||||
|
||||
# Copy AGENTS.md to project root (top-level projects only)
|
||||
package_dir = Path(__file__).parent
|
||||
agents_md_src = package_dir / "templates" / "AGENTS.md"
|
||||
if agents_md_src.exists():
|
||||
shutil.copy2(agents_md_src, folder_path / "AGENTS.md")
|
||||
|
||||
return folder_path, folder_name, class_name
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
@@ -34,6 +35,11 @@ def create_flow(name):
|
||||
package_dir = Path(__file__).parent
|
||||
templates_dir = package_dir / "templates" / "flow"
|
||||
|
||||
# Copy AGENTS.md to project root
|
||||
agents_md_src = package_dir / "templates" / "AGENTS.md"
|
||||
if agents_md_src.exists():
|
||||
shutil.copy2(agents_md_src, project_root / "AGENTS.md")
|
||||
|
||||
# List of template files to copy
|
||||
root_template_files = [".gitignore", "pyproject.toml", "README.md"]
|
||||
src_template_files = ["__init__.py", "main.py"]
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import os
|
||||
from typing import Any
|
||||
from urllib.parse import urljoin
|
||||
import os
|
||||
|
||||
import httpx
|
||||
import requests
|
||||
|
||||
from crewai.cli.config import Settings
|
||||
@@ -33,7 +35,11 @@ class PlusAPI:
|
||||
if settings.org_uuid:
|
||||
self.headers["X-Crewai-Organization-Id"] = settings.org_uuid
|
||||
|
||||
self.base_url = os.getenv("CREWAI_PLUS_URL") or str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
|
||||
self.base_url = (
|
||||
os.getenv("CREWAI_PLUS_URL")
|
||||
or str(settings.enterprise_base_url)
|
||||
or DEFAULT_CREWAI_ENTERPRISE_URL
|
||||
)
|
||||
|
||||
def _make_request(
|
||||
self, method: str, endpoint: str, **kwargs: Any
|
||||
@@ -49,8 +55,10 @@ class PlusAPI:
|
||||
def get_tool(self, handle: str) -> requests.Response:
|
||||
return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}")
|
||||
|
||||
def get_agent(self, handle: str) -> requests.Response:
|
||||
return self._make_request("GET", f"{self.AGENTS_RESOURCE}/{handle}")
|
||||
async def get_agent(self, handle: str) -> httpx.Response:
|
||||
url = urljoin(self.base_url, f"{self.AGENTS_RESOURCE}/{handle}")
|
||||
async with httpx.AsyncClient() as client:
|
||||
return await client.get(url, headers=self.headers)
|
||||
|
||||
def publish_tool(
|
||||
self,
|
||||
|
||||
1017
lib/crewai/src/crewai/cli/templates/AGENTS.md
Normal file
1017
lib/crewai/src/crewai/cli/templates/AGENTS.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -2,6 +2,7 @@ import base64
|
||||
from json import JSONDecodeError
|
||||
import os
|
||||
from pathlib import Path
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
from typing import Any
|
||||
@@ -55,6 +56,11 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
tree_find_and_replace(project_root, "{{folder_name}}", folder_name)
|
||||
tree_find_and_replace(project_root, "{{class_name}}", class_name)
|
||||
|
||||
# Copy AGENTS.md to project root
|
||||
agents_md_src = Path(__file__).parent.parent / "templates" / "AGENTS.md"
|
||||
if agents_md_src.exists():
|
||||
shutil.copy2(agents_md_src, project_root / "AGENTS.md")
|
||||
|
||||
old_directory = os.getcwd()
|
||||
os.chdir(project_root)
|
||||
try:
|
||||
|
||||
@@ -6,12 +6,12 @@ from functools import lru_cache
|
||||
import importlib.metadata
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any, cast
|
||||
from typing import Any
|
||||
from urllib import request
|
||||
from urllib.error import URLError
|
||||
|
||||
import appdirs
|
||||
from packaging.version import InvalidVersion, parse
|
||||
from packaging.version import InvalidVersion, Version, parse
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
@@ -42,21 +42,88 @@ def _is_cache_valid(cache_data: Mapping[str, Any]) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def _find_latest_non_yanked_version(
|
||||
releases: Mapping[str, list[dict[str, Any]]],
|
||||
) -> str | None:
|
||||
"""Find the latest non-yanked version from PyPI releases data.
|
||||
|
||||
Args:
|
||||
releases: PyPI releases dict mapping version strings to file info lists.
|
||||
|
||||
Returns:
|
||||
The latest non-yanked version string, or None if all versions are yanked.
|
||||
"""
|
||||
best_version: Version | None = None
|
||||
best_version_str: str | None = None
|
||||
|
||||
for version_str, files in releases.items():
|
||||
try:
|
||||
v = parse(version_str)
|
||||
except InvalidVersion:
|
||||
continue
|
||||
|
||||
if v.is_prerelease or v.is_devrelease:
|
||||
continue
|
||||
|
||||
if not files:
|
||||
continue
|
||||
|
||||
all_yanked = all(f.get("yanked", False) for f in files)
|
||||
if all_yanked:
|
||||
continue
|
||||
|
||||
if best_version is None or v > best_version:
|
||||
best_version = v
|
||||
best_version_str = version_str
|
||||
|
||||
return best_version_str
|
||||
|
||||
|
||||
def _is_version_yanked(
|
||||
version_str: str,
|
||||
releases: Mapping[str, list[dict[str, Any]]],
|
||||
) -> tuple[bool, str]:
|
||||
"""Check if a specific version is yanked.
|
||||
|
||||
Args:
|
||||
version_str: The version string to check.
|
||||
releases: PyPI releases dict mapping version strings to file info lists.
|
||||
|
||||
Returns:
|
||||
Tuple of (is_yanked, yanked_reason).
|
||||
"""
|
||||
files = releases.get(version_str, [])
|
||||
if not files:
|
||||
return False, ""
|
||||
|
||||
all_yanked = all(f.get("yanked", False) for f in files)
|
||||
if not all_yanked:
|
||||
return False, ""
|
||||
|
||||
for f in files:
|
||||
reason = f.get("yanked_reason", "")
|
||||
if reason:
|
||||
return True, str(reason)
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_latest_version_from_pypi(timeout: int = 2) -> str | None:
|
||||
"""Get the latest version of CrewAI from PyPI.
|
||||
"""Get the latest non-yanked version of CrewAI from PyPI.
|
||||
|
||||
Args:
|
||||
timeout: Request timeout in seconds.
|
||||
|
||||
Returns:
|
||||
Latest version string or None if unable to fetch.
|
||||
Latest non-yanked version string or None if unable to fetch.
|
||||
"""
|
||||
cache_file = _get_cache_file()
|
||||
if cache_file.exists():
|
||||
try:
|
||||
cache_data = json.loads(cache_file.read_text())
|
||||
if _is_cache_valid(cache_data):
|
||||
return cast(str | None, cache_data.get("version"))
|
||||
if _is_cache_valid(cache_data) and "current_version" in cache_data:
|
||||
version: str | None = cache_data.get("version")
|
||||
return version
|
||||
except (json.JSONDecodeError, OSError):
|
||||
pass
|
||||
|
||||
@@ -65,11 +132,18 @@ def get_latest_version_from_pypi(timeout: int = 2) -> str | None:
|
||||
"https://pypi.org/pypi/crewai/json", timeout=timeout
|
||||
) as response:
|
||||
data = json.loads(response.read())
|
||||
latest_version = cast(str, data["info"]["version"])
|
||||
releases: dict[str, list[dict[str, Any]]] = data["releases"]
|
||||
latest_version = _find_latest_non_yanked_version(releases)
|
||||
|
||||
current_version = get_crewai_version()
|
||||
is_yanked, yanked_reason = _is_version_yanked(current_version, releases)
|
||||
|
||||
cache_data = {
|
||||
"version": latest_version,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"current_version": current_version,
|
||||
"current_version_yanked": is_yanked,
|
||||
"current_version_yanked_reason": yanked_reason,
|
||||
}
|
||||
cache_file.write_text(json.dumps(cache_data))
|
||||
|
||||
@@ -78,6 +152,40 @@ def get_latest_version_from_pypi(timeout: int = 2) -> str | None:
|
||||
return None
|
||||
|
||||
|
||||
def is_current_version_yanked() -> tuple[bool, str]:
|
||||
"""Check if the currently installed version has been yanked on PyPI.
|
||||
|
||||
Reads from cache if available, otherwise triggers a fetch.
|
||||
|
||||
Returns:
|
||||
Tuple of (is_yanked, yanked_reason).
|
||||
"""
|
||||
cache_file = _get_cache_file()
|
||||
if cache_file.exists():
|
||||
try:
|
||||
cache_data = json.loads(cache_file.read_text())
|
||||
if _is_cache_valid(cache_data) and "current_version" in cache_data:
|
||||
current = get_crewai_version()
|
||||
if cache_data.get("current_version") == current:
|
||||
return (
|
||||
bool(cache_data.get("current_version_yanked", False)),
|
||||
str(cache_data.get("current_version_yanked_reason", "")),
|
||||
)
|
||||
except (json.JSONDecodeError, OSError):
|
||||
pass
|
||||
|
||||
get_latest_version_from_pypi()
|
||||
|
||||
try:
|
||||
cache_data = json.loads(cache_file.read_text())
|
||||
return (
|
||||
bool(cache_data.get("current_version_yanked", False)),
|
||||
str(cache_data.get("current_version_yanked_reason", "")),
|
||||
)
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return False, ""
|
||||
|
||||
|
||||
def check_version() -> tuple[str, str | None]:
|
||||
"""Check current and latest versions.
|
||||
|
||||
|
||||
@@ -43,3 +43,23 @@ def platform_context(integration_token: str) -> Generator[None, Any, None]:
|
||||
yield
|
||||
finally:
|
||||
_platform_integration_token.reset(token)
|
||||
|
||||
|
||||
_current_task_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
|
||||
"current_task_id", default=None
|
||||
)
|
||||
|
||||
|
||||
def set_current_task_id(task_id: str | None) -> contextvars.Token[str | None]:
|
||||
"""Set the current task ID in the context. Returns a token for reset."""
|
||||
return _current_task_id.set(task_id)
|
||||
|
||||
|
||||
def reset_current_task_id(token: contextvars.Token[str | None]) -> None:
|
||||
"""Reset the current task ID to its previous value."""
|
||||
_current_task_id.reset(token)
|
||||
|
||||
|
||||
def get_current_task_id() -> str | None:
|
||||
"""Get the current task ID from the context."""
|
||||
return _current_task_id.get()
|
||||
|
||||
1
lib/crewai/src/crewai/core/__init__.py
Normal file
1
lib/crewai/src/crewai/core/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Core crewAI components and interfaces."""
|
||||
1
lib/crewai/src/crewai/core/providers/__init__.py
Normal file
1
lib/crewai/src/crewai/core/providers/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Provider interfaces for extensible crewAI components."""
|
||||
78
lib/crewai/src/crewai/core/providers/content_processor.py
Normal file
78
lib/crewai/src/crewai/core/providers/content_processor.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""Content processor provider for extensible content processing."""
|
||||
|
||||
from contextvars import ContextVar
|
||||
from typing import Any, Protocol, runtime_checkable
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class ContentProcessorProvider(Protocol):
|
||||
"""Protocol for content processing during task execution."""
|
||||
|
||||
def process(self, content: str, context: dict[str, Any] | None = None) -> str:
|
||||
"""Process content before use.
|
||||
|
||||
Args:
|
||||
content: The content to process.
|
||||
context: Optional context information.
|
||||
|
||||
Returns:
|
||||
The processed content.
|
||||
"""
|
||||
...
|
||||
|
||||
|
||||
class NoOpContentProcessor:
|
||||
"""Default processor that returns content unchanged."""
|
||||
|
||||
def process(self, content: str, context: dict[str, Any] | None = None) -> str:
|
||||
"""Return content unchanged.
|
||||
|
||||
Args:
|
||||
content: The content to process.
|
||||
context: Optional context information (unused).
|
||||
|
||||
Returns:
|
||||
The original content unchanged.
|
||||
"""
|
||||
return content
|
||||
|
||||
|
||||
_content_processor: ContextVar[ContentProcessorProvider | None] = ContextVar(
|
||||
"_content_processor", default=None
|
||||
)
|
||||
|
||||
_default_processor = NoOpContentProcessor()
|
||||
|
||||
|
||||
def get_processor() -> ContentProcessorProvider:
|
||||
"""Get the current content processor.
|
||||
|
||||
Returns:
|
||||
The registered content processor or the default no-op processor.
|
||||
"""
|
||||
processor = _content_processor.get()
|
||||
if processor is not None:
|
||||
return processor
|
||||
return _default_processor
|
||||
|
||||
|
||||
def set_processor(processor: ContentProcessorProvider) -> None:
|
||||
"""Set the content processor for the current context.
|
||||
|
||||
Args:
|
||||
processor: The content processor to use.
|
||||
"""
|
||||
_content_processor.set(processor)
|
||||
|
||||
|
||||
def process_content(content: str, context: dict[str, Any] | None = None) -> str:
|
||||
"""Process content using the registered processor.
|
||||
|
||||
Args:
|
||||
content: The content to process.
|
||||
context: Optional context information.
|
||||
|
||||
Returns:
|
||||
The processed content.
|
||||
"""
|
||||
return get_processor().process(content, context)
|
||||
489
lib/crewai/src/crewai/core/providers/human_input.py
Normal file
489
lib/crewai/src/crewai/core/providers/human_input.py
Normal file
@@ -0,0 +1,489 @@
|
||||
"""Human input provider for HITL (Human-in-the-Loop) flows."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from contextvars import ContextVar, Token
|
||||
import sys
|
||||
from typing import TYPE_CHECKING, Protocol, runtime_checkable
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.agents.parser import AgentFinish
|
||||
from crewai.crew import Crew
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.task import Task
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
class ExecutorContext(Protocol):
|
||||
"""Context interface for human input providers to interact with executor."""
|
||||
|
||||
task: Task | None
|
||||
crew: Crew | None
|
||||
messages: list[LLMMessage]
|
||||
ask_for_human_input: bool
|
||||
llm: BaseLLM
|
||||
agent: Agent
|
||||
|
||||
def _invoke_loop(self) -> AgentFinish:
|
||||
"""Invoke the agent loop and return the result."""
|
||||
...
|
||||
|
||||
def _is_training_mode(self) -> bool:
|
||||
"""Check if training mode is active."""
|
||||
...
|
||||
|
||||
def _handle_crew_training_output(
|
||||
self,
|
||||
result: AgentFinish,
|
||||
human_feedback: str | None = None,
|
||||
) -> None:
|
||||
"""Handle training output."""
|
||||
...
|
||||
|
||||
def _format_feedback_message(self, feedback: str) -> LLMMessage:
|
||||
"""Format feedback as a message."""
|
||||
...
|
||||
|
||||
|
||||
class AsyncExecutorContext(ExecutorContext, Protocol):
|
||||
"""Extended context for executors that support async invocation."""
|
||||
|
||||
async def _ainvoke_loop(self) -> AgentFinish:
|
||||
"""Invoke the agent loop asynchronously and return the result."""
|
||||
...
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class HumanInputProvider(Protocol):
|
||||
"""Protocol for human input handling.
|
||||
|
||||
Implementations handle the full feedback flow:
|
||||
- Sync: prompt user, loop until satisfied
|
||||
- Async: use non-blocking I/O and async invoke loop
|
||||
"""
|
||||
|
||||
def setup_messages(self, context: ExecutorContext) -> bool:
|
||||
"""Set up messages for execution.
|
||||
|
||||
Called before standard message setup. Allows providers to handle
|
||||
conversation resumption or other custom message initialization.
|
||||
|
||||
Args:
|
||||
context: Executor context with messages list to modify.
|
||||
|
||||
Returns:
|
||||
True if messages were set up (skip standard setup),
|
||||
False to use standard setup.
|
||||
"""
|
||||
...
|
||||
|
||||
def post_setup_messages(self, context: ExecutorContext) -> None:
|
||||
"""Called after standard message setup.
|
||||
|
||||
Allows providers to modify messages after standard setup completes.
|
||||
Only called when setup_messages returned False.
|
||||
|
||||
Args:
|
||||
context: Executor context with messages list to modify.
|
||||
"""
|
||||
...
|
||||
|
||||
def handle_feedback(
|
||||
self,
|
||||
formatted_answer: AgentFinish,
|
||||
context: ExecutorContext,
|
||||
) -> AgentFinish:
|
||||
"""Handle the full human feedback flow synchronously.
|
||||
|
||||
Args:
|
||||
formatted_answer: The agent's current answer.
|
||||
context: Executor context for callbacks.
|
||||
|
||||
Returns:
|
||||
The final answer after feedback processing.
|
||||
|
||||
Raises:
|
||||
Exception: Async implementations may raise to signal external handling.
|
||||
"""
|
||||
...
|
||||
|
||||
async def handle_feedback_async(
|
||||
self,
|
||||
formatted_answer: AgentFinish,
|
||||
context: AsyncExecutorContext,
|
||||
) -> AgentFinish:
|
||||
"""Handle the full human feedback flow asynchronously.
|
||||
|
||||
Uses non-blocking I/O for user prompts and async invoke loop
|
||||
for agent re-execution.
|
||||
|
||||
Args:
|
||||
formatted_answer: The agent's current answer.
|
||||
context: Async executor context for callbacks.
|
||||
|
||||
Returns:
|
||||
The final answer after feedback processing.
|
||||
"""
|
||||
...
|
||||
|
||||
@staticmethod
|
||||
def _get_output_string(answer: AgentFinish) -> str:
|
||||
"""Extract output string from answer.
|
||||
|
||||
Args:
|
||||
answer: The agent's finished answer.
|
||||
|
||||
Returns:
|
||||
String representation of the output.
|
||||
"""
|
||||
if isinstance(answer.output, str):
|
||||
return answer.output
|
||||
return answer.output.model_dump_json()
|
||||
|
||||
|
||||
class SyncHumanInputProvider(HumanInputProvider):
    """Default human input provider with sync and async support.

    Prompts for feedback on the terminal using rich panels. The sync and
    async paths share a single panel-rendering helper
    (``_print_feedback_panel``); the async path reads stdin without
    blocking the event loop.
    """

    def setup_messages(self, context: ExecutorContext) -> bool:
        """Use standard message setup.

        Args:
            context: Executor context (unused).

        Returns:
            False to use standard setup.
        """
        return False

    def post_setup_messages(self, context: ExecutorContext) -> None:
        """No-op for sync provider.

        Args:
            context: Executor context (unused).
        """

    def handle_feedback(
        self,
        formatted_answer: AgentFinish,
        context: ExecutorContext,
    ) -> AgentFinish:
        """Handle feedback synchronously with terminal prompts.

        Args:
            formatted_answer: The agent's current answer.
            context: Executor context for callbacks.

        Returns:
            The final answer after feedback processing.
        """
        feedback = self._prompt_input(context.crew)

        if context._is_training_mode():
            return self._handle_training_feedback(formatted_answer, feedback, context)

        return self._handle_regular_feedback(formatted_answer, feedback, context)

    async def handle_feedback_async(
        self,
        formatted_answer: AgentFinish,
        context: AsyncExecutorContext,
    ) -> AgentFinish:
        """Handle feedback asynchronously without blocking the event loop.

        Args:
            formatted_answer: The agent's current answer.
            context: Async executor context for callbacks.

        Returns:
            The final answer after feedback processing.
        """
        feedback = await self._prompt_input_async(context.crew)

        if context._is_training_mode():
            return await self._handle_training_feedback_async(
                formatted_answer, feedback, context
            )

        return await self._handle_regular_feedback_async(
            formatted_answer, feedback, context
        )

    # ── Sync helpers ──────────────────────────────────────────────────

    @staticmethod
    def _handle_training_feedback(
        initial_answer: AgentFinish,
        feedback: str,
        context: ExecutorContext,
    ) -> AgentFinish:
        """Process training feedback (single iteration).

        Args:
            initial_answer: The agent's initial answer.
            feedback: Human feedback string.
            context: Executor context for callbacks.

        Returns:
            Improved answer after processing feedback.
        """
        context._handle_crew_training_output(initial_answer, feedback)
        context.messages.append(context._format_feedback_message(feedback))
        improved_answer = context._invoke_loop()
        context._handle_crew_training_output(improved_answer)
        # Training mode collects exactly one round of feedback.
        context.ask_for_human_input = False
        return improved_answer

    def _handle_regular_feedback(
        self,
        current_answer: AgentFinish,
        initial_feedback: str,
        context: ExecutorContext,
    ) -> AgentFinish:
        """Process regular feedback with iteration loop.

        Args:
            current_answer: The agent's current answer.
            initial_feedback: Initial human feedback string.
            context: Executor context for callbacks.

        Returns:
            Final answer after all feedback iterations.
        """
        feedback = initial_feedback
        answer = current_answer

        while context.ask_for_human_input:
            # Empty feedback (plain Enter) means the user is satisfied.
            if feedback.strip() == "":
                context.ask_for_human_input = False
            else:
                context.messages.append(context._format_feedback_message(feedback))
                answer = context._invoke_loop()
                feedback = self._prompt_input(context.crew)

        return answer

    # ── Async helpers ─────────────────────────────────────────────────

    @staticmethod
    async def _handle_training_feedback_async(
        initial_answer: AgentFinish,
        feedback: str,
        context: AsyncExecutorContext,
    ) -> AgentFinish:
        """Process training feedback asynchronously (single iteration).

        Args:
            initial_answer: The agent's initial answer.
            feedback: Human feedback string.
            context: Async executor context for callbacks.

        Returns:
            Improved answer after processing feedback.
        """
        context._handle_crew_training_output(initial_answer, feedback)
        context.messages.append(context._format_feedback_message(feedback))
        improved_answer = await context._ainvoke_loop()
        context._handle_crew_training_output(improved_answer)
        # Training mode collects exactly one round of feedback.
        context.ask_for_human_input = False
        return improved_answer

    async def _handle_regular_feedback_async(
        self,
        current_answer: AgentFinish,
        initial_feedback: str,
        context: AsyncExecutorContext,
    ) -> AgentFinish:
        """Process regular feedback with async iteration loop.

        Args:
            current_answer: The agent's current answer.
            initial_feedback: Initial human feedback string.
            context: Async executor context for callbacks.

        Returns:
            Final answer after all feedback iterations.
        """
        feedback = initial_feedback
        answer = current_answer

        while context.ask_for_human_input:
            # Empty feedback (plain Enter) means the user is satisfied.
            if feedback.strip() == "":
                context.ask_for_human_input = False
            else:
                context.messages.append(context._format_feedback_message(feedback))
                answer = await context._ainvoke_loop()
                feedback = await self._prompt_input_async(context.crew)

        return answer

    # ── I/O ───────────────────────────────────────────────────────────

    @staticmethod
    def _print_feedback_panel(crew: Crew | None, formatter) -> None:
        """Render the feedback prompt panel shared by the sync and async paths.

        Args:
            crew: The crew instance, used to detect training mode via its
                ``_train`` flag.
            formatter: The event listener's console formatter (left untyped;
                imported lazily by callers to avoid an import cycle).
        """
        from rich.panel import Panel
        from rich.text import Text

        if crew and getattr(crew, "_train", False):
            prompt_text = (
                "TRAINING MODE: Provide feedback to improve the agent's performance.\n\n"
                "This will be used to train better versions of the agent.\n"
                "Please provide detailed feedback about the result quality and reasoning process."
            )
            title = "🎓 Training Feedback Required"
        else:
            prompt_text = (
                "Provide feedback on the Final Result above.\n\n"
                "• If you are happy with the result, simply hit Enter without typing anything.\n"
                "• Otherwise, provide specific improvement requests.\n"
                "• You can provide multiple rounds of feedback until satisfied."
            )
            title = "💬 Human Feedback Required"

        content = Text()
        content.append(prompt_text, style="yellow")

        prompt_panel = Panel(
            content,
            title=title,
            border_style="yellow",
            padding=(1, 2),
        )
        formatter.console.print(prompt_panel)

    @staticmethod
    def _prompt_input(crew: Crew | None) -> str:
        """Show rich panel and prompt for input.

        Args:
            crew: The crew instance for context.

        Returns:
            User input string from terminal.
        """
        from crewai.events.event_listener import event_listener

        formatter = event_listener.formatter
        # Pause live rendering so the prompt is not overwritten; always
        # resume, even if input() raises (e.g. EOFError).
        formatter.pause_live_updates()

        try:
            SyncHumanInputProvider._print_feedback_panel(crew, formatter)

            response = input()
            if response.strip() != "":
                formatter.console.print("\n[cyan]Processing your feedback...[/cyan]")
            return response
        finally:
            formatter.resume_live_updates()

    @staticmethod
    async def _prompt_input_async(crew: Crew | None) -> str:
        """Show rich panel and prompt for input without blocking the event loop.

        Args:
            crew: The crew instance for context.

        Returns:
            User input string from terminal.
        """
        from crewai.events.event_listener import event_listener

        formatter = event_listener.formatter
        # Pause live rendering so the prompt is not overwritten; always
        # resume, even if reading stdin raises.
        formatter.pause_live_updates()

        try:
            SyncHumanInputProvider._print_feedback_panel(crew, formatter)

            response = await _async_readline()
            if response.strip() != "":
                formatter.console.print("\n[cyan]Processing your feedback...[/cyan]")
            return response
        finally:
            formatter.resume_live_updates()
|
||||
|
||||
|
||||
async def _async_readline() -> str:
    """Read a line from stdin using the event loop's native I/O.

    Falls back to ``asyncio.to_thread(input)`` on platforms where piping
    stdin is unsupported (e.g. Windows Proactor loops, or when stdin is not
    a real pipe/tty).

    Returns:
        The line read from stdin, with the trailing line ending stripped.
    """
    loop = asyncio.get_running_loop()
    try:
        reader = asyncio.StreamReader()
        protocol = asyncio.StreamReaderProtocol(reader)
        # NOTE(review): this attaches a new transport to sys.stdin on every
        # call; presumably fine for sequential prompts — verify if this is
        # ever invoked concurrently.
        await loop.connect_read_pipe(lambda: protocol, sys.stdin)
        raw = await reader.readline()
        # Strip "\r\n" as well as "\n" so CRLF input matches the behavior
        # of the input() fallback, which removes the full line ending.
        return raw.decode().rstrip("\r\n")
    except (OSError, NotImplementedError, ValueError):
        return await asyncio.to_thread(input)
|
||||
|
||||
|
||||
# Context-local holder for the active human input provider. Starts as None;
# get_provider() lazily installs a default SyncHumanInputProvider on first use.
_provider: ContextVar[HumanInputProvider | None] = ContextVar(
    "human_input_provider",
    default=None,
)
|
||||
|
||||
|
||||
def get_provider() -> HumanInputProvider:
    """Get the current human input provider.

    Returns:
        The provider for this context; when none has been set yet, a new
        SyncHumanInputProvider is created, registered, and returned.
    """
    current = _provider.get()
    if current is not None:
        return current

    default_provider = SyncHumanInputProvider()
    set_provider(default_provider)
    return default_provider
|
||||
|
||||
|
||||
def set_provider(provider: HumanInputProvider) -> Token[HumanInputProvider | None]:
    """Set the human input provider for the current context.

    Args:
        provider: The provider to use.

    Returns:
        Token that can be used to reset to the previous value via
        reset_provider().
    """
    return _provider.set(provider)
|
||||
|
||||
|
||||
def reset_provider(token: Token[HumanInputProvider | None]) -> None:
    """Reset the provider to its previous value.

    Args:
        token: Token returned from set_provider.
    """
    _provider.reset(token)
|
||||
@@ -187,6 +187,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
_task_output_handler: TaskOutputStorageHandler = PrivateAttr(
|
||||
default_factory=TaskOutputStorageHandler
|
||||
)
|
||||
_kickoff_event_id: str | None = PrivateAttr(default=None)
|
||||
|
||||
name: str | None = Field(default="crew")
|
||||
cache: bool = Field(default=True)
|
||||
@@ -751,19 +752,28 @@ class Crew(FlowTrackable, BaseModel):
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
|
||||
result = self._post_kickoff(result)
|
||||
|
||||
self.usage_metrics = self.calculate_usage_metrics()
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
|
||||
CrewKickoffFailedEvent(
|
||||
error=str(e),
|
||||
crew_name=self.name,
|
||||
started_event_id=self._kickoff_event_id,
|
||||
),
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
clear_files(self.id)
|
||||
detach(token)
|
||||
|
||||
def _post_kickoff(self, result: CrewOutput) -> CrewOutput:
|
||||
return result
|
||||
|
||||
def kickoff_for_each(
|
||||
self,
|
||||
inputs: list[dict[str, Any]],
|
||||
@@ -936,13 +946,19 @@ class Crew(FlowTrackable, BaseModel):
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
|
||||
result = self._post_kickoff(result)
|
||||
|
||||
self.usage_metrics = self.calculate_usage_metrics()
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
|
||||
CrewKickoffFailedEvent(
|
||||
error=str(e),
|
||||
crew_name=self.name,
|
||||
started_event_id=self._kickoff_event_id,
|
||||
),
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
@@ -1181,6 +1197,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self.manager_agent = manager
|
||||
manager.crew = self
|
||||
|
||||
def _get_execution_start_index(self, tasks: list[Task]) -> int | None:
|
||||
return None
|
||||
|
||||
def _execute_tasks(
|
||||
self,
|
||||
tasks: list[Task],
|
||||
@@ -1197,6 +1216,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
Returns:
|
||||
CrewOutput: Final output of the crew
|
||||
"""
|
||||
custom_start = self._get_execution_start_index(tasks)
|
||||
if custom_start is not None:
|
||||
start_index = custom_start
|
||||
|
||||
task_outputs: list[TaskOutput] = []
|
||||
futures: list[tuple[Task, Future[TaskOutput], int]] = []
|
||||
@@ -1305,8 +1327,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
if agent and agent.llm and agent.llm.supports_multimodal():
|
||||
provider = getattr(agent.llm, "provider", None) or getattr(
|
||||
agent.llm, "model", "openai"
|
||||
provider = (
|
||||
getattr(agent.llm, "provider", None)
|
||||
or getattr(agent.llm, "model", None)
|
||||
or "openai"
|
||||
)
|
||||
api = getattr(agent.llm, "api", None)
|
||||
supported_types = get_supported_content_types(provider, api)
|
||||
@@ -1502,12 +1526,14 @@ class Crew(FlowTrackable, BaseModel):
|
||||
final_string_output = final_task_output.raw
|
||||
self._finish_execution(final_string_output)
|
||||
self.token_usage = self.calculate_usage_metrics()
|
||||
crewai_event_bus.flush()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffCompletedEvent(
|
||||
crew_name=self.name,
|
||||
output=final_task_output,
|
||||
total_tokens=self.token_usage.total_tokens,
|
||||
started_event_id=self._kickoff_event_id,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -2011,7 +2037,13 @@ class Crew(FlowTrackable, BaseModel):
|
||||
@staticmethod
|
||||
def _show_tracing_disabled_message() -> None:
|
||||
"""Show a message when tracing is disabled."""
|
||||
from crewai.events.listeners.tracing.utils import has_user_declined_tracing
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
has_user_declined_tracing,
|
||||
should_suppress_tracing_messages,
|
||||
)
|
||||
|
||||
if should_suppress_tracing_messages():
|
||||
return
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@@ -265,10 +265,9 @@ def prepare_kickoff(
|
||||
normalized = {}
|
||||
normalized = before_callback(normalized)
|
||||
|
||||
future = crewai_event_bus.emit(
|
||||
crew,
|
||||
CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized),
|
||||
)
|
||||
started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
|
||||
crew._kickoff_event_id = started_event.event_id
|
||||
future = crewai_event_bus.emit(crew, started_event)
|
||||
if future is not None:
|
||||
try:
|
||||
future.result()
|
||||
|
||||
@@ -195,6 +195,7 @@ __all__ = [
|
||||
"ToolUsageFinishedEvent",
|
||||
"ToolUsageStartedEvent",
|
||||
"ToolValidateInputErrorEvent",
|
||||
"_extension_exports",
|
||||
"crewai_event_bus",
|
||||
]
|
||||
|
||||
@@ -210,14 +211,29 @@ _AGENT_EVENT_MAPPING = {
|
||||
"LiteAgentExecutionStartedEvent": "crewai.events.types.agent_events",
|
||||
}
|
||||
|
||||
_extension_exports: dict[str, Any] = {}
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
|
||||
"""Lazy import for agent events to avoid circular imports."""
|
||||
"""Lazy import for agent events and registered extensions."""
|
||||
if name in _AGENT_EVENT_MAPPING:
|
||||
import importlib
|
||||
|
||||
module_path = _AGENT_EVENT_MAPPING[name]
|
||||
module = importlib.import_module(module_path)
|
||||
return getattr(module, name)
|
||||
|
||||
if name in _extension_exports:
|
||||
import importlib
|
||||
|
||||
value = _extension_exports[name]
|
||||
if isinstance(value, str):
|
||||
module_path, _, attr_name = value.rpartition(".")
|
||||
if module_path:
|
||||
module = importlib.import_module(module_path)
|
||||
return getattr(module, attr_name)
|
||||
return importlib.import_module(value)
|
||||
return value
|
||||
|
||||
msg = f"module {__name__!r} has no attribute {name!r}"
|
||||
raise AttributeError(msg)
|
||||
|
||||
@@ -63,6 +63,7 @@ class BaseEvent(BaseModel):
|
||||
parent_event_id: str | None = None
|
||||
previous_event_id: str | None = None
|
||||
triggered_by_event_id: str | None = None
|
||||
started_event_id: str | None = None
|
||||
emission_sequence: int | None = None
|
||||
|
||||
def to_json(self, exclude: set[str] | None = None) -> Serializable:
|
||||
|
||||
@@ -227,6 +227,39 @@ class CrewAIEventsBus:
|
||||
|
||||
return decorator
|
||||
|
||||
    def off(
        self,
        event_type: type[BaseEvent],
        handler: Callable[..., Any],
    ) -> None:
        """Unregister an event handler for a specific event type.

        Removes the handler from both the sync and async registries (it is
        only present in one of them), drops its dependency metadata, and
        invalidates the cached execution plan for the event type.

        Args:
            event_type: The event class to stop listening for
            handler: The handler function to unregister
        """
        # Write lock: all four registries are mutated together.
        with self._rwlock.w_locked():
            if event_type in self._sync_handlers:
                existing_sync = self._sync_handlers[event_type]
                if handler in existing_sync:
                    # Handler sets are immutable (frozenset-style); rebuild
                    # without the handler rather than mutating in place.
                    self._sync_handlers[event_type] = existing_sync - {handler}
                    if not self._sync_handlers[event_type]:
                        # Drop empty entries so membership checks stay cheap.
                        del self._sync_handlers[event_type]

            if event_type in self._async_handlers:
                existing_async = self._async_handlers[event_type]
                if handler in existing_async:
                    self._async_handlers[event_type] = existing_async - {handler}
                    if not self._async_handlers[event_type]:
                        del self._async_handlers[event_type]

            if event_type in self._handler_dependencies:
                self._handler_dependencies[event_type].pop(handler, None)
                if not self._handler_dependencies[event_type]:
                    del self._handler_dependencies[event_type]

            # The cached execution plan may reference the removed handler;
            # invalidate it so the next emit rebuilds the plan.
            self._execution_plan_cache.pop(event_type, None)
|
||||
|
||||
def _call_handlers(
|
||||
self,
|
||||
source: Any,
|
||||
@@ -374,7 +407,8 @@ class CrewAIEventsBus:
|
||||
if popped is None:
|
||||
handle_empty_pop(event_type_name)
|
||||
else:
|
||||
_, popped_type = popped
|
||||
popped_event_id, popped_type = popped
|
||||
event.started_event_id = popped_event_id
|
||||
expected_start = VALID_EVENT_PAIRS.get(event_type_name)
|
||||
if expected_start and popped_type and popped_type != expected_start:
|
||||
handle_mismatch(event_type_name, popped_type, expected_start)
|
||||
@@ -536,24 +570,52 @@ class CrewAIEventsBus:
|
||||
... # Do stuff...
|
||||
... # Handlers are cleared after the context
|
||||
"""
|
||||
with self._rwlock.w_locked():
|
||||
prev_sync = self._sync_handlers
|
||||
prev_async = self._async_handlers
|
||||
prev_deps = self._handler_dependencies
|
||||
prev_cache = self._execution_plan_cache
|
||||
self._sync_handlers = {}
|
||||
self._async_handlers = {}
|
||||
self._handler_dependencies = {}
|
||||
self._execution_plan_cache = {}
|
||||
with self._rwlock.r_locked():
|
||||
saved_sync: dict[type[BaseEvent], frozenset[SyncHandler]] = dict(
|
||||
self._sync_handlers
|
||||
)
|
||||
saved_async: dict[type[BaseEvent], frozenset[AsyncHandler]] = dict(
|
||||
self._async_handlers
|
||||
)
|
||||
saved_deps: dict[type[BaseEvent], dict[Handler, list[Depends[Any]]]] = {
|
||||
event_type: dict(handlers)
|
||||
for event_type, handlers in self._handler_dependencies.items()
|
||||
}
|
||||
|
||||
for event_type, sync_handlers in saved_sync.items():
|
||||
for sync_handler in sync_handlers:
|
||||
self.off(event_type, sync_handler)
|
||||
|
||||
for event_type, async_handlers in saved_async.items():
|
||||
for async_handler in async_handlers:
|
||||
self.off(event_type, async_handler)
|
||||
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
with self._rwlock.w_locked():
|
||||
self._sync_handlers = prev_sync
|
||||
self._async_handlers = prev_async
|
||||
self._handler_dependencies = prev_deps
|
||||
self._execution_plan_cache = prev_cache
|
||||
with self._rwlock.r_locked():
|
||||
current_sync = dict(self._sync_handlers)
|
||||
current_async = dict(self._async_handlers)
|
||||
|
||||
for event_type, cur_sync in current_sync.items():
|
||||
orig_sync = saved_sync.get(event_type, frozenset())
|
||||
for new_handler in cur_sync - orig_sync:
|
||||
self.off(event_type, new_handler)
|
||||
|
||||
for event_type, cur_async in current_async.items():
|
||||
orig_async = saved_async.get(event_type, frozenset())
|
||||
for new_async_handler in cur_async - orig_async:
|
||||
self.off(event_type, new_async_handler)
|
||||
|
||||
for event_type, sync_handlers in saved_sync.items():
|
||||
for sync_handler in sync_handlers:
|
||||
deps = saved_deps.get(event_type, {}).get(sync_handler)
|
||||
self._register_handler(event_type, sync_handler, deps)
|
||||
|
||||
for event_type, async_handlers in saved_async.items():
|
||||
for async_handler in async_handlers:
|
||||
deps = saved_deps.get(event_type, {}).get(async_handler)
|
||||
self._register_handler(event_type, async_handler, deps)
|
||||
|
||||
def shutdown(self, wait: bool = True) -> None:
|
||||
"""Gracefully shutdown the event loop and wait for all tasks to finish.
|
||||
|
||||
@@ -797,7 +797,13 @@ class TraceCollectionListener(BaseEventListener):
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
from crewai.events.listeners.tracing.utils import has_user_declined_tracing
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
has_user_declined_tracing,
|
||||
should_suppress_tracing_messages,
|
||||
)
|
||||
|
||||
if should_suppress_tracing_messages():
|
||||
return
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from collections.abc import Callable
|
||||
from contextvars import ContextVar, Token
|
||||
from datetime import datetime
|
||||
import getpass
|
||||
@@ -26,6 +27,35 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
_tracing_enabled: ContextVar[bool | None] = ContextVar("_tracing_enabled", default=None)
|
||||
|
||||
_first_time_trace_hook: ContextVar[Callable[[], bool] | None] = ContextVar(
|
||||
"_first_time_trace_hook", default=None
|
||||
)
|
||||
|
||||
_suppress_tracing_messages: ContextVar[bool] = ContextVar(
|
||||
"_suppress_tracing_messages", default=False
|
||||
)
|
||||
|
||||
|
||||
def set_suppress_tracing_messages(suppress: bool) -> object:
|
||||
"""Set whether to suppress tracing-related console messages.
|
||||
|
||||
Args:
|
||||
suppress: True to suppress messages, False to show them.
|
||||
|
||||
Returns:
|
||||
A token that can be used to restore the previous value.
|
||||
"""
|
||||
return _suppress_tracing_messages.set(suppress)
|
||||
|
||||
|
||||
def should_suppress_tracing_messages() -> bool:
|
||||
"""Check if tracing messages should be suppressed.
|
||||
|
||||
Returns:
|
||||
True if messages should be suppressed, False otherwise.
|
||||
"""
|
||||
return _suppress_tracing_messages.get()
|
||||
|
||||
|
||||
def should_enable_tracing(*, override: bool | None = None) -> bool:
|
||||
"""Determine if tracing should be enabled.
|
||||
@@ -407,10 +437,13 @@ def truncate_messages(
|
||||
def should_auto_collect_first_time_traces() -> bool:
|
||||
"""True if we should auto-collect traces for first-time user.
|
||||
|
||||
|
||||
Returns:
|
||||
True if first-time user AND telemetry not disabled AND tracing not explicitly enabled, False otherwise.
|
||||
"""
|
||||
hook = _first_time_trace_hook.get()
|
||||
if hook is not None:
|
||||
return hook()
|
||||
|
||||
if _is_test_environment():
|
||||
return False
|
||||
|
||||
@@ -432,6 +465,9 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
|
||||
if _is_test_environment():
|
||||
return False
|
||||
|
||||
if should_suppress_tracing_messages():
|
||||
return False
|
||||
|
||||
try:
|
||||
import threading
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ class ToolUsageEvent(BaseEvent):
|
||||
tool_name: str
|
||||
tool_args: dict[str, Any] | str
|
||||
tool_class: str | None = None
|
||||
run_attempts: int | None = None
|
||||
run_attempts: int = 0
|
||||
delegations: int | None = None
|
||||
agent: Any | None = None
|
||||
task_name: str | None = None
|
||||
@@ -26,7 +26,7 @@ class ToolUsageEvent(BaseEvent):
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
def __init__(self, **data):
|
||||
def __init__(self, **data: Any) -> None:
|
||||
if data.get("from_task"):
|
||||
task = data["from_task"]
|
||||
data["task_id"] = str(task.id)
|
||||
@@ -96,10 +96,10 @@ class ToolExecutionErrorEvent(BaseEvent):
|
||||
type: str = "tool_execution_error"
|
||||
tool_name: str
|
||||
tool_args: dict[str, Any]
|
||||
tool_class: Callable
|
||||
tool_class: Callable[..., Any]
|
||||
agent: Any | None = None
|
||||
|
||||
def __init__(self, **data):
|
||||
def __init__(self, **data: Any) -> None:
|
||||
super().__init__(**data)
|
||||
# Set fingerprint data from the agent
|
||||
if self.agent and hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from contextvars import ContextVar
|
||||
import os
|
||||
import threading
|
||||
from typing import Any, ClassVar, cast
|
||||
@@ -7,7 +8,37 @@ from rich.live import Live
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
from crewai.cli.version import is_newer_version_available
|
||||
from crewai.cli.version import is_current_version_yanked, is_newer_version_available
|
||||
|
||||
|
||||
_disable_version_check: ContextVar[bool] = ContextVar(
|
||||
"_disable_version_check", default=False
|
||||
)
|
||||
|
||||
_suppress_console_output: ContextVar[bool] = ContextVar(
|
||||
"_suppress_console_output", default=False
|
||||
)
|
||||
|
||||
|
||||
def set_suppress_console_output(suppress: bool) -> object:
|
||||
"""Set whether to suppress all console output.
|
||||
|
||||
Args:
|
||||
suppress: True to suppress output, False to show it.
|
||||
|
||||
Returns:
|
||||
A token that can be used to restore the previous value.
|
||||
"""
|
||||
return _suppress_console_output.set(suppress)
|
||||
|
||||
|
||||
def should_suppress_console_output() -> bool:
|
||||
"""Check if console output should be suppressed.
|
||||
|
||||
Returns:
|
||||
True if output should be suppressed, False otherwise.
|
||||
"""
|
||||
return _suppress_console_output.get()
|
||||
|
||||
|
||||
class ConsoleFormatter:
|
||||
@@ -46,9 +77,15 @@ class ConsoleFormatter:
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
if _disable_version_check.get():
|
||||
return
|
||||
|
||||
if os.getenv("CI", "").lower() in ("true", "1"):
|
||||
return
|
||||
|
||||
if os.getenv("CREWAI_DISABLE_VERSION_CHECK", "").lower() in ("true", "1"):
|
||||
return
|
||||
|
||||
try:
|
||||
is_newer, current, latest = is_newer_version_available()
|
||||
if is_newer and latest:
|
||||
@@ -67,6 +104,22 @@ To update, run: uv sync --upgrade-package crewai"""
|
||||
)
|
||||
self.console.print(panel)
|
||||
self.console.print()
|
||||
|
||||
is_yanked, yanked_reason = is_current_version_yanked()
|
||||
if is_yanked:
|
||||
yanked_message = f"Version {current} has been yanked from PyPI."
|
||||
if yanked_reason:
|
||||
yanked_message += f"\nReason: {yanked_reason}"
|
||||
yanked_message += "\n\nTo update, run: uv sync --upgrade-package crewai"
|
||||
|
||||
yanked_panel = Panel(
|
||||
yanked_message,
|
||||
title="Yanked Version",
|
||||
border_style="red",
|
||||
padding=(1, 2),
|
||||
)
|
||||
self.console.print(yanked_panel)
|
||||
self.console.print()
|
||||
except Exception: # noqa: S110
|
||||
# Silently ignore errors in version check - it's non-critical
|
||||
pass
|
||||
@@ -76,8 +129,12 @@ To update, run: uv sync --upgrade-package crewai"""
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
has_user_declined_tracing,
|
||||
is_tracing_enabled_in_context,
|
||||
should_suppress_tracing_messages,
|
||||
)
|
||||
|
||||
if should_suppress_tracing_messages():
|
||||
return
|
||||
|
||||
if not is_tracing_enabled_in_context():
|
||||
if has_user_declined_tracing():
|
||||
message = """Info: Tracing is disabled.
|
||||
@@ -129,6 +186,8 @@ To enable tracing, do any one of these:
|
||||
|
||||
def print(self, *args: Any, **kwargs: Any) -> None:
|
||||
"""Print to console. Simplified to only handle panel-based output."""
|
||||
if should_suppress_console_output():
|
||||
return
|
||||
# Skip blank lines during streaming
|
||||
if len(args) == 0 and self._is_streaming:
|
||||
return
|
||||
@@ -485,6 +544,9 @@ To enable tracing, do any one of these:
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
if should_suppress_console_output():
|
||||
return
|
||||
|
||||
self._is_streaming = True
|
||||
self._last_stream_call_type = call_type
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ from crewai.agents.parser import (
|
||||
AgentFinish,
|
||||
OutputParserError,
|
||||
)
|
||||
from crewai.core.providers.human_input import get_provider
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
is_tracing_enabled_in_context,
|
||||
@@ -31,7 +32,8 @@ from crewai.events.types.tool_usage_events import (
|
||||
ToolUsageFinishedEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
from crewai.flow.flow import Flow, listen, or_, router, start
|
||||
from crewai.flow.flow import Flow, StateProxy, listen, or_, router, start
|
||||
from crewai.flow.types import FlowMethodName
|
||||
from crewai.hooks.llm_hooks import (
|
||||
get_after_llm_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
@@ -41,7 +43,12 @@ from crewai.hooks.tool_hooks import (
|
||||
get_after_tool_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
|
||||
from crewai.hooks.types import (
|
||||
AfterLLMCallHookCallable,
|
||||
AfterLLMCallHookType,
|
||||
BeforeLLMCallHookCallable,
|
||||
BeforeLLMCallHookType,
|
||||
)
|
||||
from crewai.utilities.agent_utils import (
|
||||
convert_tools_to_openai_schema,
|
||||
enforce_rpm_limit,
|
||||
@@ -191,8 +198,12 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
|
||||
self._instance_id = str(uuid4())[:8]
|
||||
|
||||
self.before_llm_call_hooks: list[BeforeLLMCallHookType] = []
|
||||
self.after_llm_call_hooks: list[AfterLLMCallHookType] = []
|
||||
self.before_llm_call_hooks: list[
|
||||
BeforeLLMCallHookType | BeforeLLMCallHookCallable
|
||||
] = []
|
||||
self.after_llm_call_hooks: list[
|
||||
AfterLLMCallHookType | AfterLLMCallHookCallable
|
||||
] = []
|
||||
self.before_llm_call_hooks.extend(get_before_llm_call_hooks())
|
||||
self.after_llm_call_hooks.extend(get_after_llm_call_hooks())
|
||||
|
||||
@@ -207,6 +218,71 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
)
|
||||
self._state = AgentReActState()
|
||||
|
||||
@property
|
||||
def messages(self) -> list[LLMMessage]:
|
||||
"""Delegate to state for ExecutorContext conformance."""
|
||||
return self._state.messages
|
||||
|
||||
@messages.setter
|
||||
def messages(self, value: list[LLMMessage]) -> None:
|
||||
"""Delegate to state for ExecutorContext conformance."""
|
||||
if self._flow_initialized and hasattr(self, "_state_lock"):
|
||||
with self._state_lock:
|
||||
self._state.messages = value
|
||||
else:
|
||||
self._state.messages = value
|
||||
|
||||
@property
|
||||
def ask_for_human_input(self) -> bool:
|
||||
"""Delegate to state for ExecutorContext conformance."""
|
||||
return self._state.ask_for_human_input
|
||||
|
||||
@ask_for_human_input.setter
|
||||
def ask_for_human_input(self, value: bool) -> None:
|
||||
"""Delegate to state for ExecutorContext conformance."""
|
||||
self._state.ask_for_human_input = value
|
||||
|
||||
def _invoke_loop(self) -> AgentFinish:
|
||||
"""Invoke the agent loop and return the result.
|
||||
|
||||
Required by ExecutorContext protocol.
|
||||
"""
|
||||
self._state.iterations = 0
|
||||
self._state.is_finished = False
|
||||
self._state.current_answer = None
|
||||
|
||||
self.kickoff()
|
||||
|
||||
answer = self._state.current_answer
|
||||
if not isinstance(answer, AgentFinish):
|
||||
raise RuntimeError("Agent loop did not produce a final answer")
|
||||
return answer
|
||||
|
||||
async def _ainvoke_loop(self) -> AgentFinish:
|
||||
"""Invoke the agent loop asynchronously and return the result.
|
||||
|
||||
Required by AsyncExecutorContext protocol.
|
||||
"""
|
||||
self._state.iterations = 0
|
||||
self._state.is_finished = False
|
||||
self._state.current_answer = None
|
||||
|
||||
await self.akickoff()
|
||||
|
||||
answer = self._state.current_answer
|
||||
if not isinstance(answer, AgentFinish):
|
||||
raise RuntimeError("Agent loop did not produce a final answer")
|
||||
return answer
|
||||
|
||||
def _format_feedback_message(self, feedback: str) -> LLMMessage:
|
||||
"""Format feedback as a message for the LLM.
|
||||
|
||||
Required by ExecutorContext protocol.
|
||||
"""
|
||||
return format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
|
||||
def _ensure_flow_initialized(self) -> None:
|
||||
"""Ensure Flow.__init__() has been called.
|
||||
|
||||
@@ -298,18 +374,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
Flow initialization is deferred to prevent event emission during agent setup.
|
||||
Returns the temporary state until invoke() is called.
|
||||
"""
|
||||
if self._flow_initialized and hasattr(self, "_state_lock"):
|
||||
return StateProxy(self._state, self._state_lock) # type: ignore[return-value]
|
||||
return self._state
|
||||
|
||||
@property
|
||||
def messages(self) -> list[LLMMessage]:
|
||||
"""Compatibility property for mixin - returns state messages."""
|
||||
return self._state.messages
|
||||
|
||||
@messages.setter
|
||||
def messages(self, value: list[LLMMessage]) -> None:
|
||||
"""Set state messages."""
|
||||
self._state.messages = value
|
||||
|
||||
@property
|
||||
def iterations(self) -> int:
|
||||
"""Compatibility property for mixin - returns state iterations."""
|
||||
@@ -416,15 +484,14 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
raise
|
||||
|
||||
@listen("continue_reasoning_native")
|
||||
def call_llm_native_tools(
|
||||
self,
|
||||
) -> Literal["native_tool_calls", "native_finished", "context_error"]:
|
||||
def call_llm_native_tools(self) -> None:
|
||||
"""Execute LLM call with native function calling.
|
||||
|
||||
Always calls the LLM so it can read reflection prompts and decide
|
||||
whether to provide a final answer or request more tools.
|
||||
|
||||
Returns routing decision based on whether tool calls or final answer.
|
||||
Note: This is a listener, not a router. The route_native_tool_result
|
||||
router fires after this to determine the next step based on state.
|
||||
"""
|
||||
try:
|
||||
# Clear pending tools - LLM will decide what to do next after reading
|
||||
@@ -454,8 +521,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
if isinstance(answer, list) and answer and self._is_tool_call_list(answer):
|
||||
# Store tool calls for sequential processing
|
||||
self.state.pending_tool_calls = list(answer)
|
||||
|
||||
return "native_tool_calls"
|
||||
return # Router will check pending_tool_calls
|
||||
|
||||
if isinstance(answer, BaseModel):
|
||||
self.state.current_answer = AgentFinish(
|
||||
@@ -465,7 +531,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
)
|
||||
self._invoke_step_callback(self.state.current_answer)
|
||||
self._append_message_to_state(answer.model_dump_json())
|
||||
return "native_finished"
|
||||
return # Router will check current_answer
|
||||
|
||||
# Text response - this is the final answer
|
||||
if isinstance(answer, str):
|
||||
@@ -476,8 +542,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
)
|
||||
self._invoke_step_callback(self.state.current_answer)
|
||||
self._append_message_to_state(answer)
|
||||
|
||||
return "native_finished"
|
||||
return # Router will check current_answer
|
||||
|
||||
# Unexpected response type, treat as final answer
|
||||
self.state.current_answer = AgentFinish(
|
||||
@@ -487,13 +552,12 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
)
|
||||
self._invoke_step_callback(self.state.current_answer)
|
||||
self._append_message_to_state(str(answer))
|
||||
|
||||
return "native_finished"
|
||||
# Router will check current_answer
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
self._last_context_error = e
|
||||
return "context_error"
|
||||
return # Router will check _last_context_error
|
||||
if e.__class__.__module__.startswith("litellm"):
|
||||
raise e
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
@@ -506,6 +570,22 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
return "execute_tool"
|
||||
return "agent_finished"
|
||||
|
||||
@router(call_llm_native_tools)
|
||||
def route_native_tool_result(
|
||||
self,
|
||||
) -> Literal["native_tool_calls", "native_finished", "context_error"]:
|
||||
"""Route based on LLM response for native tool calling.
|
||||
|
||||
Checks state set by call_llm_native_tools to determine next step.
|
||||
This router is needed because only router return values trigger
|
||||
downstream listeners.
|
||||
"""
|
||||
if self._last_context_error is not None:
|
||||
return "context_error"
|
||||
if self.state.pending_tool_calls:
|
||||
return "native_tool_calls"
|
||||
return "native_finished"
|
||||
|
||||
@listen("execute_tool")
|
||||
def execute_tool_action(self) -> Literal["tool_completed", "tool_result_is_final"]:
|
||||
"""Execute the tool action and handle the result."""
|
||||
@@ -689,6 +769,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
agent_key=agent_key,
|
||||
),
|
||||
)
|
||||
error_event_emitted = False
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
@@ -764,6 +845,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
elif max_usage_reached and original_tool:
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
@@ -792,20 +874,20 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
color="red",
|
||||
)
|
||||
|
||||
# Emit tool usage finished event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
if not error_event_emitted:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
|
||||
# Append tool result message
|
||||
tool_message: LLMMessage = {
|
||||
@@ -861,9 +943,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
self.state.iterations += 1
|
||||
return "initialized"
|
||||
|
||||
@listen("initialized")
|
||||
@listen(or_("initialized", "tool_completed", "native_tool_completed"))
|
||||
def continue_iteration(self) -> Literal["check_iteration"]:
|
||||
"""Bridge listener that connects iteration loop back to iteration check."""
|
||||
if self._flow_initialized:
|
||||
self._discard_or_listener(FlowMethodName("continue_iteration"))
|
||||
return "check_iteration"
|
||||
|
||||
@router(or_(initialize_reasoning, continue_iteration))
|
||||
@@ -1105,7 +1189,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
if self.state.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
formatted_answer = await self._ahandle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
@@ -1319,17 +1403,22 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
Returns:
|
||||
Final answer after feedback.
|
||||
"""
|
||||
output_str = (
|
||||
str(formatted_answer.output)
|
||||
if isinstance(formatted_answer.output, BaseModel)
|
||||
else formatted_answer.output
|
||||
)
|
||||
human_feedback = self._ask_human_input(output_str)
|
||||
provider = get_provider()
|
||||
return provider.handle_feedback(formatted_answer, self)
|
||||
|
||||
if self._is_training_mode():
|
||||
return self._handle_training_feedback(formatted_answer, human_feedback)
|
||||
async def _ahandle_human_feedback(
|
||||
self, formatted_answer: AgentFinish
|
||||
) -> AgentFinish:
|
||||
"""Process human feedback asynchronously and refine answer.
|
||||
|
||||
return self._handle_regular_feedback(formatted_answer, human_feedback)
|
||||
Args:
|
||||
formatted_answer: Initial agent result.
|
||||
|
||||
Returns:
|
||||
Final answer after feedback.
|
||||
"""
|
||||
provider = get_provider()
|
||||
return await provider.handle_feedback_async(formatted_answer, self)
|
||||
|
||||
def _is_training_mode(self) -> bool:
|
||||
"""Check if training mode is active.
|
||||
@@ -1339,101 +1428,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
"""
|
||||
return bool(self.crew and self.crew._train)
|
||||
|
||||
def _handle_training_feedback(
|
||||
self, initial_answer: AgentFinish, feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process training feedback and generate improved answer.
|
||||
|
||||
Args:
|
||||
initial_answer: Initial agent output.
|
||||
feedback: Training feedback.
|
||||
|
||||
Returns:
|
||||
Improved answer.
|
||||
"""
|
||||
self._handle_crew_training_output(initial_answer, feedback)
|
||||
self.state.messages.append(
|
||||
format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
|
||||
# Re-run flow for improved answer
|
||||
self.state.iterations = 0
|
||||
self.state.is_finished = False
|
||||
self.state.current_answer = None
|
||||
|
||||
self.kickoff()
|
||||
|
||||
# Get improved answer from state
|
||||
improved_answer = self.state.current_answer
|
||||
if not isinstance(improved_answer, AgentFinish):
|
||||
raise RuntimeError(
|
||||
"Training feedback iteration did not produce final answer"
|
||||
)
|
||||
|
||||
self._handle_crew_training_output(improved_answer)
|
||||
self.state.ask_for_human_input = False
|
||||
return improved_answer
|
||||
|
||||
def _handle_regular_feedback(
|
||||
self, current_answer: AgentFinish, initial_feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process regular feedback iteratively until user is satisfied.
|
||||
|
||||
Args:
|
||||
current_answer: Current agent output.
|
||||
initial_feedback: Initial user feedback.
|
||||
|
||||
Returns:
|
||||
Final answer after iterations.
|
||||
"""
|
||||
feedback = initial_feedback
|
||||
answer = current_answer
|
||||
|
||||
while self.state.ask_for_human_input:
|
||||
if feedback.strip() == "":
|
||||
self.state.ask_for_human_input = False
|
||||
else:
|
||||
answer = self._process_feedback_iteration(feedback)
|
||||
output_str = (
|
||||
str(answer.output)
|
||||
if isinstance(answer.output, BaseModel)
|
||||
else answer.output
|
||||
)
|
||||
feedback = self._ask_human_input(output_str)
|
||||
|
||||
return answer
|
||||
|
||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
|
||||
"""Process a single feedback iteration and generate updated response.
|
||||
|
||||
Args:
|
||||
feedback: User feedback.
|
||||
|
||||
Returns:
|
||||
Updated agent response.
|
||||
"""
|
||||
self.state.messages.append(
|
||||
format_message_for_llm(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
|
||||
# Re-run flow
|
||||
self.state.iterations = 0
|
||||
self.state.is_finished = False
|
||||
self.state.current_answer = None
|
||||
|
||||
self.kickoff()
|
||||
|
||||
# Get answer from state
|
||||
answer = self.state.current_answer
|
||||
if not isinstance(answer, AgentFinish):
|
||||
raise RuntimeError("Feedback iteration did not produce final answer")
|
||||
|
||||
return answer
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: Any, _handler: GetCoreSchemaHandler
|
||||
|
||||
@@ -28,6 +28,8 @@ Example:
|
||||
```
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from crewai.flow.async_feedback.providers import ConsoleProvider
|
||||
from crewai.flow.async_feedback.types import (
|
||||
HumanFeedbackPending,
|
||||
@@ -41,4 +43,15 @@ __all__ = [
|
||||
"HumanFeedbackPending",
|
||||
"HumanFeedbackProvider",
|
||||
"PendingFeedbackContext",
|
||||
"_extension_exports",
|
||||
]
|
||||
|
||||
_extension_exports: dict[str, Any] = {}
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
|
||||
"""Support extensions via dynamic attribute lookup."""
|
||||
if name in _extension_exports:
|
||||
return _extension_exports[name]
|
||||
msg = f"module {__name__!r} has no attribute {name!r}"
|
||||
raise AttributeError(msg)
|
||||
|
||||
@@ -7,7 +7,14 @@ for building event-driven workflows with conditional execution and routing.
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Sequence
|
||||
from collections.abc import (
|
||||
Callable,
|
||||
ItemsView,
|
||||
Iterator,
|
||||
KeysView,
|
||||
Sequence,
|
||||
ValuesView,
|
||||
)
|
||||
from concurrent.futures import Future
|
||||
import copy
|
||||
import inspect
|
||||
@@ -45,6 +52,7 @@ from crewai.events.listeners.tracing.utils import (
|
||||
has_user_declined_tracing,
|
||||
set_tracing_enabled,
|
||||
should_enable_tracing,
|
||||
should_suppress_tracing_messages,
|
||||
)
|
||||
from crewai.events.types.flow_events import (
|
||||
FlowCreatedEvent,
|
||||
@@ -408,6 +416,132 @@ def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition
|
||||
return {"type": AND_CONDITION, "conditions": processed_conditions}
|
||||
|
||||
|
||||
class LockedListProxy(Generic[T]):
|
||||
"""Thread-safe proxy for list operations.
|
||||
|
||||
Wraps a list and uses a lock for all mutating operations.
|
||||
"""
|
||||
|
||||
def __init__(self, lst: list[T], lock: threading.Lock) -> None:
|
||||
self._list = lst
|
||||
self._lock = lock
|
||||
|
||||
def append(self, item: T) -> None:
|
||||
with self._lock:
|
||||
self._list.append(item)
|
||||
|
||||
def extend(self, items: list[T]) -> None:
|
||||
with self._lock:
|
||||
self._list.extend(items)
|
||||
|
||||
def insert(self, index: int, item: T) -> None:
|
||||
with self._lock:
|
||||
self._list.insert(index, item)
|
||||
|
||||
def remove(self, item: T) -> None:
|
||||
with self._lock:
|
||||
self._list.remove(item)
|
||||
|
||||
def pop(self, index: int = -1) -> T:
|
||||
with self._lock:
|
||||
return self._list.pop(index)
|
||||
|
||||
def clear(self) -> None:
|
||||
with self._lock:
|
||||
self._list.clear()
|
||||
|
||||
def __setitem__(self, index: int, value: T) -> None:
|
||||
with self._lock:
|
||||
self._list[index] = value
|
||||
|
||||
def __delitem__(self, index: int) -> None:
|
||||
with self._lock:
|
||||
del self._list[index]
|
||||
|
||||
def __getitem__(self, index: int) -> T:
|
||||
return self._list[index]
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._list)
|
||||
|
||||
def __iter__(self) -> Iterator[T]:
|
||||
return iter(self._list)
|
||||
|
||||
def __contains__(self, item: object) -> bool:
|
||||
return item in self._list
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return repr(self._list)
|
||||
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self._list)
|
||||
|
||||
|
||||
class LockedDictProxy(Generic[T]):
|
||||
"""Thread-safe proxy for dict operations.
|
||||
|
||||
Wraps a dict and uses a lock for all mutating operations.
|
||||
"""
|
||||
|
||||
def __init__(self, d: dict[str, T], lock: threading.Lock) -> None:
|
||||
self._dict = d
|
||||
self._lock = lock
|
||||
|
||||
def __setitem__(self, key: str, value: T) -> None:
|
||||
with self._lock:
|
||||
self._dict[key] = value
|
||||
|
||||
def __delitem__(self, key: str) -> None:
|
||||
with self._lock:
|
||||
del self._dict[key]
|
||||
|
||||
def pop(self, key: str, *default: T) -> T:
|
||||
with self._lock:
|
||||
return self._dict.pop(key, *default)
|
||||
|
||||
def update(self, other: dict[str, T]) -> None:
|
||||
with self._lock:
|
||||
self._dict.update(other)
|
||||
|
||||
def clear(self) -> None:
|
||||
with self._lock:
|
||||
self._dict.clear()
|
||||
|
||||
def setdefault(self, key: str, default: T) -> T:
|
||||
with self._lock:
|
||||
return self._dict.setdefault(key, default)
|
||||
|
||||
def __getitem__(self, key: str) -> T:
|
||||
return self._dict[key]
|
||||
|
||||
def __len__(self) -> int:
|
||||
return len(self._dict)
|
||||
|
||||
def __iter__(self) -> Iterator[str]:
|
||||
return iter(self._dict)
|
||||
|
||||
def __contains__(self, key: object) -> bool:
|
||||
return key in self._dict
|
||||
|
||||
def keys(self) -> KeysView[str]:
|
||||
return self._dict.keys()
|
||||
|
||||
def values(self) -> ValuesView[T]:
|
||||
return self._dict.values()
|
||||
|
||||
def items(self) -> ItemsView[str, T]:
|
||||
return self._dict.items()
|
||||
|
||||
def get(self, key: str, default: T | None = None) -> T | None:
|
||||
return self._dict.get(key, default)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return repr(self._dict)
|
||||
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self._dict)
|
||||
|
||||
|
||||
class StateProxy(Generic[T]):
|
||||
"""Proxy that provides thread-safe access to flow state.
|
||||
|
||||
@@ -422,7 +556,13 @@ class StateProxy(Generic[T]):
|
||||
object.__setattr__(self, "_proxy_lock", lock)
|
||||
|
||||
def __getattr__(self, name: str) -> Any:
|
||||
return getattr(object.__getattribute__(self, "_proxy_state"), name)
|
||||
value = getattr(object.__getattribute__(self, "_proxy_state"), name)
|
||||
lock = object.__getattribute__(self, "_proxy_lock")
|
||||
if isinstance(value, list):
|
||||
return LockedListProxy(value, lock)
|
||||
if isinstance(value, dict):
|
||||
return LockedDictProxy(value, lock)
|
||||
return value
|
||||
|
||||
def __setattr__(self, name: str, value: Any) -> None:
|
||||
if name in ("_proxy_state", "_proxy_lock"):
|
||||
@@ -1592,7 +1732,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
reset_emission_counter()
|
||||
reset_last_event_id()
|
||||
|
||||
# Emit FlowStartedEvent and log the start of the flow.
|
||||
if not self.suppress_flow_events:
|
||||
future = crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -1603,7 +1742,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
),
|
||||
)
|
||||
if future:
|
||||
self._event_futures.append(future)
|
||||
try:
|
||||
await asyncio.wrap_future(future)
|
||||
except Exception:
|
||||
logger.warning("FlowStartedEvent handler failed", exc_info=True)
|
||||
self._log_flow_event(
|
||||
f"Flow started with ID: {self.flow_id}", color="bold magenta"
|
||||
)
|
||||
@@ -1695,6 +1837,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
final_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
|
||||
if self._event_futures:
|
||||
await asyncio.gather(
|
||||
*[asyncio.wrap_future(f) for f in self._event_futures]
|
||||
)
|
||||
self._event_futures.clear()
|
||||
|
||||
if not self.suppress_flow_events:
|
||||
future = crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -1706,13 +1854,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
),
|
||||
)
|
||||
if future:
|
||||
self._event_futures.append(future)
|
||||
|
||||
if self._event_futures:
|
||||
await asyncio.gather(
|
||||
*[asyncio.wrap_future(f) for f in self._event_futures]
|
||||
)
|
||||
self._event_futures.clear()
|
||||
try:
|
||||
await asyncio.wrap_future(future)
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"FlowFinishedEvent handler failed", exc_info=True
|
||||
)
|
||||
|
||||
if not self.suppress_flow_events:
|
||||
trace_listener = TraceCollectionListener()
|
||||
@@ -1787,40 +1934,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
await self._execute_listeners(start_method_name, result, finished_event_id)
|
||||
# Then execute listeners for the router result (e.g., "approved")
|
||||
router_result_trigger = FlowMethodName(str(result))
|
||||
listeners_for_result = self._find_triggered_methods(
|
||||
router_result_trigger, router_only=False
|
||||
listener_result = (
|
||||
self.last_human_feedback
|
||||
if self.last_human_feedback is not None
|
||||
else result
|
||||
)
|
||||
await self._execute_listeners(
|
||||
router_result_trigger, listener_result, finished_event_id
|
||||
)
|
||||
if listeners_for_result:
|
||||
# Pass the HumanFeedbackResult if available
|
||||
listener_result = (
|
||||
self.last_human_feedback
|
||||
if self.last_human_feedback is not None
|
||||
else result
|
||||
)
|
||||
racing_group = self._get_racing_group_for_listeners(
|
||||
listeners_for_result
|
||||
)
|
||||
if racing_group:
|
||||
racing_members, _ = racing_group
|
||||
other_listeners = [
|
||||
name
|
||||
for name in listeners_for_result
|
||||
if name not in racing_members
|
||||
]
|
||||
await self._execute_racing_listeners(
|
||||
racing_members,
|
||||
other_listeners,
|
||||
listener_result,
|
||||
finished_event_id,
|
||||
)
|
||||
else:
|
||||
tasks = [
|
||||
self._execute_single_listener(
|
||||
listener_name, listener_result, finished_event_id
|
||||
)
|
||||
for listener_name in listeners_for_result
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
else:
|
||||
await self._execute_listeners(start_method_name, result, finished_event_id)
|
||||
|
||||
@@ -2026,15 +2147,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
router_input = router_result_to_feedback.get(
|
||||
str(current_trigger), current_result
|
||||
)
|
||||
current_triggering_event_id = await self._execute_single_listener(
|
||||
(
|
||||
router_result,
|
||||
current_triggering_event_id,
|
||||
) = await self._execute_single_listener(
|
||||
router_name, router_input, current_triggering_event_id
|
||||
)
|
||||
# After executing router, the router's result is the path
|
||||
router_result = (
|
||||
self._method_outputs[-1] if self._method_outputs else None
|
||||
)
|
||||
if router_result: # Only add non-None results
|
||||
router_results.append(router_result)
|
||||
router_results.append(FlowMethodName(str(router_result)))
|
||||
# If this was a human_feedback router, map the outcome to the feedback
|
||||
if self.last_human_feedback is not None:
|
||||
router_result_to_feedback[str(router_result)] = (
|
||||
@@ -2074,12 +2194,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
racing_members,
|
||||
other_listeners,
|
||||
listener_result,
|
||||
triggering_event_id,
|
||||
current_triggering_event_id,
|
||||
)
|
||||
else:
|
||||
tasks = [
|
||||
self._execute_single_listener(
|
||||
listener_name, listener_result, triggering_event_id
|
||||
listener_name,
|
||||
listener_result,
|
||||
current_triggering_event_id,
|
||||
)
|
||||
for listener_name in listeners_triggered
|
||||
]
|
||||
@@ -2262,7 +2384,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
listener_name: FlowMethodName,
|
||||
result: Any,
|
||||
triggering_event_id: str | None = None,
|
||||
) -> str | None:
|
||||
) -> tuple[Any, str | None]:
|
||||
"""Executes a single listener method with proper event handling.
|
||||
|
||||
This internal method manages the execution of an individual listener,
|
||||
@@ -2275,8 +2397,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
used for causal chain tracking.
|
||||
|
||||
Returns:
|
||||
The event_id of the MethodExecutionFinishedEvent emitted by this listener,
|
||||
or None if events are suppressed.
|
||||
A tuple of (listener_result, event_id) where listener_result is the return
|
||||
value of the listener method and event_id is the MethodExecutionFinishedEvent
|
||||
id, or (None, None) if skipped during resumption.
|
||||
|
||||
Note:
|
||||
- Inspects method signature to determine if it accepts the trigger result
|
||||
@@ -2302,7 +2425,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
):
|
||||
# This conditional start was executed, continue its chain
|
||||
await self._execute_start_method(start_method_name)
|
||||
return None
|
||||
return (None, None)
|
||||
# For cyclic flows, clear from completed to allow re-execution
|
||||
self._completed_methods.discard(listener_name)
|
||||
# Also clear from fired OR listeners for cyclic flows
|
||||
@@ -2340,46 +2463,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
listener_name, listener_result, finished_event_id
|
||||
)
|
||||
|
||||
# If this listener is also a router (e.g., has @human_feedback with emit),
|
||||
# we need to trigger listeners for the router result as well
|
||||
if listener_name in self._routers and listener_result is not None:
|
||||
router_result_trigger = FlowMethodName(str(listener_result))
|
||||
listeners_for_result = self._find_triggered_methods(
|
||||
router_result_trigger, router_only=False
|
||||
)
|
||||
if listeners_for_result:
|
||||
# Pass the HumanFeedbackResult if available
|
||||
feedback_result = (
|
||||
self.last_human_feedback
|
||||
if self.last_human_feedback is not None
|
||||
else listener_result
|
||||
)
|
||||
racing_group = self._get_racing_group_for_listeners(
|
||||
listeners_for_result
|
||||
)
|
||||
if racing_group:
|
||||
racing_members, _ = racing_group
|
||||
other_listeners = [
|
||||
name
|
||||
for name in listeners_for_result
|
||||
if name not in racing_members
|
||||
]
|
||||
await self._execute_racing_listeners(
|
||||
racing_members,
|
||||
other_listeners,
|
||||
feedback_result,
|
||||
finished_event_id,
|
||||
)
|
||||
else:
|
||||
tasks = [
|
||||
self._execute_single_listener(
|
||||
name, feedback_result, finished_event_id
|
||||
)
|
||||
for name in listeners_for_result
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
return finished_event_id
|
||||
return (listener_result, finished_event_id)
|
||||
|
||||
except Exception as e:
|
||||
# Don't log HumanFeedbackPending as an error - it's expected control flow
|
||||
@@ -2626,6 +2710,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
@staticmethod
|
||||
def _show_tracing_disabled_message() -> None:
|
||||
"""Show a message when tracing is disabled."""
|
||||
if should_suppress_tracing_messages():
|
||||
return
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@@ -3,7 +3,12 @@ from __future__ import annotations
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from crewai.events.event_listener import event_listener
|
||||
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
|
||||
from crewai.hooks.types import (
|
||||
AfterLLMCallHookCallable,
|
||||
AfterLLMCallHookType,
|
||||
BeforeLLMCallHookCallable,
|
||||
BeforeLLMCallHookType,
|
||||
)
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
|
||||
@@ -149,12 +154,12 @@ class LLMCallHookContext:
|
||||
event_listener.formatter.resume_live_updates()
|
||||
|
||||
|
||||
_before_llm_call_hooks: list[BeforeLLMCallHookType] = []
|
||||
_after_llm_call_hooks: list[AfterLLMCallHookType] = []
|
||||
_before_llm_call_hooks: list[BeforeLLMCallHookType | BeforeLLMCallHookCallable] = []
|
||||
_after_llm_call_hooks: list[AfterLLMCallHookType | AfterLLMCallHookCallable] = []
|
||||
|
||||
|
||||
def register_before_llm_call_hook(
|
||||
hook: BeforeLLMCallHookType,
|
||||
hook: BeforeLLMCallHookType | BeforeLLMCallHookCallable,
|
||||
) -> None:
|
||||
"""Register a global before_llm_call hook.
|
||||
|
||||
@@ -190,7 +195,7 @@ def register_before_llm_call_hook(
|
||||
|
||||
|
||||
def register_after_llm_call_hook(
|
||||
hook: AfterLLMCallHookType,
|
||||
hook: AfterLLMCallHookType | AfterLLMCallHookCallable,
|
||||
) -> None:
|
||||
"""Register a global after_llm_call hook.
|
||||
|
||||
@@ -217,7 +222,9 @@ def register_after_llm_call_hook(
|
||||
_after_llm_call_hooks.append(hook)
|
||||
|
||||
|
||||
def get_before_llm_call_hooks() -> list[BeforeLLMCallHookType]:
|
||||
def get_before_llm_call_hooks() -> list[
|
||||
BeforeLLMCallHookType | BeforeLLMCallHookCallable
|
||||
]:
|
||||
"""Get all registered global before_llm_call hooks.
|
||||
|
||||
Returns:
|
||||
@@ -226,7 +233,7 @@ def get_before_llm_call_hooks() -> list[BeforeLLMCallHookType]:
|
||||
return _before_llm_call_hooks.copy()
|
||||
|
||||
|
||||
def get_after_llm_call_hooks() -> list[AfterLLMCallHookType]:
|
||||
def get_after_llm_call_hooks() -> list[AfterLLMCallHookType | AfterLLMCallHookCallable]:
|
||||
"""Get all registered global after_llm_call hooks.
|
||||
|
||||
Returns:
|
||||
@@ -236,7 +243,7 @@ def get_after_llm_call_hooks() -> list[AfterLLMCallHookType]:
|
||||
|
||||
|
||||
def unregister_before_llm_call_hook(
|
||||
hook: BeforeLLMCallHookType,
|
||||
hook: BeforeLLMCallHookType | BeforeLLMCallHookCallable,
|
||||
) -> bool:
|
||||
"""Unregister a specific global before_llm_call hook.
|
||||
|
||||
@@ -262,7 +269,7 @@ def unregister_before_llm_call_hook(
|
||||
|
||||
|
||||
def unregister_after_llm_call_hook(
|
||||
hook: AfterLLMCallHookType,
|
||||
hook: AfterLLMCallHookType | AfterLLMCallHookCallable,
|
||||
) -> bool:
|
||||
"""Unregister a specific global after_llm_call hook.
|
||||
|
||||
|
||||
@@ -3,7 +3,12 @@ from __future__ import annotations
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from crewai.events.event_listener import event_listener
|
||||
from crewai.hooks.types import AfterToolCallHookType, BeforeToolCallHookType
|
||||
from crewai.hooks.types import (
|
||||
AfterToolCallHookCallable,
|
||||
AfterToolCallHookType,
|
||||
BeforeToolCallHookCallable,
|
||||
BeforeToolCallHookType,
|
||||
)
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
|
||||
@@ -112,12 +117,12 @@ class ToolCallHookContext:
|
||||
|
||||
|
||||
# Global hook registries
|
||||
_before_tool_call_hooks: list[BeforeToolCallHookType] = []
|
||||
_after_tool_call_hooks: list[AfterToolCallHookType] = []
|
||||
_before_tool_call_hooks: list[BeforeToolCallHookType | BeforeToolCallHookCallable] = []
|
||||
_after_tool_call_hooks: list[AfterToolCallHookType | AfterToolCallHookCallable] = []
|
||||
|
||||
|
||||
def register_before_tool_call_hook(
|
||||
hook: BeforeToolCallHookType,
|
||||
hook: BeforeToolCallHookType | BeforeToolCallHookCallable,
|
||||
) -> None:
|
||||
"""Register a global before_tool_call hook.
|
||||
|
||||
@@ -154,7 +159,7 @@ def register_before_tool_call_hook(
|
||||
|
||||
|
||||
def register_after_tool_call_hook(
|
||||
hook: AfterToolCallHookType,
|
||||
hook: AfterToolCallHookType | AfterToolCallHookCallable,
|
||||
) -> None:
|
||||
"""Register a global after_tool_call hook.
|
||||
|
||||
@@ -184,7 +189,9 @@ def register_after_tool_call_hook(
|
||||
_after_tool_call_hooks.append(hook)
|
||||
|
||||
|
||||
def get_before_tool_call_hooks() -> list[BeforeToolCallHookType]:
|
||||
def get_before_tool_call_hooks() -> list[
|
||||
BeforeToolCallHookType | BeforeToolCallHookCallable
|
||||
]:
|
||||
"""Get all registered global before_tool_call hooks.
|
||||
|
||||
Returns:
|
||||
@@ -193,7 +200,9 @@ def get_before_tool_call_hooks() -> list[BeforeToolCallHookType]:
|
||||
return _before_tool_call_hooks.copy()
|
||||
|
||||
|
||||
def get_after_tool_call_hooks() -> list[AfterToolCallHookType]:
|
||||
def get_after_tool_call_hooks() -> list[
|
||||
AfterToolCallHookType | AfterToolCallHookCallable
|
||||
]:
|
||||
"""Get all registered global after_tool_call hooks.
|
||||
|
||||
Returns:
|
||||
@@ -203,7 +212,7 @@ def get_after_tool_call_hooks() -> list[AfterToolCallHookType]:
|
||||
|
||||
|
||||
def unregister_before_tool_call_hook(
|
||||
hook: BeforeToolCallHookType,
|
||||
hook: BeforeToolCallHookType | BeforeToolCallHookCallable,
|
||||
) -> bool:
|
||||
"""Unregister a specific global before_tool_call hook.
|
||||
|
||||
@@ -229,7 +238,7 @@ def unregister_before_tool_call_hook(
|
||||
|
||||
|
||||
def unregister_after_tool_call_hook(
|
||||
hook: AfterToolCallHookType,
|
||||
hook: AfterToolCallHookType | AfterToolCallHookCallable,
|
||||
) -> bool:
|
||||
"""Unregister a specific global after_tool_call hook.
|
||||
|
||||
|
||||
1
lib/crewai/src/crewai/knowledge/source/utils/__init__.py
Normal file
1
lib/crewai/src/crewai/knowledge/source/utils/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Knowledge source utilities."""
|
||||
@@ -0,0 +1,70 @@
|
||||
"""Helper utilities for knowledge sources."""
|
||||
|
||||
from typing import Any, ClassVar
|
||||
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource
|
||||
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource
|
||||
from crewai.knowledge.source.json_knowledge_source import JSONKnowledgeSource
|
||||
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource
|
||||
from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
|
||||
|
||||
|
||||
class SourceHelper:
|
||||
"""Helper class for creating and managing knowledge sources."""
|
||||
|
||||
SUPPORTED_FILE_TYPES: ClassVar[list[str]] = [
|
||||
".csv",
|
||||
".pdf",
|
||||
".json",
|
||||
".txt",
|
||||
".xlsx",
|
||||
".xls",
|
||||
]
|
||||
|
||||
_FILE_TYPE_MAP: ClassVar[dict[str, type[BaseKnowledgeSource]]] = {
|
||||
".csv": CSVKnowledgeSource,
|
||||
".pdf": PDFKnowledgeSource,
|
||||
".json": JSONKnowledgeSource,
|
||||
".txt": TextFileKnowledgeSource,
|
||||
".xlsx": ExcelKnowledgeSource,
|
||||
".xls": ExcelKnowledgeSource,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def is_supported_file(cls, file_path: str) -> bool:
|
||||
"""Check if a file type is supported.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file.
|
||||
|
||||
Returns:
|
||||
True if the file type is supported.
|
||||
"""
|
||||
return file_path.lower().endswith(tuple(cls.SUPPORTED_FILE_TYPES))
|
||||
|
||||
@classmethod
|
||||
def get_source(
|
||||
cls, file_path: str, metadata: dict[str, Any] | None = None
|
||||
) -> BaseKnowledgeSource:
|
||||
"""Create appropriate KnowledgeSource based on file extension.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file.
|
||||
metadata: Optional metadata to attach to the source.
|
||||
|
||||
Returns:
|
||||
The appropriate KnowledgeSource instance.
|
||||
|
||||
Raises:
|
||||
ValueError: If the file type is not supported.
|
||||
"""
|
||||
if not cls.is_supported_file(file_path):
|
||||
raise ValueError(f"Unsupported file type: {file_path}")
|
||||
|
||||
lower_path = file_path.lower()
|
||||
for ext, source_cls in cls._FILE_TYPE_MAP.items():
|
||||
if lower_path.endswith(ext):
|
||||
return source_cls(file_path=[file_path], metadata=metadata)
|
||||
|
||||
raise ValueError(f"Unsupported file type: {file_path}")
|
||||
@@ -1580,10 +1580,12 @@ class AnthropicCompletion(BaseLLM):
|
||||
usage = response.usage
|
||||
input_tokens = getattr(usage, "input_tokens", 0)
|
||||
output_tokens = getattr(usage, "output_tokens", 0)
|
||||
cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
|
||||
return {
|
||||
"input_tokens": input_tokens,
|
||||
"output_tokens": output_tokens,
|
||||
"total_tokens": input_tokens + output_tokens,
|
||||
"cached_prompt_tokens": cache_read_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
|
||||
@@ -425,8 +425,9 @@ class AzureCompletion(BaseLLM):
|
||||
"stream": self.stream,
|
||||
}
|
||||
|
||||
model_extras: dict[str, Any] = {}
|
||||
if self.stream:
|
||||
params["model_extras"] = {"stream_options": {"include_usage": True}}
|
||||
model_extras["stream_options"] = {"include_usage": True}
|
||||
|
||||
if response_model and self.is_openai_model:
|
||||
model_description = generate_model_description(response_model)
|
||||
@@ -464,6 +465,13 @@ class AzureCompletion(BaseLLM):
|
||||
params["tools"] = self._convert_tools_for_interference(tools)
|
||||
params["tool_choice"] = "auto"
|
||||
|
||||
prompt_cache_key = self.additional_params.get("prompt_cache_key")
|
||||
if prompt_cache_key:
|
||||
model_extras["prompt_cache_key"] = prompt_cache_key
|
||||
|
||||
if model_extras:
|
||||
params["model_extras"] = model_extras
|
||||
|
||||
additional_params = self.additional_params
|
||||
additional_drop_params = additional_params.get("additional_drop_params")
|
||||
drop_params = additional_params.get("drop_params")
|
||||
@@ -1063,10 +1071,15 @@ class AzureCompletion(BaseLLM):
|
||||
"""Extract token usage from Azure response."""
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
usage = response.usage
|
||||
cached_tokens = 0
|
||||
prompt_details = getattr(usage, "prompt_tokens_details", None)
|
||||
if prompt_details:
|
||||
cached_tokens = getattr(prompt_details, "cached_tokens", 0) or 0
|
||||
return {
|
||||
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
|
||||
"completion_tokens": getattr(usage, "completion_tokens", 0),
|
||||
"total_tokens": getattr(usage, "total_tokens", 0),
|
||||
"cached_prompt_tokens": cached_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
|
||||
@@ -1295,11 +1295,13 @@ class GeminiCompletion(BaseLLM):
|
||||
"""Extract token usage from Gemini response."""
|
||||
if response.usage_metadata:
|
||||
usage = response.usage_metadata
|
||||
cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0
|
||||
return {
|
||||
"prompt_token_count": getattr(usage, "prompt_token_count", 0),
|
||||
"candidates_token_count": getattr(usage, "candidates_token_count", 0),
|
||||
"total_token_count": getattr(usage, "total_token_count", 0),
|
||||
"total_tokens": getattr(usage, "total_token_count", 0),
|
||||
"cached_prompt_tokens": cached_tokens,
|
||||
}
|
||||
return {"total_tokens": 0}
|
||||
|
||||
|
||||
@@ -1094,11 +1094,7 @@ class OpenAICompletion(BaseLLM):
|
||||
if reasoning_items:
|
||||
self._last_reasoning_items = reasoning_items
|
||||
if event.response and event.response.usage:
|
||||
usage = {
|
||||
"prompt_tokens": event.response.usage.input_tokens,
|
||||
"completion_tokens": event.response.usage.output_tokens,
|
||||
"total_tokens": event.response.usage.total_tokens,
|
||||
}
|
||||
usage = self._extract_responses_token_usage(event.response)
|
||||
self._track_token_usage_internal(usage)
|
||||
|
||||
# If parse_tool_outputs is enabled, return structured result
|
||||
@@ -1222,11 +1218,7 @@ class OpenAICompletion(BaseLLM):
|
||||
if reasoning_items:
|
||||
self._last_reasoning_items = reasoning_items
|
||||
if event.response and event.response.usage:
|
||||
usage = {
|
||||
"prompt_tokens": event.response.usage.input_tokens,
|
||||
"completion_tokens": event.response.usage.output_tokens,
|
||||
"total_tokens": event.response.usage.total_tokens,
|
||||
}
|
||||
usage = self._extract_responses_token_usage(event.response)
|
||||
self._track_token_usage_internal(usage)
|
||||
|
||||
# If parse_tool_outputs is enabled, return structured result
|
||||
@@ -1310,11 +1302,18 @@ class OpenAICompletion(BaseLLM):
|
||||
def _extract_responses_token_usage(self, response: Response) -> dict[str, Any]:
|
||||
"""Extract token usage from Responses API response."""
|
||||
if response.usage:
|
||||
return {
|
||||
result = {
|
||||
"prompt_tokens": response.usage.input_tokens,
|
||||
"completion_tokens": response.usage.output_tokens,
|
||||
"total_tokens": response.usage.total_tokens,
|
||||
}
|
||||
# Extract cached prompt tokens from input_tokens_details
|
||||
input_details = getattr(response.usage, "input_tokens_details", None)
|
||||
if input_details:
|
||||
result["cached_prompt_tokens"] = (
|
||||
getattr(input_details, "cached_tokens", 0) or 0
|
||||
)
|
||||
return result
|
||||
return {"total_tokens": 0}
|
||||
|
||||
def _extract_builtin_tool_outputs(self, response: Response) -> ResponsesAPIResult:
|
||||
@@ -1696,6 +1695,99 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
return content
|
||||
|
||||
def _finalize_streaming_response(
|
||||
self,
|
||||
full_response: str,
|
||||
tool_calls: dict[int, dict[str, Any]],
|
||||
usage_data: dict[str, int],
|
||||
params: dict[str, Any],
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
) -> str | list[dict[str, Any]]:
|
||||
"""Finalize a streaming response with usage tracking, tool call handling, and events.
|
||||
|
||||
Args:
|
||||
full_response: The accumulated text response from the stream.
|
||||
tool_calls: Accumulated tool calls from the stream, keyed by index.
|
||||
usage_data: Token usage data from the stream.
|
||||
params: The completion parameters containing messages.
|
||||
available_functions: Available functions for tool calling.
|
||||
from_task: Task that initiated the call.
|
||||
from_agent: Agent that initiated the call.
|
||||
|
||||
Returns:
|
||||
Tool calls list when tools were invoked without available_functions,
|
||||
tool execution result when available_functions is provided,
|
||||
or the text response string.
|
||||
"""
|
||||
self._track_token_usage_internal(usage_data)
|
||||
|
||||
if tool_calls and not available_functions:
|
||||
tool_calls_list = [
|
||||
{
|
||||
"id": call_data["id"],
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": call_data["name"],
|
||||
"arguments": call_data["arguments"],
|
||||
},
|
||||
"index": call_data["index"],
|
||||
}
|
||||
for call_data in tool_calls.values()
|
||||
]
|
||||
self._emit_call_completed_event(
|
||||
response=tool_calls_list,
|
||||
call_type=LLMCallType.TOOL_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return tool_calls_list
|
||||
|
||||
if tool_calls and available_functions:
|
||||
for call_data in tool_calls.values():
|
||||
function_name = call_data["name"]
|
||||
arguments = call_data["arguments"]
|
||||
|
||||
if not function_name or not arguments:
|
||||
continue
|
||||
|
||||
if function_name not in available_functions:
|
||||
logging.warning(
|
||||
f"Function '{function_name}' not found in available functions"
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
function_args = json.loads(arguments)
|
||||
except json.JSONDecodeError as e:
|
||||
logging.error(f"Failed to parse streamed tool arguments: {e}")
|
||||
continue
|
||||
|
||||
result = self._handle_tool_execution(
|
||||
function_name=function_name,
|
||||
function_args=function_args,
|
||||
available_functions=available_functions,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
)
|
||||
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
full_response = self._apply_stop_words(full_response)
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=full_response,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
|
||||
return full_response
|
||||
|
||||
def _handle_streaming_completion(
|
||||
self,
|
||||
params: dict[str, Any],
|
||||
@@ -1703,7 +1795,7 @@ class OpenAICompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel:
|
||||
) -> str | list[dict[str, Any]] | BaseModel:
|
||||
"""Handle streaming chat completion."""
|
||||
full_response = ""
|
||||
tool_calls: dict[int, dict[str, Any]] = {}
|
||||
@@ -1820,54 +1912,20 @@ class OpenAICompletion(BaseLLM):
|
||||
response_id=response_id_stream,
|
||||
)
|
||||
|
||||
self._track_token_usage_internal(usage_data)
|
||||
|
||||
if tool_calls and available_functions:
|
||||
for call_data in tool_calls.values():
|
||||
function_name = call_data["name"]
|
||||
arguments = call_data["arguments"]
|
||||
|
||||
# Skip if function name is empty or arguments are empty
|
||||
if not function_name or not arguments:
|
||||
continue
|
||||
|
||||
# Check if function exists in available functions
|
||||
if function_name not in available_functions:
|
||||
logging.warning(
|
||||
f"Function '{function_name}' not found in available functions"
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
function_args = json.loads(arguments)
|
||||
except json.JSONDecodeError as e:
|
||||
logging.error(f"Failed to parse streamed tool arguments: {e}")
|
||||
continue
|
||||
|
||||
result = self._handle_tool_execution(
|
||||
function_name=function_name,
|
||||
function_args=function_args,
|
||||
available_functions=available_functions,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
)
|
||||
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
full_response = self._apply_stop_words(full_response)
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=full_response,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
result = self._finalize_streaming_response(
|
||||
full_response=full_response,
|
||||
tool_calls=tool_calls,
|
||||
usage_data=usage_data,
|
||||
params=params,
|
||||
available_functions=available_functions,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
|
||||
return self._invoke_after_llm_call_hooks(
|
||||
params["messages"], full_response, from_agent
|
||||
)
|
||||
if isinstance(result, str):
|
||||
return self._invoke_after_llm_call_hooks(
|
||||
params["messages"], result, from_agent
|
||||
)
|
||||
return result
|
||||
|
||||
async def _ahandle_completion(
|
||||
self,
|
||||
@@ -2016,7 +2074,7 @@ class OpenAICompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel:
|
||||
) -> str | list[dict[str, Any]] | BaseModel:
|
||||
"""Handle async streaming chat completion."""
|
||||
full_response = ""
|
||||
tool_calls: dict[int, dict[str, Any]] = {}
|
||||
@@ -2142,51 +2200,16 @@ class OpenAICompletion(BaseLLM):
|
||||
response_id=response_id_stream,
|
||||
)
|
||||
|
||||
self._track_token_usage_internal(usage_data)
|
||||
|
||||
if tool_calls and available_functions:
|
||||
for call_data in tool_calls.values():
|
||||
function_name = call_data["name"]
|
||||
arguments = call_data["arguments"]
|
||||
|
||||
if not function_name or not arguments:
|
||||
continue
|
||||
|
||||
if function_name not in available_functions:
|
||||
logging.warning(
|
||||
f"Function '{function_name}' not found in available functions"
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
function_args = json.loads(arguments)
|
||||
except json.JSONDecodeError as e:
|
||||
logging.error(f"Failed to parse streamed tool arguments: {e}")
|
||||
continue
|
||||
|
||||
result = self._handle_tool_execution(
|
||||
function_name=function_name,
|
||||
function_args=function_args,
|
||||
available_functions=available_functions,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
)
|
||||
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
full_response = self._apply_stop_words(full_response)
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=full_response,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
return self._finalize_streaming_response(
|
||||
full_response=full_response,
|
||||
tool_calls=tool_calls,
|
||||
usage_data=usage_data,
|
||||
params=params,
|
||||
available_functions=available_functions,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
|
||||
return full_response
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
"""Check if the model supports function calling."""
|
||||
return not self.is_o1_model
|
||||
@@ -2240,11 +2263,18 @@ class OpenAICompletion(BaseLLM):
|
||||
"""Extract token usage from OpenAI ChatCompletion or ChatCompletionChunk response."""
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
usage = response.usage
|
||||
return {
|
||||
result = {
|
||||
"prompt_tokens": getattr(usage, "prompt_tokens", 0),
|
||||
"completion_tokens": getattr(usage, "completion_tokens", 0),
|
||||
"total_tokens": getattr(usage, "total_tokens", 0),
|
||||
}
|
||||
# Extract cached prompt tokens from prompt_tokens_details
|
||||
prompt_details = getattr(usage, "prompt_tokens_details", None)
|
||||
if prompt_details:
|
||||
result["cached_prompt_tokens"] = (
|
||||
getattr(prompt_details, "cached_tokens", 0) or 0
|
||||
)
|
||||
return result
|
||||
return {"total_tokens": 0}
|
||||
|
||||
def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
|
||||
|
||||
@@ -27,6 +27,8 @@ if TYPE_CHECKING:
|
||||
from crewai import Agent, Task
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.hooks.llm_hooks import LLMCallHookContext
|
||||
from crewai.hooks.tool_hooks import ToolCallHookContext
|
||||
from crewai.project.wrappers import (
|
||||
CrewInstance,
|
||||
OutputJsonClass,
|
||||
@@ -34,6 +36,8 @@ if TYPE_CHECKING:
|
||||
)
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
|
||||
_post_initialize_crew_hooks: list[Callable[[Any], None]] = []
|
||||
|
||||
|
||||
class AgentConfig(TypedDict, total=False):
|
||||
"""Type definition for agent configuration dictionary.
|
||||
@@ -266,6 +270,9 @@ class CrewBaseMeta(type):
|
||||
instance.map_all_agent_variables()
|
||||
instance.map_all_task_variables()
|
||||
|
||||
for hook in _post_initialize_crew_hooks:
|
||||
hook(instance)
|
||||
|
||||
original_methods = {
|
||||
name: method
|
||||
for name, method in cls.__dict__.items()
|
||||
@@ -485,47 +492,61 @@ def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
|
||||
if has_agent_filter:
|
||||
agents_filter = hook_method._filter_agents
|
||||
|
||||
def make_filtered_before_llm(bound_fn, agents_list):
|
||||
def filtered(context):
|
||||
def make_filtered_before_llm(
|
||||
bound_fn: Callable[[LLMCallHookContext], bool | None],
|
||||
agents_list: list[str],
|
||||
) -> Callable[[LLMCallHookContext], bool | None]:
|
||||
def filtered(context: LLMCallHookContext) -> bool | None:
|
||||
if context.agent and context.agent.role not in agents_list:
|
||||
return None
|
||||
return bound_fn(context)
|
||||
|
||||
return filtered
|
||||
|
||||
final_hook = make_filtered_before_llm(bound_hook, agents_filter)
|
||||
before_llm_hook = make_filtered_before_llm(bound_hook, agents_filter)
|
||||
else:
|
||||
final_hook = bound_hook
|
||||
before_llm_hook = bound_hook
|
||||
|
||||
register_before_llm_call_hook(final_hook)
|
||||
instance._registered_hook_functions.append(("before_llm_call", final_hook))
|
||||
register_before_llm_call_hook(before_llm_hook)
|
||||
instance._registered_hook_functions.append(
|
||||
("before_llm_call", before_llm_hook)
|
||||
)
|
||||
|
||||
if hasattr(hook_method, "is_after_llm_call_hook"):
|
||||
if has_agent_filter:
|
||||
agents_filter = hook_method._filter_agents
|
||||
|
||||
def make_filtered_after_llm(bound_fn, agents_list):
|
||||
def filtered(context):
|
||||
def make_filtered_after_llm(
|
||||
bound_fn: Callable[[LLMCallHookContext], str | None],
|
||||
agents_list: list[str],
|
||||
) -> Callable[[LLMCallHookContext], str | None]:
|
||||
def filtered(context: LLMCallHookContext) -> str | None:
|
||||
if context.agent and context.agent.role not in agents_list:
|
||||
return None
|
||||
return bound_fn(context)
|
||||
|
||||
return filtered
|
||||
|
||||
final_hook = make_filtered_after_llm(bound_hook, agents_filter)
|
||||
after_llm_hook = make_filtered_after_llm(bound_hook, agents_filter)
|
||||
else:
|
||||
final_hook = bound_hook
|
||||
after_llm_hook = bound_hook
|
||||
|
||||
register_after_llm_call_hook(final_hook)
|
||||
instance._registered_hook_functions.append(("after_llm_call", final_hook))
|
||||
register_after_llm_call_hook(after_llm_hook)
|
||||
instance._registered_hook_functions.append(
|
||||
("after_llm_call", after_llm_hook)
|
||||
)
|
||||
|
||||
if hasattr(hook_method, "is_before_tool_call_hook"):
|
||||
if has_tool_filter or has_agent_filter:
|
||||
tools_filter = getattr(hook_method, "_filter_tools", None)
|
||||
agents_filter = getattr(hook_method, "_filter_agents", None)
|
||||
|
||||
def make_filtered_before_tool(bound_fn, tools_list, agents_list):
|
||||
def filtered(context):
|
||||
def make_filtered_before_tool(
|
||||
bound_fn: Callable[[ToolCallHookContext], bool | None],
|
||||
tools_list: list[str] | None,
|
||||
agents_list: list[str] | None,
|
||||
) -> Callable[[ToolCallHookContext], bool | None]:
|
||||
def filtered(context: ToolCallHookContext) -> bool | None:
|
||||
if tools_list and context.tool_name not in tools_list:
|
||||
return None
|
||||
if (
|
||||
@@ -538,22 +559,28 @@ def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
|
||||
|
||||
return filtered
|
||||
|
||||
final_hook = make_filtered_before_tool(
|
||||
before_tool_hook = make_filtered_before_tool(
|
||||
bound_hook, tools_filter, agents_filter
|
||||
)
|
||||
else:
|
||||
final_hook = bound_hook
|
||||
before_tool_hook = bound_hook
|
||||
|
||||
register_before_tool_call_hook(final_hook)
|
||||
instance._registered_hook_functions.append(("before_tool_call", final_hook))
|
||||
register_before_tool_call_hook(before_tool_hook)
|
||||
instance._registered_hook_functions.append(
|
||||
("before_tool_call", before_tool_hook)
|
||||
)
|
||||
|
||||
if hasattr(hook_method, "is_after_tool_call_hook"):
|
||||
if has_tool_filter or has_agent_filter:
|
||||
tools_filter = getattr(hook_method, "_filter_tools", None)
|
||||
agents_filter = getattr(hook_method, "_filter_agents", None)
|
||||
|
||||
def make_filtered_after_tool(bound_fn, tools_list, agents_list):
|
||||
def filtered(context):
|
||||
def make_filtered_after_tool(
|
||||
bound_fn: Callable[[ToolCallHookContext], str | None],
|
||||
tools_list: list[str] | None,
|
||||
agents_list: list[str] | None,
|
||||
) -> Callable[[ToolCallHookContext], str | None]:
|
||||
def filtered(context: ToolCallHookContext) -> str | None:
|
||||
if tools_list and context.tool_name not in tools_list:
|
||||
return None
|
||||
if (
|
||||
@@ -566,14 +593,16 @@ def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
|
||||
|
||||
return filtered
|
||||
|
||||
final_hook = make_filtered_after_tool(
|
||||
after_tool_hook = make_filtered_after_tool(
|
||||
bound_hook, tools_filter, agents_filter
|
||||
)
|
||||
else:
|
||||
final_hook = bound_hook
|
||||
after_tool_hook = bound_hook
|
||||
|
||||
register_after_tool_call_hook(final_hook)
|
||||
instance._registered_hook_functions.append(("after_tool_call", final_hook))
|
||||
register_after_tool_call_hook(after_tool_hook)
|
||||
instance._registered_hook_functions.append(
|
||||
("after_tool_call", after_tool_hook)
|
||||
)
|
||||
|
||||
instance._hooks_being_registered = False
|
||||
|
||||
|
||||
@@ -72,6 +72,8 @@ class CrewInstance(Protocol):
|
||||
__crew_metadata__: CrewMetadata
|
||||
_mcp_server_adapter: Any
|
||||
_all_methods: dict[str, Callable[..., Any]]
|
||||
_registered_hook_functions: list[tuple[str, Callable[..., Any]]]
|
||||
_hooks_being_registered: bool
|
||||
agents: list[Agent]
|
||||
tasks: list[Task]
|
||||
base_directory: Path
|
||||
|
||||
@@ -31,6 +31,8 @@ from pydantic_core import PydanticCustomError
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.context import reset_current_task_id, set_current_task_id
|
||||
from crewai.core.providers.content_processor import process_content
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.task_events import (
|
||||
TaskCompletedEvent,
|
||||
@@ -496,6 +498,7 @@ class Task(BaseModel):
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> TaskOutput:
|
||||
"""Execute the task synchronously."""
|
||||
self.start_time = datetime.datetime.now()
|
||||
return self._execute_core(agent, context, tools)
|
||||
|
||||
@property
|
||||
@@ -536,6 +539,7 @@ class Task(BaseModel):
|
||||
) -> None:
|
||||
"""Execute the task asynchronously with context handling."""
|
||||
try:
|
||||
self.start_time = datetime.datetime.now()
|
||||
result = self._execute_core(agent, context, tools)
|
||||
future.set_result(result)
|
||||
except Exception as e:
|
||||
@@ -548,6 +552,7 @@ class Task(BaseModel):
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> TaskOutput:
|
||||
"""Execute the task asynchronously using native async/await."""
|
||||
self.start_time = datetime.datetime.now()
|
||||
return await self._aexecute_core(agent, context, tools)
|
||||
|
||||
async def _aexecute_core(
|
||||
@@ -557,6 +562,7 @@ class Task(BaseModel):
|
||||
tools: list[Any] | None,
|
||||
) -> TaskOutput:
|
||||
"""Run the core execution logic of the task asynchronously."""
|
||||
task_id_token = set_current_task_id(str(self.id))
|
||||
self._store_input_files()
|
||||
try:
|
||||
agent = agent or self.agent
|
||||
@@ -566,8 +572,6 @@ class Task(BaseModel):
|
||||
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
|
||||
)
|
||||
|
||||
self.start_time = datetime.datetime.now()
|
||||
|
||||
self.prompt_context = context
|
||||
tools = tools or self.tools or []
|
||||
|
||||
@@ -579,6 +583,8 @@ class Task(BaseModel):
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
self._post_agent_execution(agent)
|
||||
|
||||
if not self._guardrails and not self._guardrail:
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
else:
|
||||
@@ -644,6 +650,7 @@ class Task(BaseModel):
|
||||
raise e # Re-raise the exception after emitting the event
|
||||
finally:
|
||||
clear_task_files(self.id)
|
||||
reset_current_task_id(task_id_token)
|
||||
|
||||
def _execute_core(
|
||||
self,
|
||||
@@ -652,6 +659,7 @@ class Task(BaseModel):
|
||||
tools: list[Any] | None,
|
||||
) -> TaskOutput:
|
||||
"""Run the core execution logic of the task."""
|
||||
task_id_token = set_current_task_id(str(self.id))
|
||||
self._store_input_files()
|
||||
try:
|
||||
agent = agent or self.agent
|
||||
@@ -661,8 +669,6 @@ class Task(BaseModel):
|
||||
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
|
||||
)
|
||||
|
||||
self.start_time = datetime.datetime.now()
|
||||
|
||||
self.prompt_context = context
|
||||
tools = tools or self.tools or []
|
||||
|
||||
@@ -674,6 +680,8 @@ class Task(BaseModel):
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
self._post_agent_execution(agent)
|
||||
|
||||
if not self._guardrails and not self._guardrail:
|
||||
pydantic_output, json_output = self._export_output(result)
|
||||
else:
|
||||
@@ -740,6 +748,10 @@ class Task(BaseModel):
|
||||
raise e # Re-raise the exception after emitting the event
|
||||
finally:
|
||||
clear_task_files(self.id)
|
||||
reset_current_task_id(task_id_token)
|
||||
|
||||
def _post_agent_execution(self, agent: BaseAgent) -> None:
|
||||
pass
|
||||
|
||||
def prompt(self) -> str:
|
||||
"""Generates the task prompt with optional markdown formatting.
|
||||
@@ -863,6 +875,11 @@ Follow these guidelines:
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Error interpolating description: {e!s}") from e
|
||||
|
||||
self.description = process_content(self.description, {"task": self})
|
||||
self._original_expected_output = process_content(
|
||||
self._original_expected_output, {"task": self}
|
||||
)
|
||||
|
||||
try:
|
||||
self.expected_output = interpolate_only(
|
||||
input_string=self._original_expected_output, inputs=inputs
|
||||
|
||||
@@ -6,6 +6,7 @@ Classes:
|
||||
HallucinationGuardrail: Placeholder guardrail that validates task outputs.
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from typing import Any
|
||||
|
||||
from crewai.llm import LLM
|
||||
@@ -13,32 +14,36 @@ from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.utilities.logger import Logger
|
||||
|
||||
|
||||
_validate_output_hook: Callable[..., tuple[bool, Any]] | None = None
|
||||
|
||||
|
||||
class HallucinationGuardrail:
|
||||
"""Placeholder for the HallucinationGuardrail feature.
|
||||
|
||||
Attributes:
|
||||
context: The reference context that outputs would be checked against.
|
||||
context: Optional reference context that outputs would be checked against.
|
||||
llm: The language model that would be used for evaluation.
|
||||
threshold: Optional minimum faithfulness score that would be required to pass.
|
||||
tool_response: Optional tool response information that would be used in evaluation.
|
||||
|
||||
Examples:
|
||||
>>> # Basic usage with default verdict logic
|
||||
>>> # Basic usage without context (uses task expected_output as context)
|
||||
>>> guardrail = HallucinationGuardrail(llm=agent.llm)
|
||||
|
||||
>>> # With context for reference
|
||||
>>> guardrail = HallucinationGuardrail(
|
||||
... context="AI helps with various tasks including analysis and generation.",
|
||||
... llm=agent.llm,
|
||||
... context="AI helps with various tasks including analysis and generation.",
|
||||
... )
|
||||
|
||||
>>> # With custom threshold for stricter validation
|
||||
>>> strict_guardrail = HallucinationGuardrail(
|
||||
... context="Quantum computing uses qubits in superposition.",
|
||||
... llm=agent.llm,
|
||||
... threshold=8.0, # Would require score >= 8 to pass in enterprise version
|
||||
... threshold=8.0, # Require score >= 8 to pass
|
||||
... )
|
||||
|
||||
>>> # With tool response for additional context
|
||||
>>> guardrail_with_tools = HallucinationGuardrail(
|
||||
... context="The current weather data",
|
||||
... llm=agent.llm,
|
||||
... tool_response="Weather API returned: Temperature 22°C, Humidity 65%",
|
||||
... )
|
||||
@@ -46,16 +51,17 @@ class HallucinationGuardrail:
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
context: str,
|
||||
llm: LLM,
|
||||
context: str | None = None,
|
||||
threshold: float | None = None,
|
||||
tool_response: str = "",
|
||||
):
|
||||
"""Initialize the HallucinationGuardrail placeholder.
|
||||
|
||||
Args:
|
||||
context: The reference context that outputs would be checked against.
|
||||
llm: The language model that would be used for evaluation.
|
||||
context: Optional reference context that outputs would be checked against.
|
||||
If not provided, the task's expected_output will be used as context.
|
||||
threshold: Optional minimum faithfulness score that would be required to pass.
|
||||
tool_response: Optional tool response information that would be used in evaluation.
|
||||
"""
|
||||
@@ -78,16 +84,17 @@ class HallucinationGuardrail:
|
||||
def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
|
||||
"""Validate a task output against hallucination criteria.
|
||||
|
||||
In the open source, this method always returns that the output is valid.
|
||||
|
||||
Args:
|
||||
task_output: The output to be validated.
|
||||
|
||||
Returns:
|
||||
A tuple containing:
|
||||
- True
|
||||
- The raw task output
|
||||
- True if validation passed, False otherwise
|
||||
- The raw task output if valid, or error feedback if invalid
|
||||
"""
|
||||
if callable(_validate_output_hook):
|
||||
return _validate_output_hook(self, task_output)
|
||||
|
||||
self._logger.log(
|
||||
"warning",
|
||||
"Premium hallucination detection skipped (use for free at https://app.crewai.com)\n",
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import asyncio
|
||||
from collections.abc import Coroutine
|
||||
import inspect
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing_extensions import TypeIs
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.lite_agent_output import LiteAgentOutput
|
||||
@@ -8,6 +12,13 @@ from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
|
||||
|
||||
def _is_coroutine(
|
||||
obj: LiteAgentOutput | Coroutine[Any, Any, LiteAgentOutput],
|
||||
) -> TypeIs[Coroutine[Any, Any, LiteAgentOutput]]:
|
||||
"""Check if obj is a coroutine for type narrowing."""
|
||||
return inspect.iscoroutine(obj)
|
||||
|
||||
|
||||
class LLMGuardrailResult(BaseModel):
|
||||
valid: bool = Field(
|
||||
description="Whether the task output complies with the guardrail"
|
||||
@@ -62,7 +73,10 @@ class LLMGuardrail:
|
||||
- If the Task result complies with the guardrail, saying that is valid
|
||||
"""
|
||||
|
||||
return agent.kickoff(query, response_format=LLMGuardrailResult)
|
||||
kickoff_result = agent.kickoff(query, response_format=LLMGuardrailResult)
|
||||
if _is_coroutine(kickoff_result):
|
||||
return asyncio.run(kickoff_result)
|
||||
return kickoff_result
|
||||
|
||||
def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
|
||||
"""Validates the output of a task based on specified criteria.
|
||||
|
||||
@@ -903,7 +903,7 @@ class Telemetry:
|
||||
{
|
||||
"id": str(task.id),
|
||||
"description": task.description,
|
||||
"output": task.output.raw_output,
|
||||
"output": task.output.raw if task.output else "",
|
||||
}
|
||||
for task in crew.tasks
|
||||
]
|
||||
@@ -923,6 +923,9 @@ class Telemetry:
|
||||
value: The attribute value.
|
||||
"""
|
||||
|
||||
if span is None:
|
||||
return
|
||||
|
||||
def _operation() -> None:
|
||||
return span.set_attribute(key, value)
|
||||
|
||||
|
||||
@@ -270,6 +270,7 @@ class ToolUsage:
|
||||
result = None # type: ignore
|
||||
should_retry = False
|
||||
available_tool = None
|
||||
error_event_emitted = False
|
||||
|
||||
try:
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
@@ -408,6 +409,7 @@ class ToolUsage:
|
||||
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
error_event_emitted = True
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
@@ -435,7 +437,7 @@ class ToolUsage:
|
||||
result = self._format_result(result=result)
|
||||
|
||||
finally:
|
||||
if started_event_emitted:
|
||||
if started_event_emitted and not error_event_emitted:
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
@@ -500,6 +502,7 @@ class ToolUsage:
|
||||
result = None # type: ignore
|
||||
should_retry = False
|
||||
available_tool = None
|
||||
error_event_emitted = False
|
||||
|
||||
try:
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
@@ -638,6 +641,7 @@ class ToolUsage:
|
||||
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
error_event_emitted = True
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
@@ -665,7 +669,7 @@ class ToolUsage:
|
||||
result = self._format_result(result=result)
|
||||
|
||||
finally:
|
||||
if started_event_emitted:
|
||||
if started_event_emitted and not error_event_emitted:
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
|
||||
@@ -22,9 +22,9 @@
|
||||
"expected_output": "\nThis is the expected criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
|
||||
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
|
||||
"getting_input": "This is the agent's final answer: {final_answer}\n\n",
|
||||
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
|
||||
"summarize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
|
||||
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
|
||||
"summarizer_system_message": "You are a precise assistant that creates structured summaries of agent conversations. You preserve critical context needed for seamless task continuation.",
|
||||
"summarize_instruction": "Analyze the following conversation and create a structured summary that preserves all information needed to continue the task seamlessly.\n\n<conversation>\n{conversation}\n</conversation>\n\nCreate a summary with these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2. **Current State**: What has been completed so far? What step is the agent on?\n3. **Important Discoveries**: Key facts, data, tool results, or findings that must not be lost.\n4. **Next Steps**: What should the agent do next based on the conversation?\n5. **Context to Preserve**: Any specific values, names, URLs, code snippets, or details referenced in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>",
|
||||
"summary": "<summary>\n{merged_summary}\n</summary>\n\nContinue the task from where the conversation left off. The above is a structured summary of prior context.",
|
||||
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
|
||||
"formatted_task_instructions": "Format your final answer according to the following OpenAPI schema: {output_format}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or modify the meaning of the content. Only structure it to match the schema format.\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
"""Human-in-the-loop (HITL) type definitions.
|
||||
|
||||
This module provides type definitions for human-in-the-loop interactions
|
||||
in crew executions.
|
||||
"""
|
||||
|
||||
from typing import TypedDict
|
||||
|
||||
|
||||
class HITLResumeInfo(TypedDict, total=False):
|
||||
"""HITL resume information passed from flow to crew.
|
||||
|
||||
Attributes:
|
||||
task_id: Unique identifier for the task.
|
||||
crew_execution_id: Unique identifier for the crew execution.
|
||||
task_key: Key identifying the specific task.
|
||||
task_output: Output from the task before human intervention.
|
||||
human_feedback: Feedback provided by the human.
|
||||
previous_messages: History of messages in the conversation.
|
||||
"""
|
||||
|
||||
task_id: str
|
||||
crew_execution_id: str
|
||||
task_key: str
|
||||
task_output: str
|
||||
human_feedback: str
|
||||
previous_messages: list[dict[str, str]]
|
||||
|
||||
|
||||
class CrewInputsWithHITL(TypedDict, total=False):
|
||||
"""Crew inputs that may contain HITL resume information.
|
||||
|
||||
Attributes:
|
||||
_hitl_resume: Optional HITL resume information for continuing execution.
|
||||
"""
|
||||
|
||||
_hitl_resume: HITLResumeInfo
|
||||
@@ -2,6 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Sequence
|
||||
import concurrent.futures
|
||||
import json
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
|
||||
@@ -42,6 +43,8 @@ if TYPE_CHECKING:
|
||||
from crewai.llm import LLM
|
||||
from crewai.task import Task
|
||||
|
||||
_create_plus_client_hook: Callable[[], Any] | None = None
|
||||
|
||||
|
||||
class SummaryContent(TypedDict):
|
||||
"""Structure for summary content entries.
|
||||
@@ -91,7 +94,11 @@ def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]:
|
||||
|
||||
for tool in tools:
|
||||
if isinstance(tool, CrewAITool):
|
||||
tools_list.append(tool.to_structured_tool())
|
||||
structured_tool = tool.to_structured_tool()
|
||||
structured_tool.current_usage_count = 0
|
||||
if structured_tool._original_tool:
|
||||
structured_tool._original_tool.current_usage_count = 0
|
||||
tools_list.append(structured_tool)
|
||||
else:
|
||||
raise ValueError("Tool is not a CrewStructuredTool or BaseTool")
|
||||
|
||||
@@ -634,6 +641,180 @@ def handle_context_length(
|
||||
)
|
||||
|
||||
|
||||
def _estimate_token_count(text: str) -> int:
|
||||
"""Estimate token count using a conservative cross-provider heuristic.
|
||||
|
||||
Args:
|
||||
text: The text to estimate tokens for.
|
||||
|
||||
Returns:
|
||||
Estimated token count (roughly 1 token per 4 characters).
|
||||
"""
|
||||
return len(text) // 4
|
||||
|
||||
|
||||
def _format_messages_for_summary(messages: list[LLMMessage]) -> str:
|
||||
"""Format messages with role labels for summarization.
|
||||
|
||||
Skips system messages. Handles None content, tool_calls, and
|
||||
multimodal content blocks.
|
||||
|
||||
Args:
|
||||
messages: List of messages to format.
|
||||
|
||||
Returns:
|
||||
Role-labeled conversation text.
|
||||
"""
|
||||
lines: list[str] = []
|
||||
for msg in messages:
|
||||
role = msg.get("role", "user")
|
||||
if role == "system":
|
||||
continue
|
||||
|
||||
content = msg.get("content")
|
||||
if content is None:
|
||||
# Check for tool_calls on assistant messages with no content
|
||||
tool_calls = msg.get("tool_calls")
|
||||
if tool_calls:
|
||||
tool_names = []
|
||||
for tc in tool_calls:
|
||||
func = tc.get("function", {})
|
||||
name = (
|
||||
func.get("name", "unknown")
|
||||
if isinstance(func, dict)
|
||||
else "unknown"
|
||||
)
|
||||
tool_names.append(name)
|
||||
content = f"[Called tools: {', '.join(tool_names)}]"
|
||||
else:
|
||||
content = ""
|
||||
elif isinstance(content, list):
|
||||
# Multimodal content blocks — extract text parts
|
||||
text_parts = [
|
||||
block.get("text", "")
|
||||
for block in content
|
||||
if isinstance(block, dict) and block.get("type") == "text"
|
||||
]
|
||||
content = " ".join(text_parts) if text_parts else "[multimodal content]"
|
||||
|
||||
if role == "assistant":
|
||||
label = "[ASSISTANT]:"
|
||||
elif role == "tool":
|
||||
tool_name = msg.get("name", "unknown")
|
||||
label = f"[TOOL_RESULT ({tool_name})]:"
|
||||
else:
|
||||
label = "[USER]:"
|
||||
|
||||
lines.append(f"{label} {content}")
|
||||
|
||||
return "\n\n".join(lines)
|
||||
|
||||
|
||||
def _split_messages_into_chunks(
|
||||
messages: list[LLMMessage], max_tokens: int
|
||||
) -> list[list[LLMMessage]]:
|
||||
"""Split messages into chunks at message boundaries.
|
||||
|
||||
Excludes system messages from chunks. Each chunk stays under
|
||||
max_tokens based on estimated token count.
|
||||
|
||||
Args:
|
||||
messages: List of messages to split.
|
||||
max_tokens: Maximum estimated tokens per chunk.
|
||||
|
||||
Returns:
|
||||
List of message chunks.
|
||||
"""
|
||||
non_system = [m for m in messages if m.get("role") != "system"]
|
||||
if not non_system:
|
||||
return []
|
||||
|
||||
chunks: list[list[LLMMessage]] = []
|
||||
current_chunk: list[LLMMessage] = []
|
||||
current_tokens = 0
|
||||
|
||||
for msg in non_system:
|
||||
content = msg.get("content")
|
||||
if content is None:
|
||||
msg_text = ""
|
||||
elif isinstance(content, list):
|
||||
msg_text = str(content)
|
||||
else:
|
||||
msg_text = str(content)
|
||||
|
||||
msg_tokens = _estimate_token_count(msg_text)
|
||||
|
||||
# If adding this message would exceed the limit and we already have
|
||||
# messages in the current chunk, start a new chunk
|
||||
if current_chunk and (current_tokens + msg_tokens) > max_tokens:
|
||||
chunks.append(current_chunk)
|
||||
current_chunk = []
|
||||
current_tokens = 0
|
||||
|
||||
current_chunk.append(msg)
|
||||
current_tokens += msg_tokens
|
||||
|
||||
if current_chunk:
|
||||
chunks.append(current_chunk)
|
||||
|
||||
return chunks
|
||||
|
||||
|
||||
def _extract_summary_tags(text: str) -> str:
|
||||
"""Extract content between <summary></summary> tags.
|
||||
|
||||
Falls back to the full text if no tags are found.
|
||||
|
||||
Args:
|
||||
text: Text potentially containing summary tags.
|
||||
|
||||
Returns:
|
||||
Extracted summary content, or full text if no tags found.
|
||||
"""
|
||||
match = re.search(r"<summary>(.*?)</summary>", text, re.DOTALL)
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
return text.strip()
|
||||
|
||||
|
||||
async def _asummarize_chunks(
|
||||
chunks: list[list[LLMMessage]],
|
||||
llm: LLM | BaseLLM,
|
||||
callbacks: list[TokenCalcHandler],
|
||||
i18n: I18N,
|
||||
) -> list[SummaryContent]:
|
||||
"""Summarize multiple message chunks concurrently using asyncio.
|
||||
|
||||
Args:
|
||||
chunks: List of message chunks to summarize.
|
||||
llm: LLM instance (must support ``acall``).
|
||||
callbacks: List of callbacks for the LLM.
|
||||
i18n: I18N instance for prompt templates.
|
||||
|
||||
Returns:
|
||||
Ordered list of summary contents, one per chunk.
|
||||
"""
|
||||
|
||||
async def _summarize_one(chunk: list[LLMMessage]) -> SummaryContent:
|
||||
conversation_text = _format_messages_for_summary(chunk)
|
||||
summarization_messages = [
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarizer_system_message"), role="system"
|
||||
),
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarize_instruction").format(
|
||||
conversation=conversation_text
|
||||
),
|
||||
),
|
||||
]
|
||||
summary = await llm.acall(summarization_messages, callbacks=callbacks)
|
||||
extracted = _extract_summary_tags(str(summary))
|
||||
return {"content": extracted}
|
||||
|
||||
results = await asyncio.gather(*[_summarize_one(chunk) for chunk in chunks])
|
||||
return list(results)
|
||||
|
||||
|
||||
def summarize_messages(
|
||||
messages: list[LLMMessage],
|
||||
llm: LLM | BaseLLM,
|
||||
@@ -643,6 +824,10 @@ def summarize_messages(
|
||||
) -> None:
|
||||
"""Summarize messages to fit within context window.
|
||||
|
||||
Uses structured context compaction: preserves system messages,
|
||||
splits at message boundaries, formats with role labels, and
|
||||
produces structured summaries for seamless task continuation.
|
||||
|
||||
Preserves any files attached to user messages and re-attaches them to
|
||||
the summarized message. Files from all user messages are merged.
|
||||
|
||||
@@ -651,49 +836,74 @@ def summarize_messages(
|
||||
llm: LLM instance for summarization
|
||||
callbacks: List of callbacks for LLM
|
||||
i18n: I18N instance for messages
|
||||
verbose: Whether to print progress.
|
||||
"""
|
||||
# 1. Extract & preserve file attachments from user messages
|
||||
preserved_files: dict[str, Any] = {}
|
||||
for msg in messages:
|
||||
if msg.get("role") == "user" and msg.get("files"):
|
||||
preserved_files.update(msg["files"])
|
||||
|
||||
messages_string = " ".join(
|
||||
[str(message.get("content", "")) for message in messages]
|
||||
)
|
||||
cut_size = llm.get_context_window_size()
|
||||
# 2. Extract system messages — never summarize them
|
||||
system_messages = [m for m in messages if m.get("role") == "system"]
|
||||
non_system_messages = [m for m in messages if m.get("role") != "system"]
|
||||
|
||||
messages_groups = [
|
||||
{"content": messages_string[i : i + cut_size]}
|
||||
for i in range(0, len(messages_string), cut_size)
|
||||
]
|
||||
# If there are only system messages (or no non-system messages), nothing to summarize
|
||||
if not non_system_messages:
|
||||
return
|
||||
|
||||
summarized_contents: list[SummaryContent] = []
|
||||
# 3. Split non-system messages into chunks at message boundaries
|
||||
max_tokens = llm.get_context_window_size()
|
||||
chunks = _split_messages_into_chunks(non_system_messages, max_tokens)
|
||||
|
||||
total_groups = len(messages_groups)
|
||||
for idx, group in enumerate(messages_groups, 1):
|
||||
# 4. Summarize each chunk with role-labeled formatting
|
||||
total_chunks = len(chunks)
|
||||
|
||||
if total_chunks <= 1:
|
||||
# Single chunk — no benefit from async overhead
|
||||
summarized_contents: list[SummaryContent] = []
|
||||
for idx, chunk in enumerate(chunks, 1):
|
||||
if verbose:
|
||||
Printer().print(
|
||||
content=f"Summarizing {idx}/{total_chunks}...",
|
||||
color="yellow",
|
||||
)
|
||||
conversation_text = _format_messages_for_summary(chunk)
|
||||
summarization_messages = [
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarizer_system_message"), role="system"
|
||||
),
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarize_instruction").format(
|
||||
conversation=conversation_text
|
||||
),
|
||||
),
|
||||
]
|
||||
summary = llm.call(summarization_messages, callbacks=callbacks)
|
||||
extracted = _extract_summary_tags(str(summary))
|
||||
summarized_contents.append({"content": extracted})
|
||||
else:
|
||||
# Multiple chunks — summarize in parallel via asyncio
|
||||
if verbose:
|
||||
Printer().print(
|
||||
content=f"Summarizing {idx}/{total_groups}...",
|
||||
content=f"Summarizing {total_chunks} chunks in parallel...",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
summarization_messages = [
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarizer_system_message"), role="system"
|
||||
),
|
||||
format_message_for_llm(
|
||||
i18n.slice("summarize_instruction").format(group=group["content"]),
|
||||
),
|
||||
]
|
||||
summary = llm.call(
|
||||
summarization_messages,
|
||||
callbacks=callbacks,
|
||||
coro = _asummarize_chunks(
|
||||
chunks=chunks, llm=llm, callbacks=callbacks, i18n=i18n
|
||||
)
|
||||
summarized_contents.append({"content": str(summary)})
|
||||
if is_inside_event_loop():
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
|
||||
summarized_contents = pool.submit(asyncio.run, coro).result()
|
||||
else:
|
||||
summarized_contents = asyncio.run(coro)
|
||||
|
||||
merged_summary = " ".join(content["content"] for content in summarized_contents)
|
||||
merged_summary = "\n\n".join(content["content"] for content in summarized_contents)
|
||||
|
||||
# 6. Reconstruct messages: [system messages...] + [summary user message]
|
||||
messages.clear()
|
||||
messages.extend(system_messages)
|
||||
|
||||
summary_message = format_message_for_llm(
|
||||
i18n.slice("summary").format(merged_summary=merged_summary)
|
||||
)
|
||||
@@ -818,12 +1028,15 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:
|
||||
if from_repository:
|
||||
import importlib
|
||||
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
if callable(_create_plus_client_hook):
|
||||
client = _create_plus_client_hook()
|
||||
else:
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
|
||||
client = PlusAPI(api_key=get_auth_token())
|
||||
client = PlusAPI(api_key=get_auth_token())
|
||||
_print_current_organization()
|
||||
response = client.get_agent(from_repository)
|
||||
response = asyncio.run(client.get_agent(from_repository))
|
||||
if response.status_code == 404:
|
||||
raise AgentRepositoryError(
|
||||
f"Agent {from_repository} does not exist, make sure the name is correct or the agent is available on your organization."
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel, Field, InstanceOf
|
||||
from rich.box import HEAVY_EDGE
|
||||
@@ -36,7 +36,13 @@ class CrewEvaluator:
|
||||
iteration: The current iteration of the evaluation.
|
||||
"""
|
||||
|
||||
def __init__(self, crew: Crew, eval_llm: InstanceOf[BaseLLM]) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
crew: Crew,
|
||||
eval_llm: InstanceOf[BaseLLM] | str | None = None,
|
||||
openai_model_name: str | None = None,
|
||||
llm: InstanceOf[BaseLLM] | str | None = None,
|
||||
) -> None:
|
||||
self.crew = crew
|
||||
self.llm = eval_llm
|
||||
self.tasks_scores: defaultdict[int, list[float]] = defaultdict(list)
|
||||
@@ -86,7 +92,9 @@ class CrewEvaluator:
|
||||
"""
|
||||
self.iteration = iteration
|
||||
|
||||
def print_crew_evaluation_result(self) -> None:
|
||||
def print_crew_evaluation_result(
|
||||
self, token_usage: list[dict[str, Any]] | None = None
|
||||
) -> None:
|
||||
"""
|
||||
Prints the evaluation result of the crew in a table.
|
||||
A Crew with 2 tasks using the command crewai test -n 3
|
||||
@@ -204,7 +212,7 @@ class CrewEvaluator:
|
||||
CrewTestResultEvent(
|
||||
quality=quality_score,
|
||||
execution_duration=current_task.execution_duration,
|
||||
model=self.llm.model,
|
||||
model=getattr(self.llm, "model", str(self.llm)),
|
||||
crew_name=self.crew.name,
|
||||
crew=self.crew,
|
||||
),
|
||||
|
||||
@@ -4,6 +4,8 @@ from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Final, Literal, NamedTuple
|
||||
|
||||
from crewai.events.utils.console_formatter import should_suppress_console_output
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _typeshed import SupportsWrite
|
||||
@@ -77,6 +79,8 @@ class Printer:
|
||||
file: A file-like object (stream); defaults to the current sys.stdout.
|
||||
flush: Whether to forcibly flush the stream.
|
||||
"""
|
||||
if should_suppress_console_output():
|
||||
return
|
||||
if isinstance(content, str):
|
||||
content = [ColoredText(content, color)]
|
||||
print(
|
||||
|
||||
@@ -19,6 +19,7 @@ def to_serializable(
|
||||
exclude: set[str] | None = None,
|
||||
max_depth: int = 5,
|
||||
_current_depth: int = 0,
|
||||
_ancestors: set[int] | None = None,
|
||||
) -> Serializable:
|
||||
"""Converts a Python object into a JSON-compatible representation.
|
||||
|
||||
@@ -31,6 +32,7 @@ def to_serializable(
|
||||
exclude: Set of keys to exclude from the result.
|
||||
max_depth: Maximum recursion depth. Defaults to 5.
|
||||
_current_depth: Current recursion depth (for internal use).
|
||||
_ancestors: Set of ancestor object ids for cycle detection (for internal use).
|
||||
|
||||
Returns:
|
||||
Serializable: A JSON-compatible structure.
|
||||
@@ -41,16 +43,29 @@ def to_serializable(
|
||||
if exclude is None:
|
||||
exclude = set()
|
||||
|
||||
if _ancestors is None:
|
||||
_ancestors = set()
|
||||
|
||||
if isinstance(obj, (str, int, float, bool, type(None))):
|
||||
return obj
|
||||
if isinstance(obj, uuid.UUID):
|
||||
return str(obj)
|
||||
if isinstance(obj, (date, datetime)):
|
||||
return obj.isoformat()
|
||||
|
||||
object_id = id(obj)
|
||||
if object_id in _ancestors:
|
||||
return f"<circular_ref:{type(obj).__name__}>"
|
||||
new_ancestors = _ancestors | {object_id}
|
||||
|
||||
if isinstance(obj, (list, tuple, set)):
|
||||
return [
|
||||
to_serializable(
|
||||
item, max_depth=max_depth, _current_depth=_current_depth + 1
|
||||
item,
|
||||
exclude=exclude,
|
||||
max_depth=max_depth,
|
||||
_current_depth=_current_depth + 1,
|
||||
_ancestors=new_ancestors,
|
||||
)
|
||||
for item in obj
|
||||
]
|
||||
@@ -61,6 +76,7 @@ def to_serializable(
|
||||
exclude=exclude,
|
||||
max_depth=max_depth,
|
||||
_current_depth=_current_depth + 1,
|
||||
_ancestors=new_ancestors,
|
||||
)
|
||||
for key, value in obj.items()
|
||||
if key not in exclude
|
||||
@@ -71,12 +87,16 @@ def to_serializable(
|
||||
obj=obj.model_dump(exclude=exclude),
|
||||
max_depth=max_depth,
|
||||
_current_depth=_current_depth + 1,
|
||||
_ancestors=new_ancestors,
|
||||
)
|
||||
except Exception:
|
||||
try:
|
||||
return {
|
||||
_to_serializable_key(k): to_serializable(
|
||||
v, max_depth=max_depth, _current_depth=_current_depth + 1
|
||||
v,
|
||||
max_depth=max_depth,
|
||||
_current_depth=_current_depth + 1,
|
||||
_ancestors=new_ancestors,
|
||||
)
|
||||
for k, v in obj.__dict__.items()
|
||||
if k not in (exclude or set())
|
||||
|
||||
@@ -51,6 +51,10 @@ class ConcreteAgentAdapter(BaseAgentAdapter):
|
||||
# Dummy implementation for MCP tools
|
||||
return []
|
||||
|
||||
def configure_structured_output(self, task: Any) -> None:
|
||||
# Dummy implementation for structured output
|
||||
pass
|
||||
|
||||
async def aexecute_task(
|
||||
self,
|
||||
task: Any,
|
||||
|
||||
@@ -703,6 +703,8 @@ def test_agent_definition_based_on_dict():
|
||||
# test for human input
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_human_input():
|
||||
from crewai.core.providers.human_input import SyncHumanInputProvider
|
||||
|
||||
# Agent configuration
|
||||
config = {
|
||||
"role": "test role",
|
||||
@@ -720,7 +722,7 @@ def test_agent_human_input():
|
||||
human_input=True,
|
||||
)
|
||||
|
||||
# Side effect function for _ask_human_input to simulate multiple feedback iterations
|
||||
# Side effect function for _prompt_input to simulate multiple feedback iterations
|
||||
feedback_responses = iter(
|
||||
[
|
||||
"Don't say hi, say Hello instead!", # First feedback: instruct change
|
||||
@@ -728,16 +730,16 @@ def test_agent_human_input():
|
||||
]
|
||||
)
|
||||
|
||||
def ask_human_input_side_effect(*args, **kwargs):
|
||||
def prompt_input_side_effect(*args, **kwargs):
|
||||
return next(feedback_responses)
|
||||
|
||||
# Patch both _ask_human_input and _invoke_loop to avoid real API/network calls.
|
||||
# Patch both _prompt_input on provider and _invoke_loop to avoid real API/network calls.
|
||||
with (
|
||||
patch.object(
|
||||
CrewAgentExecutor,
|
||||
"_ask_human_input",
|
||||
side_effect=ask_human_input_side_effect,
|
||||
) as mock_human_input,
|
||||
SyncHumanInputProvider,
|
||||
"_prompt_input",
|
||||
side_effect=prompt_input_side_effect,
|
||||
) as mock_prompt_input,
|
||||
patch.object(
|
||||
CrewAgentExecutor,
|
||||
"_invoke_loop",
|
||||
@@ -749,7 +751,7 @@ def test_agent_human_input():
|
||||
|
||||
# Assertions to ensure the agent behaves correctly.
|
||||
# It should have requested feedback twice.
|
||||
assert mock_human_input.call_count == 2
|
||||
assert mock_prompt_input.call_count == 2
|
||||
# The final result should be processed to "Hello"
|
||||
assert output.strip().lower() == "hello"
|
||||
|
||||
|
||||
@@ -606,9 +606,10 @@ def test_lite_agent_with_invalid_llm():
|
||||
|
||||
|
||||
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
|
||||
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
|
||||
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get")
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_kickoff_with_platform_tools(mock_get):
|
||||
def test_agent_kickoff_with_platform_tools(mock_get, mock_post):
|
||||
"""Test that Agent.kickoff() properly integrates platform tools with LiteAgent"""
|
||||
mock_response = Mock()
|
||||
mock_response.raise_for_status.return_value = None
|
||||
@@ -632,6 +633,15 @@ def test_agent_kickoff_with_platform_tools(mock_get):
|
||||
}
|
||||
mock_get.return_value = mock_response
|
||||
|
||||
# Mock the platform tool execution
|
||||
mock_post_response = Mock()
|
||||
mock_post_response.ok = True
|
||||
mock_post_response.json.return_value = {
|
||||
"success": True,
|
||||
"issue_url": "https://github.com/test/repo/issues/1"
|
||||
}
|
||||
mock_post.return_value = mock_post_response
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
|
||||
@@ -1,98 +1,227 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal goal is: Test goal\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: create_issue\nTool Arguments: {''title'': {''description'': ''Issue title'', ''type'': ''str''}, ''body'': {''description'': ''Issue body'', ''type'': ''Union[str, NoneType]''}}\nTool Description: Create a GitHub issue\nDetailed Parameter Structure:\nObject with properties:\n - title: Issue title (required)\n - body: Issue body (optional)\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [create_issue], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information
|
||||
is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```"}, {"role": "user", "content": "Create a GitHub issue"}], "model": "gpt-3.5-turbo", "stream": false}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal"},{"role":"user","content":"\nCurrent Task: Create
|
||||
a GitHub issue"}],"model":"gpt-3.5-turbo","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_issue","description":"Create
|
||||
a GitHub issue","strict":true,"parameters":{"additionalProperties":false,"properties":{"title":{"description":"Issue
|
||||
title","title":"Title","type":"string"},"body":{"default":null,"description":"Issue
|
||||
body","title":"Body","type":"string"}},"required":["title","body"],"type":"object"}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1233'
|
||||
- '596'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-CULxKTEIB85AVItcEQ09z4Xi0JCID\",\n \"object\": \"chat.completion\",\n \"created\": 1761350274,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I will need more specific information to create a GitHub issue. Could you please provide more details such as the title and body of the issue you would like to create?\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 255,\n \"completion_tokens\": 33,\n \"total_tokens\": 288,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n \
|
||||
\ }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3fqygkUIZ3bN4wvSpAhdaSk7MF\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403287,\n \"model\": \"gpt-3.5-turbo-0125\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_RuWuYzjzgRL3byVGhLlPi0rq\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"create_issue\",\n
|
||||
\ \"arguments\": \"{\\\"title\\\":\\\"Test issue\\\",\\\"body\\\":\\\"This
|
||||
is a test issue created for testing purposes.\\\"}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null,\n \"annotations\": []\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n
|
||||
\ ],\n \"usage\": {\n \"prompt_tokens\": 93,\n \"completion_tokens\":
|
||||
28,\n \"total_tokens\": 121,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 993d6b4be9862379-SJC
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 24 Oct 2025 23:57:54 GMT
|
||||
- Fri, 06 Feb 2026 18:41:28 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=WY9bgemMDI_hUYISAPlQ2a.DBGeZfM6AjVEa3SKNg1c-1761350274-1.0.1.1-K3Qm2cl6IlDAgmocoKZ8IMUTmue6Q81hH9stECprUq_SM8LF8rR9d1sHktvRCN3.jEM.twEuFFYDNpBnN8NBRJFZcea1yvpm8Uo0G_UhyDs; path=/; expires=Sat, 25-Oct-25 00:27:54 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- _cfuvid=JklLS4i3hBGELpS9cz1KMpTbj72hCwP41LyXDSxWIv8-1761350274521-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '487'
|
||||
- '1406'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '526'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '50000000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '49999727'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_1708dc0928c64882aaa5bc2c168c140f
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal"},{"role":"user","content":"\nCurrent Task: Create
|
||||
a GitHub issue"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_RuWuYzjzgRL3byVGhLlPi0rq","type":"function","function":{"name":"create_issue","arguments":"{\"title\":\"Test
|
||||
issue\",\"body\":\"This is a test issue created for testing purposes.\"}"}}]},{"role":"tool","tool_call_id":"call_RuWuYzjzgRL3byVGhLlPi0rq","name":"create_issue","content":"{\n \"success\":
|
||||
true,\n \"issue_url\": \"https://github.com/test/repo/issues/1\"\n}"}],"model":"gpt-3.5-turbo","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_issue","description":"Create
|
||||
a GitHub issue","strict":true,"parameters":{"additionalProperties":false,"properties":{"title":{"description":"Issue
|
||||
title","title":"Title","type":"string"},"body":{"default":null,"description":"Issue
|
||||
body","title":"Body","type":"string"}},"required":["title","body"],"type":"object"}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1028'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3hfuBxk36LIb3ekD1IVwFD5VVL\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403289,\n \"model\": \"gpt-3.5-turbo-0125\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I have successfully created a GitHub
|
||||
issue for testing purposes. You can view the issue at this URL: [Test issue](https://github.com/test/repo/issues/1)\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
154,\n \"completion_tokens\": 36,\n \"total_tokens\": 190,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": null\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 06 Feb 2026 18:41:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '888'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,400 +1,428 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.\nYour personal goal is: Gather information about the best soccer players\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players in the world?"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Sports Analyst. You are
|
||||
an expert at gathering and organizing information. You carefully collect details
|
||||
and present them in a structured way.\nYour personal goal is: Gather information
|
||||
about the best soccer players"},{"role":"user","content":"\nCurrent Task: Top
|
||||
10 best players in the world?\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '694'
|
||||
- '404'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BgufUtDqGzvqPZx2NmkqqxdW4G8rQ\",\n \"object\": \"chat.completion\",\n \"created\": 1749567308,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: The top 10 best soccer players in the world, as of October 2023, can be identified based on their recent performances, skills, impact on games, and overall contributions to their teams. Here is the structured list:\\n\\n1. **Lionel Messi (Inter Miami CF)**\\n - Position: Forward\\n - Key Attributes: Dribbling, vision, goal-scoring ability.\\n - Achievements: Multiple Ballon d'Or winner, Copa America champion, World Cup champion (2022).\\n\\n2. **Kylian Mbappé (Paris Saint-Germain)**\\n - Position: Forward\\n - Key Attributes: Speed, technique, finishing.\\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple\
|
||||
\ domestic cups.\\n\\n3. **Erling Haaland (Manchester City)**\\n - Position: Forward\\n - Key Attributes: Power, speed, goal-scoring instinct.\\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\\n\\n4. **Kevin De Bruyne (Manchester City)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, creativity.\\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\\n\\n5. **Karim Benzema (Al-Ittihad)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\\n - Achievements: 2022 Ballon d'Or winner, multiple Champions Leagues with Real Madrid.\\n\\n6. **Neymar Jr. (Al Hilal)**\\n - Position: Forward\\n - Key Attributes: Flair, dribbling, creativity.\\n - Achievements: Multiple domestic league titles, Champions League runner-up.\\n\\n7. **Robert Lewandowski (FC Barcelona)**\\n - Position: Forward\\n - Key Attributes: Finishing,\
|
||||
\ positioning, aerial ability.\\n - Achievements: FIFA Best Men's Player, multiple Bundesliga titles, La Liga champion (2023).\\n\\n8. **Mohamed Salah (Liverpool)**\\n - Position: Forward\\n - Key Attributes: Speed, finishing, dribbling.\\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\\n\\n9. **Vinícius Júnior (Real Madrid)**\\n - Position: Forward\\n - Key Attributes: Speed, dribbling, creativity.\\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\\n\\n10. **Luka Modrić (Real Madrid)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, tactical intelligence.\\n - Achievements: Multiple Champions League titles, Ballon d'Or winner (2018).\\n\\nThis list is compiled based on their current form, past performances, and contributions to their respective teams in both domestic and international competitions. Player rankings can vary based on personal opinion and specific criteria used for\
|
||||
\ evaluation, but these players have consistently been regarded as some of the best in the world as of October 2023.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 122,\n \"completion_tokens\": 643,\n \"total_tokens\": 765,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_34a54ae93c\"\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3hzoRVVEa07HZsM9wpi2RVRKQp\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403289,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Here is a structured list of the top
|
||||
10 best soccer players in the world as of 2024, based on recent performances,
|
||||
awards, and overall impact on the game:\\n\\n1. **Kylian Mbapp\xE9** \\n
|
||||
\ - Nationality: French \\n - Club: Paris Saint-Germain (PSG) \\n -
|
||||
Position: Forward \\n - Key Highlights: Multiple Ligue 1 titles, World
|
||||
Cup winner (2018), known for speed, dribbling, and scoring prowess.\\n\\n2.
|
||||
**Erling Haaland** \\n - Nationality: Norwegian \\n - Club: Manchester
|
||||
City \\n - Position: Striker \\n - Key Highlights: Premier League Golden
|
||||
Boot winner, incredible goal-scoring record, physical presence, and finishing
|
||||
skills.\\n\\n3. **Lionel Messi** \\n - Nationality: Argentine \\n -
|
||||
Club: Inter Miami \\n - Position: Forward/Attacking Midfielder \\n -
|
||||
Key Highlights: Seven Ballon d\u2019Or awards, World Cup winner (2022), exceptional
|
||||
playmaking and dribbling ability.\\n\\n4. **Kevin De Bruyne** \\n - Nationality:
|
||||
Belgian \\n - Club: Manchester City \\n - Position: Midfielder \\n
|
||||
\ - Key Highlights: One of the best playmakers globally, assists leader,
|
||||
consistent high-level performance in the Premier League.\\n\\n5. **Robert
|
||||
Lewandowski** \\n - Nationality: Polish \\n - Club: FC Barcelona \\n
|
||||
\ - Position: Striker \\n - Key Highlights: Exceptional goal-scoring record,
|
||||
multiple Bundesliga top scorer awards, key figure in Bayern Munich\u2019s
|
||||
dominance before transferring.\\n\\n6. **Karim Benzema** \\n - Nationality:
|
||||
French \\n - Club: Al-Ittihad \\n - Position: Striker \\n - Key Highlights:
|
||||
Ballon d\u2019Or winner (2022), excellent technical skills, leadership at
|
||||
Real Madrid before recent transfer.\\n\\n7. **Mohamed Salah** \\n - Nationality:
|
||||
Egyptian \\n - Club: Liverpool \\n - Position: Forward \\n - Key
|
||||
Highlights: Premier League Golden Boot winner, known for speed, dribbling,
|
||||
and goal-scoring consistency.\\n\\n8. **Vin\xEDcius J\xFAnior** \\n - Nationality:
|
||||
Brazilian \\n - Club: Real Madrid \\n - Position: Winger \\n - Key
|
||||
Highlights: Key player for Real Madrid, exceptional dribbling and pace, rising
|
||||
star in world football.\\n\\n9. **Jude Bellingham** \\n - Nationality:
|
||||
English \\n - Club: Real Madrid \\n - Position: Midfielder \\n -
|
||||
Key Highlights: Young talent with maturity beyond years, influential midfielder
|
||||
with great vision and work rate.\\n\\n10. **Thibaut Courtois** \\n - Nationality:
|
||||
Belgian \\n - Club: Real Madrid \\n - Position: Goalkeeper \\n -
|
||||
Key Highlights: One of the best goalkeepers globally, crucial performances
|
||||
in La Liga and Champions League.\\n\\nThese rankings consider individual talent,
|
||||
recent achievements, influence on matches, and overall contribution to club
|
||||
and country.\",\n \"refusal\": null,\n \"annotations\": []\n
|
||||
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
|
||||
\ ],\n \"usage\": {\n \"prompt_tokens\": 68,\n \"completion_tokens\":
|
||||
621,\n \"total_tokens\": 689,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b5400dcd624b-GRU
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:55:42 GMT
|
||||
- Fri, 06 Feb 2026 18:41:40 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU; path=/; expires=Tue, 10-Jun-25 15:25:42 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- _cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '33288'
|
||||
- '10634'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '33292'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999859'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_6a587ea22edef774ecdada790a320cab
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.\nYour personal goal is: Gather information about the best soccer players\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players in the world?"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: The top 10 best soccer players in the world, as of October 2023, can be identified based on their recent performances, skills, impact on games, and overall contributions to their teams. Here is the structured list:\n\n1. **Lionel Messi (Inter Miami CF)**\n -
|
||||
Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2. **Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair, dribbling, creativity.\n - Achievements: Multiple domestic league titles, Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision,
|
||||
tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis list is compiled based on their current form, past performances, and contributions to their respective teams in both domestic and international competitions. Player rankings can vary based on personal opinion and specific criteria used for evaluation, but these players have consistently been regarded as some of the best in the world as of October 2023."}, {"role": "user", "content": "You are not allowed to include Brazilian players"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Sports Analyst. You are
|
||||
an expert at gathering and organizing information. You carefully collect details
|
||||
and present them in a structured way.\nYour personal goal is: Gather information
|
||||
about the best soccer players"},{"role":"user","content":"\nCurrent Task: Top
|
||||
10 best players in the world?\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '3594'
|
||||
- '404'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU; _cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BgugJkCDtB2EfvAMiIFK0reeLKFBl\",\n \"object\": \"chat.completion\",\n \"created\": 1749567359,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: Here is an updated list of the top 10 best soccer players in the world as of October 2023, excluding Brazilian players:\\n\\n1. **Lionel Messi (Inter Miami CF)**\\n - Position: Forward\\n - Key Attributes: Dribbling, vision, goal-scoring ability.\\n - Achievements: Multiple Ballon d'Or winner, Copa America champion, World Cup champion (2022).\\n\\n2. **Kylian Mbappé (Paris Saint-Germain)**\\n - Position: Forward\\n - Key Attributes: Speed, technique, finishing.\\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\\n\\n3. **Erling Haaland (Manchester City)**\\n - Position: Forward\\\
|
||||
n - Key Attributes: Power, speed, goal-scoring instinct.\\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\\n\\n4. **Kevin De Bruyne (Manchester City)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, creativity.\\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\\n\\n5. **Karim Benzema (Al-Ittihad)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\\n - Achievements: 2022 Ballon d'Or winner, multiple Champions Leagues with Real Madrid.\\n\\n6. **Robert Lewandowski (FC Barcelona)**\\n - Position: Forward\\n - Key Attributes: Finishing, positioning, aerial ability.\\n - Achievements: FIFA Best Men's Player, multiple Bundesliga titles, La Liga champion (2023).\\n\\n7. **Mohamed Salah (Liverpool)**\\n - Position: Forward\\n - Key Attributes: Speed, finishing, dribbling.\\n - Achievements: Premier League\
|
||||
\ champion, FA Cup, UEFA Champions League winner.\\n\\n8. **Vinícius Júnior (Real Madrid)**\\n - Position: Forward\\n - Key Attributes: Speed, dribbling, creativity.\\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\\n\\n9. **Luka Modrić (Real Madrid)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, tactical intelligence.\\n - Achievements: Multiple Champions League titles, Ballon d'Or winner (2018).\\n\\n10. **Harry Kane (Bayern Munich)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, technique, playmaking.\\n - Achievements: Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\\n\\nThis list has been adjusted to exclude Brazilian players and focuses on those who have made significant impacts in their clubs and on the international stage as of October 2023. Each player is recognized for their exceptional skills, performances, and achievements.\",\n \"refusal\": null,\n\
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 781,\n \"completion_tokens\": 610,\n \"total_tokens\": 1391,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_34a54ae93c\"\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3sn9nSnGGOMKrS88avliVF7XTv\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403300,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Certainly! Here's a structured list
|
||||
of the top 10 best soccer players in the world as of 2024, considering their
|
||||
performance, skills, achievements, and impact in recent seasons:\\n\\n###
|
||||
Top 10 Best Soccer Players in the World (2024)\\n\\n| Rank | Player Name |
|
||||
Nationality | Club (2023/24 Season) | Position | Key Attributes
|
||||
\ | Recent Achievements |\\n|-------|---------------------|-------------|----------------------------|------------------|---------------------------------|------------------------------------|\\n|
|
||||
1 | Lionel Messi | Argentina | Paris Saint-Germain (PSG) |
|
||||
Forward/Playmaker| Dribbling, Vision, Free kicks | 2023 World Cup Golden
|
||||
Ball, Club Successes |\\n| 2 | Kylian Mbapp\xE9 | France |
|
||||
Paris Saint-Germain (PSG) | Forward | Speed, Finishing, Dribbling
|
||||
\ | Ligue 1 Top Scorer, World Cup Winner 2018|\\n| 3 | Erling Haaland
|
||||
\ | Norway | Manchester City | Striker | Strength,
|
||||
Finishing, Positioning| Premier League Golden Boot, Champions League Impact|\\n|
|
||||
4 | Kevin De Bruyne | Belgium | Manchester City |
|
||||
Midfielder | Passing, Vision, Creativity | Premier League Titles,
|
||||
Key Playmaker|\\n| 5 | Robert Lewandowski | Poland | FC Barcelona
|
||||
\ | Striker | Finishing, Positioning, Composure| La
|
||||
Liga Top Scorer, Consistent Scorer|\\n| 6 | Neymar Jr. | Brazil
|
||||
\ | Al-Hilal | Forward/Winger | Dribbling, Creativity,
|
||||
Flair | Copa America Titles, Club Success |\\n| 7 | Mohamed Salah |
|
||||
Egypt | Liverpool | Forward/Winger | Pace, Finishing,
|
||||
Work Rate | Premier League Golden Boot, Champions League Winner|\\n|
|
||||
8 | Vin\xEDcius Jr. | Brazil | Real Madrid |
|
||||
Winger | Speed, Dribbling, Crossing | La Liga Titles, UEFA Champions
|
||||
League Winner|\\n| 9 | Luka Modri\u0107 | Croatia | Real Madrid
|
||||
\ | Midfielder | Passing, Control, Experience | Ballon
|
||||
d\u2019Or 2018, Multiple Champions League Titles|\\n| 10 | Karim Benzema
|
||||
\ | France | Al-Ittihad | Striker | Finishing,
|
||||
Link-up Play, Movements| Ballon d\u2019Or 2022, UEFA Champions League Top
|
||||
Scorer |\\n\\n### Notes:\\n- The rankings reflect a combination of individual
|
||||
skill, recent performance, consistency, and influence on the game.\\n- Players\u2019
|
||||
clubs are based on the 2023/24 season affiliations.\\n- Achievements highlight
|
||||
recent titles, awards, or standout contributions.\\n\\nIf you would like me
|
||||
to focus on specific leagues, historical players, or emerging talents, just
|
||||
let me know!\",\n \"refusal\": null,\n \"annotations\": []\n
|
||||
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
|
||||
\ ],\n \"usage\": {\n \"prompt_tokens\": 68,\n \"completion_tokens\":
|
||||
605,\n \"total_tokens\": 673,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b6782db84d3b-GRU
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:56:30 GMT
|
||||
- Fri, 06 Feb 2026 18:41:49 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '31484'
|
||||
- '9044'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '31490'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999166'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_aa737cf40bb76af9f458bfd35f7a77a1
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.\nYour personal goal is: Gather information about the best soccer players\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players in the world?"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: The top 10 best soccer players in the world, as of October 2023, can be identified based on their recent performances, skills, impact on games, and overall contributions to their teams. Here is the structured list:\n\n1. **Lionel Messi (Inter Miami CF)**\n -
|
||||
Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2. **Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair, dribbling, creativity.\n - Achievements: Multiple domestic league titles, Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision,
|
||||
tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis list is compiled based on their current form, past performances, and contributions to their respective teams in both domestic and international competitions. Player rankings can vary based on personal opinion and specific criteria used for evaluation, but these players have consistently been regarded as some of the best in the world as of October 2023."}, {"role": "user", "content": "You are not allowed to include Brazilian players"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: Here is an updated list of the top 10 best soccer players in the world as of October 2023, excluding Brazilian players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements: 2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning,
|
||||
aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\n\n8. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\n\n9. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\n10. **Harry Kane (Bayern Munich)**\n - Position: Forward\n - Key Attributes: Goal-scoring, technique, playmaking.\n - Achievements: Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\n\nThis list has been adjusted to exclude Brazilian
|
||||
players and focuses on those who have made significant impacts in their clubs and on the international stage as of October 2023. Each player is recognized for their exceptional skills, performances, and achievements."}, {"role": "user", "content": "You are not allowed to include Brazilian players"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
body: '{"messages":[{"role":"system","content":"You are Sports Analyst. You are
|
||||
an expert at gathering and organizing information. You carefully collect details
|
||||
and present them in a structured way.\nYour personal goal is: Gather information
|
||||
about the best soccer players"},{"role":"user","content":"\nCurrent Task: Top
|
||||
10 best players in the world?\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6337'
|
||||
- '404'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU; _cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BgugsAmyI50uQ6SpCp89ZZY4eD1Pz\",\n \"object\": \"chat.completion\",\n \"created\": 1749567394,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: Here is the revised list of the top 10 best soccer players in the world as of October 2023, explicitly excluding Brazilian players:\\n\\n1. **Lionel Messi (Inter Miami CF)**\\n - Position: Forward\\n - Key Attributes: Dribbling, vision, goal-scoring ability.\\n - Achievements: Multiple Ballon d'Or winner, Copa America champion, World Cup champion (2022).\\n\\n2. **Kylian Mbappé (Paris Saint-Germain)**\\n - Position: Forward\\n - Key Attributes: Speed, technique, finishing.\\n - Achievements: FIFA World Cup champion (2018), multiple Ligue 1 titles, and various domestic cups.\\n\\n3. **Erling Haaland (Manchester City)**\\n\
|
||||
\ - Position: Forward\\n - Key Attributes: Power, speed, goal-scoring instinct.\\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\\n\\n4. **Kevin De Bruyne (Manchester City)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, creativity.\\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\\n\\n5. **Karim Benzema (Al-Ittihad)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\\n - Achievements: 2022 Ballon d'Or winner, multiple Champions Leagues with Real Madrid.\\n\\n6. **Robert Lewandowski (FC Barcelona)**\\n - Position: Forward\\n - Key Attributes: Finishing, positioning, aerial ability.\\n - Achievements: FIFA Best Men's Player, multiple Bundesliga titles, La Liga champion (2023).\\n\\n7. **Mohamed Salah (Liverpool)**\\n - Position: Forward\\n - Key Attributes: Speed, finishing, dribbling.\\n -\
|
||||
\ Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\\n\\n8. **Luka Modrić (Real Madrid)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, tactical intelligence.\\n - Achievements: Multiple Champions League titles, Ballon d'Or winner (2018).\\n\\n9. **Harry Kane (Bayern Munich)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, technique, playmaking.\\n - Achievements: Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\\n\\n10. **Rodri (Manchester City)**\\n - Position: Midfielder\\n - Key Attributes: Defensive skills, passing, positional awareness.\\n - Achievements: Premier League titles, UEFA Champions League winner (2023).\\n\\nThis list is curated while adhering to the restriction of excluding Brazilian players. Each player included has demonstrated exceptional skills and remarkable performances, solidifying their status as some of the best in the world as of October 2023.\"\
|
||||
,\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1407,\n \"completion_tokens\": 605,\n \"total_tokens\": 2012,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_62a23a81ef\"\n}\n"
|
||||
string: "{\n \"id\": \"chatcmpl-D6L4102eMwTEPeHxfyN9Kh7rjBoX6\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403309,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Certainly! Here is a list of the top
|
||||
10 best soccer players in the world as of 2024, considering their recent performances,
|
||||
skills, impact, and accolades:\\n\\n1. **Lionel Messi** \\n - Nationality:
|
||||
Argentine \\n - Position: Forward \\n - Key Achievements: 7 Ballon d'Or
|
||||
awards, led Argentina to 2021 Copa Am\xE9rica victory and 2022 FIFA World
|
||||
Cup triumph, exceptional dribbling and playmaking skills.\\n\\n2. **Kylian
|
||||
Mbapp\xE9** \\n - Nationality: French \\n - Position: Forward \\n -
|
||||
Key Achievements: FIFA World Cup winner (2018), multiple Ligue 1 titles, known
|
||||
for incredible speed, finishing, and consistency.\\n\\n3. **Erling Haaland**
|
||||
\ \\n - Nationality: Norwegian \\n - Position: Striker \\n - Key Achievements:
|
||||
Premier League Golden Boot winner (2022-23), prolific goal scorer, physical
|
||||
presence, and finishing ability.\\n\\n4. **Karim Benzema** \\n - Nationality:
|
||||
French \\n - Position: Forward \\n - Key Achievements: 2022 Ballon d'Or
|
||||
winner, key player for Real Madrid\u2019s recent Champions League victories,
|
||||
excellent technical skills and leadership.\\n\\n5. **Kevin De Bruyne** \\n
|
||||
\ - Nationality: Belgian \\n - Position: Midfielder \\n - Key Achievements:
|
||||
Premier League playmaker, known for vision, passing accuracy, and creativity.\\n\\n6.
|
||||
**Robert Lewandowski** \\n - Nationality: Polish \\n - Position: Striker
|
||||
\ \\n - Key Achievements: Multiple Bundesliga top scorer titles, consistent
|
||||
goal scorer, known for positioning and finishing.\\n\\n7. **Neymar Jr.** \\n
|
||||
\ - Nationality: Brazilian \\n - Position: Forward \\n - Key Achievements:
|
||||
Exceptional dribbling, creativity, and flair; multiple domestic titles and
|
||||
Copa Libertadores winner.\\n\\n8. **Mohamed Salah** \\n - Nationality:
|
||||
Egyptian \\n - Position: Forward \\n - Key Achievements: Premier League
|
||||
Golden Boot, consistent goal scoring with Liverpool, known for speed and finishing.\\n\\n9.
|
||||
**Luka Modri\u0107** \\n - Nationality: Croatian \\n - Position: Midfielder
|
||||
\ \\n - Key Achievements: 2018 Ballon d\u2019Or winner, pivotal midfield
|
||||
maestro, excellent passing and control.\\n\\n10. **Thibaut Courtois** \\n
|
||||
\ - Nationality: Belgian \\n - Position: Goalkeeper \\n - Key Achievements:
|
||||
Exceptional shot-stopper, key player in Real Madrid's recent successes.\\n\\nThis
|
||||
list includes a blend of forwards, midfielders, and a goalkeeper, showcasing
|
||||
the best talents in various positions worldwide. The rankings may vary slightly
|
||||
depending on current form and opinions, but these players consistently rank
|
||||
among the best globally.\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 68,\n \"completion_tokens\":
|
||||
575,\n \"total_tokens\": 643,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b7561f204d3b-GRU
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:56:46 GMT
|
||||
- Fri, 06 Feb 2026 18:41:57 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '12189'
|
||||
- '7948'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '12193'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149998513'
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- req_1098f5a5384f4a26aecf0c9e4e4d1fc0
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Sports Analyst. You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.\nYour personal goal is: Gather information about the best soccer players\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user", "content": "Top 10 best players in the world?"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: The top 10 best soccer players in the world, as of October 2023, can be identified based on their recent performances, skills, impact on games, and overall contributions to their teams. Here is the structured list:\n\n1. **Lionel Messi (Inter Miami CF)**\n -
|
||||
Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2. **Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements:
|
||||
2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Neymar Jr. (Al Hilal)**\n - Position: Forward\n - Key Attributes: Flair, dribbling, creativity.\n - Achievements: Multiple domestic league titles, Champions League runner-up.\n\n7. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n8. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\n\n9. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\n\n10. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision,
|
||||
tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\nThis list is compiled based on their current form, past performances, and contributions to their respective teams in both domestic and international competitions. Player rankings can vary based on personal opinion and specific criteria used for evaluation, but these players have consistently been regarded as some of the best in the world as of October 2023."}, {"role": "user", "content": "You are not allowed to include Brazilian players"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: Here is an updated list of the top 10 best soccer players in the world as of October 2023, excluding Brazilian players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2.
|
||||
**Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), Ligue 1 titles, multiple domestic cups.\n\n3. **Erling Haaland (Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements: 2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning,
|
||||
aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\n\n8. **Vin\u00edcius J\u00fanior (Real Madrid)**\n - Position: Forward\n - Key Attributes: Speed, dribbling, creativity.\n - Achievements: UEFA Champions League winner (2022), La Liga champion (2023).\n\n9. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\n10. **Harry Kane (Bayern Munich)**\n - Position: Forward\n - Key Attributes: Goal-scoring, technique, playmaking.\n - Achievements: Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\n\nThis list has been adjusted to exclude Brazilian
|
||||
players and focuses on those who have made significant impacts in their clubs and on the international stage as of October 2023. Each player is recognized for their exceptional skills, performances, and achievements."}, {"role": "user", "content": "You are not allowed to include Brazilian players"}, {"role": "assistant", "content": "Thought: I now can give a great answer \nFinal Answer: Here is the revised list of the top 10 best soccer players in the world as of October 2023, explicitly excluding Brazilian players:\n\n1. **Lionel Messi (Inter Miami CF)**\n - Position: Forward\n - Key Attributes: Dribbling, vision, goal-scoring ability.\n - Achievements: Multiple Ballon d''Or winner, Copa America champion, World Cup champion (2022).\n\n2. **Kylian Mbapp\u00e9 (Paris Saint-Germain)**\n - Position: Forward\n - Key Attributes: Speed, technique, finishing.\n - Achievements: FIFA World Cup champion (2018), multiple Ligue 1 titles, and various domestic cups.\n\n3. **Erling Haaland
|
||||
(Manchester City)**\n - Position: Forward\n - Key Attributes: Power, speed, goal-scoring instinct.\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\n\n4. **Kevin De Bruyne (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, creativity.\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\n\n5. **Karim Benzema (Al-Ittihad)**\n - Position: Forward\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\n - Achievements: 2022 Ballon d''Or winner, multiple Champions Leagues with Real Madrid.\n\n6. **Robert Lewandowski (FC Barcelona)**\n - Position: Forward\n - Key Attributes: Finishing, positioning, aerial ability.\n - Achievements: FIFA Best Men''s Player, multiple Bundesliga titles, La Liga champion (2023).\n\n7. **Mohamed Salah (Liverpool)**\n - Position: Forward\n - Key Attributes: Speed, finishing, dribbling.\n - Achievements:
|
||||
Premier League champion, FA Cup, UEFA Champions League winner.\n\n8. **Luka Modri\u0107 (Real Madrid)**\n - Position: Midfielder\n - Key Attributes: Passing, vision, tactical intelligence.\n - Achievements: Multiple Champions League titles, Ballon d''Or winner (2018).\n\n9. **Harry Kane (Bayern Munich)**\n - Position: Forward\n - Key Attributes: Goal-scoring, technique, playmaking.\n - Achievements: Golden Boot winner, Premier League titles, UEFA European Championship runner-up.\n\n10. **Rodri (Manchester City)**\n - Position: Midfielder\n - Key Attributes: Defensive skills, passing, positional awareness.\n - Achievements: Premier League titles, UEFA Champions League winner (2023).\n\nThis list is curated while adhering to the restriction of excluding Brazilian players. Each player included has demonstrated exceptional skills and remarkable performances, solidifying their status as some of the best in the world as of October 2023."}, {"role": "user", "content":
|
||||
"You are not allowed to include Brazilian players"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '9093'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=8Yv8F0ZCFAo2lf.qoqxao70yxyjVvIV90zQqVF6bVzQ-1749567342-1.0.1.1-fZgnv3RDfunvCO1koxwwFJrHnxSx_rwS_FHvQ6xxDPpKHwYr7dTqIQLZrNgSX5twGyK4F22rUmkuiS6KMVogcinChk8lmHtJBTUVTFjr2KU; _cfuvid=wzh8YnmXvLq1G0RcIVijtzboQtCZyIe2uZiochkBLqE-1749567342267-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.78.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.78.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-BguhCefN1bN2OeYRo5ChhUqNBLUda\",\n \"object\": \"chat.completion\",\n \"created\": 1749567414,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Thought: I now can give a great answer \\nFinal Answer: Here is a refined list of the top 10 best soccer players in the world as of October 2023, ensuring that no Brazilian players are included:\\n\\n1. **Lionel Messi (Inter Miami CF)**\\n - Position: Forward\\n - Key Attributes: Dribbling, vision, goal-scoring ability.\\n - Achievements: Multiple Ballon d'Or winner, Copa America champion, World Cup champion (2022).\\n\\n2. **Kylian Mbappé (Paris Saint-Germain)**\\n - Position: Forward\\n - Key Attributes: Speed, technique, finishing.\\n - Achievements: FIFA World Cup champion (2018), multiple Ligue 1 titles, various domestic cups.\\n\\n3. **Erling Haaland (Manchester City)**\\\
|
||||
n - Position: Forward\\n - Key Attributes: Power, speed, goal-scoring instinct.\\n - Achievements: Bundesliga top scorer, UEFA Champions League winner (2023), Premier League titles.\\n\\n4. **Kevin De Bruyne (Manchester City)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, creativity.\\n - Achievements: Multiple Premier League titles, FA Cups, UEFA Champions League winner (2023).\\n\\n5. **Karim Benzema (Al-Ittihad)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, playmaking, tactical intelligence.\\n - Achievements: 2022 Ballon d'Or winner, multiple Champions Leagues with Real Madrid.\\n\\n6. **Robert Lewandowski (FC Barcelona)**\\n - Position: Forward\\n - Key Attributes: Finishing, positioning, aerial ability.\\n - Achievements: FIFA Best Men's Player, multiple Bundesliga titles, La Liga champion (2023).\\n\\n7. **Mohamed Salah (Liverpool)**\\n - Position: Forward\\n - Key Attributes: Speed, finishing, dribbling.\\n -\
|
||||
\ Achievements: Premier League champion, FA Cup, UEFA Champions League winner.\\n\\n8. **Luka Modrić (Real Madrid)**\\n - Position: Midfielder\\n - Key Attributes: Passing, vision, tactical intelligence.\\n - Achievements: Multiple Champions League titles, Ballon d'Or winner (2018).\\n\\n9. **Harry Kane (Bayern Munich)**\\n - Position: Forward\\n - Key Attributes: Goal-scoring, technique, playmaking.\\n - Achievements: Golden Boot winner, multiple Premier League titles, UEFA European Championship runner-up.\\n\\n10. **Son Heung-min (Tottenham Hotspur)**\\n - Position: Forward\\n - Key Attributes: Speed, finishing, playmaking.\\n - Achievements: Premier League Golden Boot winner, multiple domestic cup titles.\\n\\nThis list has been carefully revised to exclude all Brazilian players while highlighting some of the most talented individuals in soccer as of October 2023. Each player has showcased remarkable effectiveness and skill, contributing significantly to their\
|
||||
\ teams on both domestic and international stages.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 2028,\n \"completion_tokens\": 614,\n \"total_tokens\": 2642,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 1280,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_34a54ae93c\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 94d9b7d24d991d2c-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Jun 2025 14:57:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '35291'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-envoy-upstream-service-time:
|
||||
- '35294'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149997855'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4676152d4227ac1825d1240ddef231d6
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -1,14 +1,8 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. A helpful
|
||||
test assistant\nYour personal goal is: Answer questions\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
|
||||
What is 2+2? Reply with just the number.\n\nBegin! This is VERY important to
|
||||
you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
test assistant\nYour personal goal is: Answer questions"},{"role":"user","content":"\nCurrent
|
||||
Task: What is 2+2? Reply with just the number.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -21,7 +15,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '673'
|
||||
- '272'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -43,23 +37,22 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-Cy7b0HjL79y39EkUcMLrRhPFe3XGj\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1768444914,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L4AzMHXLXDfyclWS6fJSwS0cvOl\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403318,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: 4\",\n \"refusal\": null,\n \"annotations\": []\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
|
||||
\ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
\"assistant\",\n \"content\": \"4\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 50,\n \"completion_tokens\":
|
||||
1,\n \"total_tokens\": 51,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8bbc38b4db\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -68,7 +61,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 Jan 2026 02:41:55 GMT
|
||||
- Fri, 06 Feb 2026 18:41:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
@@ -85,18 +78,14 @@ interactions:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
content-length:
|
||||
- '857'
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '341'
|
||||
- '264'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '358'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
|
||||
@@ -1,14 +1,8 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Standalone Agent. A helpful
|
||||
assistant\nYour personal goal is: Answer questions\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
|
||||
What is 5+5? Reply with just the number.\n\nBegin! This is VERY important to
|
||||
you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
assistant\nYour personal goal is: Answer questions"},{"role":"user","content":"\nCurrent
|
||||
Task: What is 5+5? Reply with just the number.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -21,7 +15,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '674'
|
||||
- '273'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -43,23 +37,22 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-Cy7azhPwUHQ0p5tdhxSAmLPoE8UgC\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1768444913,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3cLs2ndBaXV2wnqYCdi6X1ykvv\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403284,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: 10\",\n \"refusal\": null,\n \"annotations\": []\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 136,\n \"completion_tokens\": 13,\n
|
||||
\ \"total_tokens\": 149,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
\"assistant\",\n \"content\": \"10\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 50,\n \"completion_tokens\":
|
||||
1,\n \"total_tokens\": 51,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -68,7 +61,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 Jan 2026 02:41:54 GMT
|
||||
- Fri, 06 Feb 2026 18:41:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
@@ -85,18 +78,14 @@ interactions:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
content-length:
|
||||
- '858'
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '455'
|
||||
- '270'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '583'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are First Agent. A friendly
|
||||
greeter\nYour personal goal is: Greet users\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
|
||||
hello\n\nBegin! This is VERY important to you, use the tools available and give
|
||||
your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
greeter\nYour personal goal is: Greet users"},{"role":"user","content":"\nCurrent
|
||||
Task: Say hello\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -20,7 +15,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '632'
|
||||
- '231'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -42,24 +37,22 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-CyRKzgODZ9yn3F9OkaXsscLk2Ln3N\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1768520801,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L4A8Aad6P1YUxWjQpvyltn8GaKT\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403318,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: Hello! Welcome! I'm so glad to see you here. If you need any assistance
|
||||
or have any questions, feel free to ask. Have a wonderful day!\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
127,\n \"completion_tokens\": 43,\n \"total_tokens\": 170,\n \"prompt_tokens_details\":
|
||||
\"assistant\",\n \"content\": \"Hello! \U0001F60A How are you today?\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
41,\n \"completion_tokens\": 8,\n \"total_tokens\": 49,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -68,7 +61,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 Jan 2026 23:46:42 GMT
|
||||
- Fri, 06 Feb 2026 18:41:58 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
@@ -85,18 +78,14 @@ interactions:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
content-length:
|
||||
- '990'
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '880'
|
||||
- '325'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1160'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
@@ -118,13 +107,8 @@ interactions:
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Second Agent. A polite
|
||||
farewell agent\nYour personal goal is: Say goodbye\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
|
||||
Say goodbye\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
farewell agent\nYour personal goal is: Say goodbye"},{"role":"user","content":"\nCurrent
|
||||
Task: Say goodbye\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -137,7 +121,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '640'
|
||||
- '239'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -159,27 +143,24 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-CyRL1Ua2PkK5xXPp3KeF0AnGAk3JP\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1768520803,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L4BLMYC3ODccwbKfBIdtrEyd3no\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403319,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: As we reach the end of our conversation, I want to express my gratitude
|
||||
for the time we've shared. It's been a pleasure assisting you, and I hope
|
||||
you found our interaction helpful and enjoyable. Remember, whenever you need
|
||||
assistance, I'm just a message away. Wishing you all the best in your future
|
||||
endeavors. Goodbye and take care!\",\n \"refusal\": null,\n \"annotations\":
|
||||
\"assistant\",\n \"content\": \"Thank you for the time we've spent
|
||||
together! I wish you all the best in your future endeavors. Take care, and
|
||||
until we meet again, goodbye!\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 126,\n \"completion_tokens\":
|
||||
79,\n \"total_tokens\": 205,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 40,\n \"completion_tokens\":
|
||||
31,\n \"total_tokens\": 71,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -188,7 +169,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 15 Jan 2026 23:46:44 GMT
|
||||
- Fri, 06 Feb 2026 18:41:59 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
@@ -205,18 +186,14 @@ interactions:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
content-length:
|
||||
- '1189'
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '1363'
|
||||
- '726'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1605'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
|
||||
@@ -2,9 +2,8 @@ interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Calculator. You calculate
|
||||
things.\nYour personal goal is: Perform calculations efficiently"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the failing_tool to do something.\n\nThis is VERY important to you,
|
||||
your job depends on it!"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"failing_tool","description":"This
|
||||
tool always fails","parameters":{"properties":{},"type":"object"}}}]}'
|
||||
Task: Use the failing_tool to do something."}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"failing_tool","description":"This
|
||||
tool always fails","strict":true,"parameters":{"properties":{},"type":"object","additionalProperties":false,"required":[]}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -17,7 +16,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '477'
|
||||
- '476'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -39,26 +38,26 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D0vm2JDsOmy0czXPAr4vnw3wvuqYZ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1769114454,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3dV6acwapgRyxmnzGfuOXemtjJ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403285,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_8xr8rPUDWzLfQ3LOWPHtBUjK\",\n \"type\":
|
||||
\ \"id\": \"call_GCdaOdo32pr1sSk4RzO0tiB9\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
|
||||
\ \"arguments\": \"{}\"\n }\n }\n ],\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\":
|
||||
{\n \"prompt_tokens\": 78,\n \"completion_tokens\": 11,\n \"total_tokens\":
|
||||
89,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
|
||||
{\n \"prompt_tokens\": 65,\n \"completion_tokens\": 11,\n \"total_tokens\":
|
||||
76,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
|
||||
0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n
|
||||
\ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_6c0d1490cb\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -67,7 +66,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 22 Jan 2026 20:40:54 GMT
|
||||
- Fri, 06 Feb 2026 18:41:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
@@ -87,13 +86,11 @@ interactions:
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '593'
|
||||
- '436'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '621'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
@@ -116,12 +113,9 @@ interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Calculator. You calculate
|
||||
things.\nYour personal goal is: Perform calculations efficiently"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the failing_tool to do something.\n\nThis is VERY important to you,
|
||||
your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_8xr8rPUDWzLfQ3LOWPHtBUjK","type":"function","function":{"name":"failing_tool","arguments":"{}"}}]},{"role":"tool","tool_call_id":"call_8xr8rPUDWzLfQ3LOWPHtBUjK","content":"Error
|
||||
executing tool: This tool always fails"},{"role":"user","content":"Analyze the
|
||||
tool result. If requirements are met, provide the Final Answer. Otherwise, call
|
||||
the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"failing_tool","description":"This
|
||||
tool always fails","parameters":{"properties":{},"type":"object"}}}]}'
|
||||
Task: Use the failing_tool to do something."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_GCdaOdo32pr1sSk4RzO0tiB9","type":"function","function":{"name":"failing_tool","arguments":"{}"}}]},{"role":"tool","tool_call_id":"call_GCdaOdo32pr1sSk4RzO0tiB9","name":"failing_tool","content":"Error
|
||||
executing tool: This tool always fails"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"failing_tool","description":"This
|
||||
tool always fails","strict":true,"parameters":{"properties":{},"type":"object","additionalProperties":false,"required":[]}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
@@ -134,7 +128,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '941'
|
||||
- '778'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
@@ -158,22 +152,25 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D0vm3xcywoKBW75bhBXfkGJNim6Th\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1769114455,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
string: "{\n \"id\": \"chatcmpl-D6L3dhjDZOoihHvXvRpbJD3ReGu0z\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770403285,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Error: This tool always fails.\",\n
|
||||
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
141,\n \"completion_tokens\": 8,\n \"total_tokens\": 149,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
\"assistant\",\n \"content\": \"The attempt to use the failing tool
|
||||
resulted in an error, as expected since it is designed to always fail. If
|
||||
there's anything else you would like to calculate or explore, please let me
|
||||
know!\",\n \"refusal\": null,\n \"annotations\": []\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 93,\n \"completion_tokens\": 40,\n
|
||||
\ \"total_tokens\": 133,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_c4585b5b9c\"\n}\n"
|
||||
\"default\",\n \"system_fingerprint\": \"fp_6c0d1490cb\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -182,7 +179,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Thu, 22 Jan 2026 20:40:55 GMT
|
||||
- Fri, 06 Feb 2026 18:41:26 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
@@ -200,13 +197,11 @@ interactions:
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '420'
|
||||
- '776'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '436'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
|
||||
@@ -43,15 +43,15 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_0149zKBgM47utdBdrfJjM6YZ","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_011jnBYLgtzXqdmSi7JDyQHj","name":"structured_output","input":{"operation":"Addition","result":42,"explanation":"Adding
|
||||
15 and 27 together results in 42"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":573,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":79,"service_tier":"standard"}}'
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01A41GpDoJbZLUhR8dQzUcUX","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01UNPdzpayoWyqDYVE7fR5oA","name":"structured_output","input":{"operation":"Addition","result":42,"explanation":"Added
|
||||
15 and 27 together"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":573,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":75,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -62,7 +62,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 30 Jan 2026 18:56:15 GMT
|
||||
- Fri, 06 Feb 2026 18:41:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -88,7 +88,7 @@ interactions:
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-30T18:56:14Z'
|
||||
- '2026-02-06T18:41:24Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
@@ -102,7 +102,7 @@ interactions:
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '1473'
|
||||
- '1247'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -44,21 +44,20 @@ interactions:
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
- 3.13.5
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_013iHkpmto99iyH5kDvn8uER","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01Kpda2DzHBqWq9a2FS2Bdw6","name":"structured_output","input":{"topic":"Benefits
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_016wrV83wm3FLYD4JoTy2Piw","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01V6Pzr7eGfuG4Q3mc25ZXwN","name":"structured_output","input":{"topic":"Benefits
|
||||
of Remote Work","summary":"Remote work offers significant advantages for both
|
||||
employees and employers, transforming traditional work paradigms by providing
|
||||
flexibility, increased productivity, and cost savings.","key_points":["Increased
|
||||
employee flexibility and work-life balance","Reduced commuting time and associated
|
||||
stress","Cost savings for companies on office infrastructure","Access to a
|
||||
global talent pool","Higher employee productivity and job satisfaction","Lower
|
||||
carbon footprint due to reduced travel"]}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":589,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":153,"service_tier":"standard"}}'
|
||||
employees and employers, transforming traditional workplace dynamics.","key_points":["Increased
|
||||
flexibility in work schedule","Reduced commute time and transportation costs","Improved
|
||||
work-life balance","Higher productivity for many employees","Cost savings
|
||||
for companies on office infrastructure","Expanded talent pool for hiring","Enhanced
|
||||
employee job satisfaction"]}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":589,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":142,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
@@ -69,7 +68,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Fri, 30 Jan 2026 18:56:19 GMT
|
||||
- Fri, 06 Feb 2026 18:41:28 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -95,7 +94,7 @@ interactions:
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-01-30T18:56:16Z'
|
||||
- '2026-02-06T18:41:26Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
@@ -109,7 +108,7 @@ interactions:
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '3107'
|
||||
- '2650'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
|
||||
@@ -0,0 +1,332 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
|
||||
hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. "}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5918'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_013xTaKq41TFn6drdxt1mFdx","type":"message","role":"assistant","content":[{"type":"text","text":"Hello!"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:40 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '726'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
|
||||
goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. "}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5920'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01LdueHX7nvf19wD8Uxn4EZD","type":"message","role":"assistant","content":[{"type":"text","text":"Goodbye"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '759'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,336 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
|
||||
is the weather in Tokyo?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
|
||||
are a helpful assistant that uses tools. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. ","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get
|
||||
the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"]}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6211'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01WhFk2ppoz43nbh4uNhXBfL","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01CX1yZuJ5MQaJbXNSrnCiqf","name":"get_weather","input":{"location":"Tokyo"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:38 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '1390'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"What
|
||||
is the weather in Paris?","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","stream":false,"system":"You
|
||||
are a helpful assistant that uses tools. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. ","tool_choice":{"type":"tool","name":"get_weather"},"tools":[{"name":"get_weather","description":"Get
|
||||
the current weather for a location","input_schema":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"]}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6211'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01Nmw5NyAEwCLGjpVnf15rh4","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01DEe9K7N4EfhPFqxHhqEHCE","name":"get_weather","input":{"location":"Paris"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":24,"cache_creation_input_tokens":0,"cache_read_input_tokens":1857,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":33,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:40 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '1259'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,411 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
|
||||
hello in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. ","stream":true}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5917'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-stream-helper:
|
||||
- messages
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: 'event: message_start
|
||||
|
||||
data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01LshZroyEGgd3HfDrKdQMLm","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":4,"service_tier":"standard","inference_geo":"not_available"}} }
|
||||
|
||||
|
||||
event: content_block_start
|
||||
|
||||
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""} }
|
||||
|
||||
|
||||
event: ping
|
||||
|
||||
data: {"type": "ping"}
|
||||
|
||||
|
||||
event: content_block_delta
|
||||
|
||||
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"} }
|
||||
|
||||
|
||||
event: content_block_stop
|
||||
|
||||
data: {"type":"content_block_stop","index":0 }
|
||||
|
||||
|
||||
event: message_delta
|
||||
|
||||
data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":4}
|
||||
}
|
||||
|
||||
|
||||
event: message_stop
|
||||
|
||||
data: {"type":"message_stop" }
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Cache-Control:
|
||||
- no-cache
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:43 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '837'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":[{"type":"text","text":"Say
|
||||
goodbye in one word.","cache_control":{"type":"ephemeral"}}]}],"model":"claude-sonnet-4-5-20250929","system":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. ","stream":true}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5919'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-stream-helper:
|
||||
- messages
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: 'event: message_start
|
||||
|
||||
data: {"type":"message_start","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01MZSWarEUbFXmek8aEpwKDu","type":"message","role":"assistant","content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":6,"service_tier":"standard","inference_geo":"not_available"}} }
|
||||
|
||||
|
||||
event: content_block_start
|
||||
|
||||
data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}
|
||||
|
||||
|
||||
event: ping
|
||||
|
||||
data: {"type": "ping"}
|
||||
|
||||
|
||||
event: content_block_delta
|
||||
|
||||
data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Goodbye."} }
|
||||
|
||||
|
||||
event: content_block_stop
|
||||
|
||||
data: {"type":"content_block_stop","index":0 }
|
||||
|
||||
|
||||
event: message_delta
|
||||
|
||||
data: {"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":3,"cache_creation_input_tokens":0,"cache_read_input_tokens":1217,"output_tokens":6} }
|
||||
|
||||
|
||||
event: message_stop
|
||||
|
||||
data: {"type":"message_stop" }
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Cache-Control:
|
||||
- no-cache
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:27:44 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '870'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,266 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "Say hello in one word."}], "role": "user"}],
|
||||
"systemInstruction": {"parts": [{"text": "You are a helpful assistant. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
"}], "role": "user"}, "generationConfig": {}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5876'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"text\": \"Hello\"\n }\n ],\n
|
||||
\ \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n
|
||||
\ \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
1135,\n \"candidatesTokenCount\": 1,\n \"totalTokenCount\": 1158,\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 1135\n }\n ],\n \"thoughtsTokenCount\":
|
||||
22\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"46GLaf60NYmY-8YP--PB6QE\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 21:23:47 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=773
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "Say goodbye in one word."}], "role":
|
||||
"user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. "}], "role": "user"}, "generationConfig": {}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5878'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"text\": \"Farewell.\"\n }\n ],\n
|
||||
\ \"role\": \"model\"\n },\n \"finishReason\": \"STOP\",\n
|
||||
\ \"index\": 0\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
1135,\n \"candidatesTokenCount\": 3,\n \"totalTokenCount\": 1164,\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 1135\n }\n ],\n \"thoughtsTokenCount\":
|
||||
26\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"5KGLafeeIv-G-8YP_MfPgAI\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 21:23:48 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=662
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,280 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "What is the weather in Tokyo?"}], "role":
|
||||
"user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant
|
||||
that uses tools. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. "}], "role": "user"}, "tools": [{"functionDeclarations":
|
||||
[{"description": "Get the current weather for a location", "name": "get_weather",
|
||||
"parameters_json_schema": {"type": "object", "properties": {"location": {"type":
|
||||
"string", "description": "The city name"}}, "required": ["location"]}}]}], "generationConfig":
|
||||
{}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6172'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"functionCall\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"args\": {\n \"location\": \"Tokyo\"\n }\n
|
||||
\ },\n \"thoughtSignature\": \"CpECAb4+9vvTFzaczX2PeZjKEs1f6+MRyTMz+xxqs37q0INQ6e0WLt1soet6CL/uzRML9LsycSeQTraXtXR8qcGj6dnrhKLpovpy8EkrtfK6P57PGpostE/UJ6TIKPlWi0pY1h2u9vyy5yGLzpp0PZM6d6f8rzV9uPFNM+onGvcFOdzghRZlHmYkQdbdpZaFQBAK6QFuh8oGbC0Ygrsk1guJo1YZaKtU5Rp/k2rJO61Obgq7aYEb7ACVx7DM9ZlVCun/PbXR4UolFeNPxNdwzC5AVvP7UKa2Cxi8dzQ8RNebtd39/gNO546XzADGZkpSqG6QF0S4IEsmB9FFCctN1evgKicgT2Qo+AR6BY8uzZyWkGQx\"\n
|
||||
\ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\":
|
||||
\"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated
|
||||
function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
1180,\n \"candidatesTokenCount\": 15,\n \"totalTokenCount\": 1253,\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 1180\n }\n ],\n \"thoughtsTokenCount\":
|
||||
58\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"wHmLacb_GL-J-sAPn6azgAo\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:32:32 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=755
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "What is the weather in Paris?"}], "role":
|
||||
"user"}], "systemInstruction": {"parts": [{"text": "You are a helpful assistant
|
||||
that uses tools. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. "}], "role": "user"}, "tools": [{"functionDeclarations":
|
||||
[{"description": "Get the current weather for a location", "name": "get_weather",
|
||||
"parameters_json_schema": {"type": "object", "properties": {"location": {"type":
|
||||
"string", "description": "The city name"}}, "required": ["location"]}}]}], "generationConfig":
|
||||
{}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6172'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"functionCall\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"args\": {\n \"location\": \"Paris\"\n }\n
|
||||
\ },\n \"thoughtSignature\": \"CuMBAb4+9vurHOlMBPzqCtd/J0Q5jBhUq8dsk7xntqcTgwBcZ1KeX4F4UJ0rdfg1OLhDkOlOlELA/jBYxATT19QUvw0szvDBDml0PsTBXlt64o7oGVmOCjdiGPu71I9+sCYhlD3QXzwLdQdrvUIfVrB+kaGszmZi1KTIli+qD9ihueDYGY510ouKdfl31UipQEG990+qFJyXe3avVEh3Jo72iXr3Q4UczFdbKSTV4V4fjrokFaB7UqcYy1iuAB5vHRsxYFJeTCi+ddKzn700gbWbiJZUniKiE3QfdOK4A5S0woBDzV0=\"\n
|
||||
\ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\":
|
||||
\"STOP\",\n \"index\": 0,\n \"finishMessage\": \"Model generated
|
||||
function call(s).\"\n }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\":
|
||||
1180,\n \"candidatesTokenCount\": 15,\n \"totalTokenCount\": 1242,\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 1180\n }\n ],\n \"thoughtsTokenCount\":
|
||||
47\n },\n \"modelVersion\": \"gemini-2.5-flash\",\n \"responseId\": \"wXmLadTiEri5jMcPk_6ZgAc\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:32:33 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=881
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,356 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
"},{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5823'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mVhCCkdWfellaSmcNLOuu87BsqI\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747141,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Hello!\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 1144,\n \"completion_tokens\":
|
||||
2,\n \"total_tokens\": 1146,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
1024,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '469'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
"},{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5825'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mViSYwB6eFFbBcp045uvPAO8m2e\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747142,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Farewell.\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
1144,\n \"completion_tokens\": 3,\n \"total_tokens\": 1147,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '468'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,368 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant that
|
||||
uses tools. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. "},{"role":"user","content":"What is the weather in Tokyo?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get
|
||||
the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"],"additionalProperties":false}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6158'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mVx3s1dI2SICWePwHVeWCDct2QG\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747157,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_x9KzZUT3UYazEUJiRmE0PvaU\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"arguments\": \"{\\\"location\\\":\\\"Tokyo\\\"}\"\n }\n
|
||||
\ }\n ],\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\":
|
||||
14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:37 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '645'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant that
|
||||
uses tools. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. "},{"role":"user","content":"What is the weather in Paris?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get
|
||||
the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"],"additionalProperties":false}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6158'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mVynM0Soyt3osUFrlF7tEyrj7jP\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747158,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_k8rYmsdMcCWSRKqVDFItmJ8v\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"arguments\": \"{\\\"location\\\":\\\"Paris\\\"}\"\n }\n
|
||||
\ }\n ],\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\":
|
||||
14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:38 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '749'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,520 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"input":[{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1","instructions":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. "}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5807'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/responses
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"resp_0b352452095088f800698b751350fc8196bd5d8b1a179d27e8\",\n
|
||||
\ \"object\": \"response\",\n \"created_at\": 1770747155,\n \"status\":
|
||||
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
|
||||
\"developer\"\n },\n \"completed_at\": 1770747155,\n \"error\": null,\n
|
||||
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
|
||||
\"You are a helpful assistant. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. \",\n \"max_output_tokens\":
|
||||
null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"output\": [\n {\n \"id\": \"msg_0b352452095088f800698b7513b97c8196b35014840754d999\",\n
|
||||
\ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
|
||||
[\n {\n \"type\": \"output_text\",\n \"annotations\":
|
||||
[],\n \"logprobs\": [],\n \"text\": \"Hello!\"\n }\n
|
||||
\ ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\":
|
||||
true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\":
|
||||
null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\":
|
||||
null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\":
|
||||
\"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n
|
||||
\ \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n
|
||||
\ },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\":
|
||||
0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\":
|
||||
1144,\n \"input_tokens_details\": {\n \"cached_tokens\": 1024\n },\n
|
||||
\ \"output_tokens\": 3,\n \"output_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n },\n \"total_tokens\": 1147\n },\n \"user\": null,\n \"metadata\":
|
||||
{}\n}"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:35 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '637'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input":[{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1","instructions":"You
|
||||
are a helpful assistant. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. "}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5809'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/responses
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"resp_003a6f71f9ee620400698b75140a088196989e8d5641ffa74d\",\n
|
||||
\ \"object\": \"response\",\n \"created_at\": 1770747156,\n \"status\":
|
||||
\"completed\",\n \"background\": false,\n \"billing\": {\n \"payer\":
|
||||
\"developer\"\n },\n \"completed_at\": 1770747156,\n \"error\": null,\n
|
||||
\ \"frequency_penalty\": 0.0,\n \"incomplete_details\": null,\n \"instructions\":
|
||||
\"You are a helpful assistant. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to
|
||||
ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the
|
||||
prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is
|
||||
large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for
|
||||
caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is
|
||||
padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. \",\n \"max_output_tokens\":
|
||||
null,\n \"max_tool_calls\": null,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"output\": [\n {\n \"id\": \"msg_003a6f71f9ee620400698b75146160819692f2cee879df2405\",\n
|
||||
\ \"type\": \"message\",\n \"status\": \"completed\",\n \"content\":
|
||||
[\n {\n \"type\": \"output_text\",\n \"annotations\":
|
||||
[],\n \"logprobs\": [],\n \"text\": \"Farewell.\"\n }\n
|
||||
\ ],\n \"role\": \"assistant\"\n }\n ],\n \"parallel_tool_calls\":
|
||||
true,\n \"presence_penalty\": 0.0,\n \"previous_response_id\": null,\n \"prompt_cache_key\":
|
||||
null,\n \"prompt_cache_retention\": null,\n \"reasoning\": {\n \"effort\":
|
||||
null,\n \"summary\": null\n },\n \"safety_identifier\": null,\n \"service_tier\":
|
||||
\"default\",\n \"store\": true,\n \"temperature\": 1.0,\n \"text\": {\n
|
||||
\ \"format\": {\n \"type\": \"text\"\n },\n \"verbosity\": \"medium\"\n
|
||||
\ },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_logprobs\":
|
||||
0,\n \"top_p\": 1.0,\n \"truncation\": \"disabled\",\n \"usage\": {\n \"input_tokens\":
|
||||
1144,\n \"input_tokens_details\": {\n \"cached_tokens\": 1024\n },\n
|
||||
\ \"output_tokens\": 4,\n \"output_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n },\n \"total_tokens\": 1148\n },\n \"user\": null,\n \"metadata\":
|
||||
{}\n}"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:36 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '543'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,368 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant that
|
||||
uses tools. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. "},{"role":"user","content":"What is the weather in Tokyo?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get
|
||||
the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"],"additionalProperties":false}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6158'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mXQCgT3p3ViImkiqDiZGqLREQtp\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747248,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_9ZqMavn3J1fBnQEaqpYol0Bd\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"arguments\": \"{\\\"location\\\":\\\"Tokyo\\\"}\"\n }\n
|
||||
\ }\n ],\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\":
|
||||
14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:14:08 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '484'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant that
|
||||
uses tools. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. "},{"role":"user","content":"What is the weather in Paris?"}],"model":"gpt-4.1","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_weather","description":"Get
|
||||
the current weather for a location","strict":true,"parameters":{"type":"object","properties":{"location":{"type":"string","description":"The
|
||||
city name"}},"required":["location"],"additionalProperties":false}}}]}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '6158'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7mXR8k9vk8TlGvGXlrQSI7iNeAN1\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770747249,\n \"model\": \"gpt-4.1-2025-04-14\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_6PeUBlRPG8JcV2lspmLjJbnn\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"get_weather\",\n
|
||||
\ \"arguments\": \"{\\\"location\\\":\\\"Paris\\\"}\"\n }\n
|
||||
\ }\n ],\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1187,\n \"completion_tokens\":
|
||||
14,\n \"total_tokens\": 1201,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
1152,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_8b22347a3e\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:14:09 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '528'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,375 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
"},{"role":"user","content":"Say hello in one word."}],"model":"gpt-4.1","stream":true,"stream_options":{"include_usage":true}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5877'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: 'data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"lFWRn007xqlce"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OXJHANtgvy"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"AZtd6jtoChevtm"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"irwn2mqyB"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuXauQqcmOCb3XP6IL6yHwJaAL","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[],"usage":{"prompt_tokens":1144,"completion_tokens":2,"total_tokens":1146,"prompt_tokens_details":{"cached_tokens":1024,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"W0rkiiZe"}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:34 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '236'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a helpful assistant. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
This is padding text to ensure the prompt is large enough for caching. This
|
||||
is padding text to ensure the prompt is large enough for caching. This is padding
|
||||
text to ensure the prompt is large enough for caching. This is padding text
|
||||
to ensure the prompt is large enough for caching. This is padding text to ensure
|
||||
the prompt is large enough for caching. This is padding text to ensure the prompt
|
||||
is large enough for caching. This is padding text to ensure the prompt is large
|
||||
enough for caching. This is padding text to ensure the prompt is large enough
|
||||
for caching. This is padding text to ensure the prompt is large enough for caching.
|
||||
"},{"role":"user","content":"Say goodbye in one word."}],"model":"gpt-4.1","stream":true,"stream_options":{"include_usage":true}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '5879'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- COOKIE-XXX
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: 'data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pCjdYd4kX4W2q"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"Fare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"DJ94I8XQj86"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"well"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"qgSSFwDBmaW"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"4xVBYer6Uy1atr"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"XxMhsMje0"}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-D7mVuqaadwp22jFsp2qAKiE1utU3K","object":"chat.completion.chunk","created":1770747154,"model":"gpt-4.1-2025-04-14","service_tier":"default","system_fingerprint":"fp_8b22347a3e","choices":[],"usage":{"prompt_tokens":1144,"completion_tokens":3,"total_tokens":1147,"prompt_tokens_details":{"cached_tokens":1024,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"J3eKDOHW"}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 10 Feb 2026 18:12:34 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '296'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,113 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Writer. You are a skilled
|
||||
writer.\nYour personal goal is: Write concise content"},{"role":"user","content":"\nCurrent
|
||||
Task: Write one sentence about the sun.\n\nThis is the expected criteria for
|
||||
your final answer: A single sentence about the sun.\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nProvide your complete
|
||||
response:"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '453'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7RxEngFVCbqdc7tNjV3VjeteqcwT\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668124,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The sun is a massive ball of glowing
|
||||
gas at the center of our solar system, providing light and warmth essential
|
||||
for life on Earth.\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 78,\n \"completion_tokens\":
|
||||
27,\n \"total_tokens\": 105,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:15:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '664'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,120 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Researcher. You are an
|
||||
expert researcher.\nYour personal goal is: Find information about Python programming"},{"role":"user","content":"\nCurrent
|
||||
Task: What is Python? Give a brief answer.\n\nThis is the expected criteria
|
||||
for your final answer: A short description of Python.\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nProvide your complete
|
||||
response:"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '482'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7RxRv3U0LCLf2iqf40wxOQsuiYFR\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668137,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Python is a high-level, interpreted
|
||||
programming language known for its readability and simplicity. It was created
|
||||
by Guido van Rossum and first released in 1991. Python supports multiple programming
|
||||
paradigms, including procedural, object-oriented, and functional programming.
|
||||
It has a large standard library and is widely used for web development, data
|
||||
analysis, artificial intelligence, scientific computing, and automation, among
|
||||
other applications. Python's syntax emphasizes code readability, allowing
|
||||
developers to express concepts in fewer lines of code compared to other languages.
|
||||
Its active community and extensive ecosystem of libraries and frameworks make
|
||||
it a popular choice for both beginners and experienced programmers.\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
82,\n \"completion_tokens\": 123,\n \"total_tokens\": 205,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:15:39 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '2467'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,435 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Explain the
|
||||
Python package ecosystem. How does pip work, what is PyPI, and what are virtual
|
||||
environments? Compare pip with conda and uv.\n\n[ASSISTANT]: PyPI (Python Package
|
||||
Index) is the official repository hosting 400k+ packages. pip is the standard
|
||||
package installer that downloads from PyPI. Virtual environments (venv) create
|
||||
isolated Python installations to avoid dependency conflicts between projects.
|
||||
conda is a cross-language package manager popular in data science that can manage
|
||||
non-Python dependencies. uv is a new Rust-based tool that is 10-100x faster
|
||||
than pip and aims to replace pip, pip-tools, and virtualenv with a single unified
|
||||
tool.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task Overview**:
|
||||
What is the agent trying to accomplish?\n2. **Current State**: What has been
|
||||
completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1687'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S93xpUu9d5twM82uJOZpurQTD5u\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668857,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking an explanation of the Python package ecosystem, specifically
|
||||
focusing on how pip works, the role of PyPI, the concept of virtual environments,
|
||||
and a comparison between pip, conda, and uv.\\n\\n2. **Current State**: The
|
||||
assistant has provided a comprehensive overview of the Python package ecosystem,
|
||||
including definitions and comparisons of pip, PyPI, virtual environments,
|
||||
conda, and uv.\\n\\n3. **Important Discoveries**:\\n - PyPI (Python Package
|
||||
Index) is the official repository with over 400,000 packages.\\n - pip is
|
||||
the standard package installer that downloads packages from PyPI.\\n - Virtual
|
||||
environments (venv) allow for isolated Python installations to prevent dependency
|
||||
conflicts.\\n - conda is a cross-language package manager, particularly
|
||||
popular in data science, that can manage non-Python dependencies.\\n - uv
|
||||
is a new Rust-based tool that is significantly faster than pip (10-100x) and
|
||||
aims to unify the functionalities of pip, pip-tools, and virtualenv.\\n\\n4.
|
||||
**Next Steps**: The agent should consider providing further details on how
|
||||
to use pip, conda, and uv, including installation commands, examples of creating
|
||||
virtual environments, and any specific use cases for each tool.\\n\\n5. **Context
|
||||
to Preserve**: \\n - PyPI: Python Package Index, hosting 400k+ packages.\\n
|
||||
\ - pip: Standard package installer for Python.\\n - Virtual environments
|
||||
(venv): Isolated Python installations.\\n - conda: Cross-language package
|
||||
manager for data science.\\n - uv: Rust-based tool, 10-100x faster than
|
||||
pip, aims to replace pip, pip-tools, and virtualenv.\\n</summary>\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
333,\n \"completion_tokens\": 354,\n \"total_tokens\": 687,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:27:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '4879'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Tell me about
|
||||
the history of the Python programming language. Who created it, when was it
|
||||
first released, and what were the main design goals? Please provide a detailed
|
||||
overview covering the major milestones from its inception through Python 3.\n\n[ASSISTANT]:
|
||||
Python was created by Guido van Rossum and first released in 1991. The main
|
||||
design goals were code readability and simplicity. Key milestones: Python 1.0
|
||||
(1994) introduced functional programming tools like lambda and map. Python 2.0
|
||||
(2000) added list comprehensions and garbage collection. Python 3.0 (2008) was
|
||||
a major backward-incompatible release that fixed fundamental design flaws. Python
|
||||
2 reached end-of-life in January 2020.\n</conversation>\n\nCreate a summary
|
||||
with these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2.
|
||||
**Current State**: What has been completed so far? What step is the agent on?\n3.
|
||||
**Important Discoveries**: Key facts, data, tool results, or findings that must
|
||||
not be lost.\n4. **Next Steps**: What should the agent do next based on the
|
||||
conversation?\n5. **Context to Preserve**: Any specific values, names, URLs,
|
||||
code snippets, or details referenced in the conversation.\n\nWrap your entire
|
||||
summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1726'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S93rBUMAtEdwdI6Y2ga0s50IFtv\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668857,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking a detailed overview of the history of the Python programming
|
||||
language, including its creator, initial release date, main design goals,
|
||||
and major milestones up to Python 3.\\n\\n2. **Current State**: The assistant
|
||||
has provided a comprehensive response detailing the history of Python, including
|
||||
its creator (Guido van Rossum), first release (1991), main design goals (code
|
||||
readability and simplicity), and key milestones (Python 1.0 in 1994, Python
|
||||
2.0 in 2000, and Python 3.0 in 2008).\\n\\n3. **Important Discoveries**: \\n
|
||||
\ - Python was created by Guido van Rossum.\\n - First released in 1991.\\n
|
||||
\ - Main design goals: code readability and simplicity.\\n - Key milestones:\\n
|
||||
\ - Python 1.0 (1994): Introduced functional programming tools like lambda
|
||||
and map.\\n - Python 2.0 (2000): Added list comprehensions and garbage
|
||||
collection.\\n - Python 3.0 (2008): Major backward-incompatible release
|
||||
that fixed fundamental design flaws.\\n - Python 2 reached end-of-life in
|
||||
January 2020.\\n\\n4. **Next Steps**: The agent should be prepared to provide
|
||||
additional details or answer follow-up questions regarding Python's features,
|
||||
community, or specific use cases if the user requests more information.\\n\\n5.
|
||||
**Context to Preserve**: \\n - Creator: Guido van Rossum\\n - Initial
|
||||
release: 1991\\n - Milestones: \\n - Python 1.0 (1994)\\n - Python
|
||||
2.0 (2000)\\n - Python 3.0 (2008)\\n - End-of-life for Python 2: January
|
||||
2020\\n</summary>\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 346,\n \"completion_tokens\":
|
||||
372,\n \"total_tokens\": 718,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_7e4bf6ad56\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:27:42 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '5097'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: What about
|
||||
the async/await features? When were they introduced and how do they compare
|
||||
to similar features in JavaScript and C#? Also explain the Global Interpreter
|
||||
Lock and its implications.\n\n[ASSISTANT]: Async/await was introduced in Python
|
||||
3.5 (PEP 492, 2015). Unlike JavaScript which is single-threaded by design, Python''s
|
||||
asyncio is an opt-in framework. C# introduced async/await in 2012 (C# 5.0) and
|
||||
was a major inspiration for Python''s implementation. The GIL (Global Interpreter
|
||||
Lock) is a mutex that protects access to Python objects, preventing multiple
|
||||
threads from executing Python bytecodes simultaneously. This means CPU-bound
|
||||
multithreaded programs don''t benefit from multiple cores. PEP 703 proposes
|
||||
making the GIL optional in CPython.\n</conversation>\n\nCreate a summary with
|
||||
these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2.
|
||||
**Current State**: What has been completed so far? What step is the agent on?\n3.
|
||||
**Important Discoveries**: Key facts, data, tool results, or findings that must
|
||||
not be lost.\n4. **Next Steps**: What should the agent do next based on the
|
||||
conversation?\n5. **Context to Preserve**: Any specific values, names, URLs,
|
||||
code snippets, or details referenced in the conversation.\n\nWrap your entire
|
||||
summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1786'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S94auQYOLDTKfRzdluGiWAomSqd\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668858,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking information about the async/await features in Python, their
|
||||
introduction timeline, comparisons with similar features in JavaScript and
|
||||
C#, and an explanation of the Global Interpreter Lock (GIL) and its implications.\\n\\n2.
|
||||
**Current State**: The assistant has provided information regarding the introduction
|
||||
of async/await in Python (version 3.5, PEP 492 in 2015), comparisons with
|
||||
JavaScript and C# (C# introduced async/await in 2012), and an explanation
|
||||
of the GIL.\\n\\n3. **Important Discoveries**: \\n - Async/await was introduced
|
||||
in Python 3.5 (PEP 492, 2015).\\n - JavaScript is single-threaded, while
|
||||
Python's asyncio is an opt-in framework.\\n - C# introduced async/await
|
||||
in 2012 (C# 5.0) and influenced Python's implementation.\\n - The GIL (Global
|
||||
Interpreter Lock) is a mutex that prevents multiple threads from executing
|
||||
Python bytecodes simultaneously, affecting CPU-bound multithreaded programs.\\n
|
||||
\ - PEP 703 proposes making the GIL optional in CPython.\\n\\n4. **Next Steps**:
|
||||
The agent should consider providing more detailed comparisons of async/await
|
||||
features between Python, JavaScript, and C#, as well as further implications
|
||||
of the GIL and PEP 703.\\n\\n5. **Context to Preserve**: \\n - Python async/await
|
||||
introduction: 3.5 (PEP 492, 2015)\\n - C# async/await introduction: 2012
|
||||
(C# 5.0)\\n - GIL (Global Interpreter Lock) explanation and implications.\\n
|
||||
\ - Reference to PEP 703 regarding the GIL.\\n</summary>\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
364,\n \"completion_tokens\": 368,\n \"total_tokens\": 732,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:27:44 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '6339'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,435 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Explain the
|
||||
Python package ecosystem. How does pip work, what is PyPI, and what are virtual
|
||||
environments? Compare pip with conda and uv.\n\n[ASSISTANT]: PyPI (Python Package
|
||||
Index) is the official repository hosting 400k+ packages. pip is the standard
|
||||
package installer that downloads from PyPI. Virtual environments (venv) create
|
||||
isolated Python installations to avoid dependency conflicts between projects.
|
||||
conda is a cross-language package manager popular in data science that can manage
|
||||
non-Python dependencies. uv is a new Rust-based tool that is 10-100x faster
|
||||
than pip and aims to replace pip, pip-tools, and virtualenv with a single unified
|
||||
tool.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task Overview**:
|
||||
What is the agent trying to accomplish?\n2. **Current State**: What has been
|
||||
completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1687'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S9PnjkuCMHqU912kcH8G5zIIxQU\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668879,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking an explanation of the Python package ecosystem, specifically
|
||||
focusing on how pip works, the role of PyPI, the concept of virtual environments,
|
||||
and a comparison between pip, conda, and uv.\\n\\n2. **Current State**: The
|
||||
assistant has provided a comprehensive overview of the Python package ecosystem,
|
||||
including definitions and comparisons of pip, PyPI, virtual environments,
|
||||
conda, and uv.\\n\\n3. **Important Discoveries**:\\n - PyPI (Python Package
|
||||
Index) is the official repository with over 400,000 packages.\\n - pip is
|
||||
the standard package installer that downloads packages from PyPI.\\n - Virtual
|
||||
environments (venv) allow for isolated Python installations to prevent dependency
|
||||
conflicts.\\n - conda is a cross-language package manager, particularly
|
||||
popular in data science, that can manage non-Python dependencies.\\n - uv
|
||||
is a new Rust-based tool that is significantly faster than pip (10-100x) and
|
||||
aims to unify the functionalities of pip, pip-tools, and virtualenv.\\n\\n4.
|
||||
**Next Steps**: The agent should consider providing further details or examples
|
||||
on how to use pip, conda, and uv, as well as practical applications of virtual
|
||||
environments in Python projects.\\n\\n5. **Context to Preserve**: \\n -
|
||||
PyPI: Python Package Index, hosting 400k+ packages.\\n - pip: Standard package
|
||||
installer for Python.\\n - Virtual environments (venv): Isolated Python
|
||||
installations.\\n - conda: Cross-language package manager for data science.\\n
|
||||
\ - uv: Rust-based tool, 10-100x faster than pip, aims to replace pip, pip-tools,
|
||||
and virtualenv.\\n</summary>\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 333,\n \"completion_tokens\":
|
||||
349,\n \"total_tokens\": 682,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:28:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '4979'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Tell me about
|
||||
the history of the Python programming language. Who created it, when was it
|
||||
first released, and what were the main design goals? Please provide a detailed
|
||||
overview covering the major milestones from its inception through Python 3.\n\n[ASSISTANT]:
|
||||
Python was created by Guido van Rossum and first released in 1991. The main
|
||||
design goals were code readability and simplicity. Key milestones: Python 1.0
|
||||
(1994) introduced functional programming tools like lambda and map. Python 2.0
|
||||
(2000) added list comprehensions and garbage collection. Python 3.0 (2008) was
|
||||
a major backward-incompatible release that fixed fundamental design flaws. Python
|
||||
2 reached end-of-life in January 2020.\n</conversation>\n\nCreate a summary
|
||||
with these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2.
|
||||
**Current State**: What has been completed so far? What step is the agent on?\n3.
|
||||
**Important Discoveries**: Key facts, data, tool results, or findings that must
|
||||
not be lost.\n4. **Next Steps**: What should the agent do next based on the
|
||||
conversation?\n5. **Context to Preserve**: Any specific values, names, URLs,
|
||||
code snippets, or details referenced in the conversation.\n\nWrap your entire
|
||||
summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1726'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S9PqglWRu0PEoMRHyOiRnpn3yqU\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668879,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking a detailed overview of the history of the Python programming
|
||||
language, including its creator, initial release date, main design goals,
|
||||
and major milestones up to Python 3.\\n\\n2. **Current State**: The assistant
|
||||
has provided a comprehensive response detailing the history of Python, including
|
||||
its creator (Guido van Rossum), first release (1991), main design goals (code
|
||||
readability and simplicity), and key milestones (Python 1.0 in 1994, Python
|
||||
2.0 in 2000, and Python 3.0 in 2008).\\n\\n3. **Important Discoveries**: \\n
|
||||
\ - Python was created by Guido van Rossum.\\n - First released in 1991.\\n
|
||||
\ - Main design goals: code readability and simplicity.\\n - Key milestones:\\n
|
||||
\ - Python 1.0 (1994): Introduced functional programming tools like lambda
|
||||
and map.\\n - Python 2.0 (2000): Added list comprehensions and garbage
|
||||
collection.\\n - Python 3.0 (2008): Major backward-incompatible release
|
||||
that fixed fundamental design flaws.\\n - Python 2 reached end-of-life in
|
||||
January 2020.\\n\\n4. **Next Steps**: The agent should be prepared to provide
|
||||
further details or answer any follow-up questions the user may have regarding
|
||||
Python's history or its features.\\n\\n5. **Context to Preserve**: \\n -
|
||||
Creator: Guido van Rossum\\n - First release: 1991\\n - Milestones: \\n
|
||||
\ - Python 1.0 (1994)\\n - Python 2.0 (2000)\\n - Python 3.0 (2008)\\n
|
||||
\ - End-of-life for Python 2: January 2020\\n</summary>\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
346,\n \"completion_tokens\": 367,\n \"total_tokens\": 713,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_7e4bf6ad56\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:28:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '5368'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: What about
|
||||
the async/await features? When were they introduced and how do they compare
|
||||
to similar features in JavaScript and C#? Also explain the Global Interpreter
|
||||
Lock and its implications.\n\n[ASSISTANT]: Async/await was introduced in Python
|
||||
3.5 (PEP 492, 2015). Unlike JavaScript which is single-threaded by design, Python''s
|
||||
asyncio is an opt-in framework. C# introduced async/await in 2012 (C# 5.0) and
|
||||
was a major inspiration for Python''s implementation. The GIL (Global Interpreter
|
||||
Lock) is a mutex that protects access to Python objects, preventing multiple
|
||||
threads from executing Python bytecodes simultaneously. This means CPU-bound
|
||||
multithreaded programs don''t benefit from multiple cores. PEP 703 proposes
|
||||
making the GIL optional in CPython.\n</conversation>\n\nCreate a summary with
|
||||
these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2.
|
||||
**Current State**: What has been completed so far? What step is the agent on?\n3.
|
||||
**Important Discoveries**: Key facts, data, tool results, or findings that must
|
||||
not be lost.\n4. **Next Steps**: What should the agent do next based on the
|
||||
conversation?\n5. **Context to Preserve**: Any specific values, names, URLs,
|
||||
code snippets, or details referenced in the conversation.\n\nWrap your entire
|
||||
summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1786'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- async:asyncio
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7S9Pcl5ybKLH8cSEZ6hgPuvj5iCv\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668879,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user is seeking information about the async/await features in Python, their
|
||||
introduction timeline, comparisons with similar features in JavaScript and
|
||||
C#, and an explanation of the Global Interpreter Lock (GIL) and its implications.\\n\\n2.
|
||||
**Current State**: The assistant has provided information regarding the introduction
|
||||
of async/await in Python (version 3.5, PEP 492 in 2015), comparisons with
|
||||
JavaScript and C# (C# introduced async/await in 2012), and an explanation
|
||||
of the GIL.\\n\\n3. **Important Discoveries**: \\n - Async/await was introduced
|
||||
in Python 3.5 (PEP 492, 2015).\\n - JavaScript is single-threaded, while
|
||||
Python's asyncio is an opt-in framework.\\n - C# introduced async/await
|
||||
in 2012 (C# 5.0) and influenced Python's implementation.\\n - The GIL (Global
|
||||
Interpreter Lock) is a mutex that prevents multiple threads from executing
|
||||
Python bytecodes simultaneously, affecting CPU-bound multithreaded programs.\\n
|
||||
\ - PEP 703 proposes making the GIL optional in CPython.\\n\\n4. **Next Steps**:
|
||||
The agent should consider providing further details on how async/await is
|
||||
implemented in Python, JavaScript, and C#, and explore the implications of
|
||||
the GIL in more depth, including potential alternatives or workarounds.\\n\\n5.
|
||||
**Context to Preserve**: \\n - Python async/await introduction: version
|
||||
3.5, PEP 492, 2015.\\n - C# async/await introduction: 2012, C# 5.0.\\n -
|
||||
GIL (Global Interpreter Lock) and its implications on multithreading in Python.\\n
|
||||
\ - Reference to PEP 703 regarding the GIL.\\n</summary>\",\n \"refusal\":
|
||||
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
364,\n \"completion_tokens\": 381,\n \"total_tokens\": 745,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:28:04 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '5489'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,136 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"Analyze the following
|
||||
conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Research
|
||||
the latest developments in large language models. Focus on architecture improvements
|
||||
and training techniques.\n\n[ASSISTANT]: I''ll research the latest developments
|
||||
in large language models. Based on my knowledge, recent advances include:\n1.
|
||||
Mixture of Experts (MoE) architectures\n2. Improved attention mechanisms like
|
||||
Flash Attention\n3. Better training data curation techniques\n4. Constitutional
|
||||
AI and RLHF improvements\n\n[USER]: Can you go deeper on the MoE architectures?
|
||||
What are the key papers?\n\n[ASSISTANT]: Key papers on Mixture of Experts:\n-
|
||||
Switch Transformers (Google, 2021) - simplified MoE routing\n- GShard - scaling
|
||||
to 600B parameters\n- Mixtral (Mistral AI) - open-source MoE model\nThe main
|
||||
advantage is computational efficiency: only a subset of experts is activated
|
||||
per token.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task
|
||||
Overview**: What is the agent trying to accomplish?\n2. **Current State**: What
|
||||
has been completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}],"model":"claude-3-5-haiku-latest","stream":false,"system":"You
|
||||
are a precise assistant that creates structured summaries of agent conversations.
|
||||
You preserve critical context needed for seamless task continuation.","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
anthropic-version:
|
||||
- '2023-06-01'
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1870'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.anthropic.com
|
||||
x-api-key:
|
||||
- X-API-KEY-XXX
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 0.73.0
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
x-stainless-timeout:
|
||||
- NOT_GIVEN
|
||||
method: POST
|
||||
uri: https://api.anthropic.com/v1/messages
|
||||
response:
|
||||
body:
|
||||
string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01SK3LP6RedPBmpvD1HfKD23","type":"message","role":"assistant","content":[{"type":"text","text":"<summary>\n1.
|
||||
**Task Overview**:\n- Research latest developments in large language models\n-
|
||||
Focus on architecture improvements and training techniques\n\n2. **Current
|
||||
State**:\n- Initial research completed on broad developments\n- Currently
|
||||
exploring Mixture of Experts (MoE) architectures in depth\n- Detailed discussion
|
||||
of key MoE research papers initiated\n\n3. **Important Discoveries**:\nMoE
|
||||
Architecture Insights:\n- Computational efficiency through selective expert
|
||||
activation\n- Key research papers:\n * Switch Transformers (Google, 2021)\n *
|
||||
GShard\n * Mixtral (Mistral AI)\n- Main benefit: Only subset of experts activated
|
||||
per token\n\n4. **Next Steps**:\n- Conduct deeper analysis of MoE architecture
|
||||
mechanisms\n- Compare routing strategies across different MoE implementations\n-
|
||||
Investigate performance metrics and scalability of MoE models\n\n5. **Context
|
||||
to Preserve**:\n- Research Focus: Large Language Model Architectures\n- Specific
|
||||
Interest: Mixture of Experts (MoE) Architectures\n- Key Researchers/Organizations:
|
||||
Google, Mistral AI\n- Years of Significant Papers: 2021 onwards\n</summary>"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":400,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":270,"service_tier":"standard","inference_geo":"not_available"}}'
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Security-Policy:
|
||||
- CSP-FILTERED
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:18:41 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Robots-Tag:
|
||||
- none
|
||||
anthropic-organization-id:
|
||||
- ANTHROPIC-ORGANIZATION-ID-XXX
|
||||
anthropic-ratelimit-input-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-input-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-input-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-output-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-output-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-output-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
|
||||
anthropic-ratelimit-requests-limit:
|
||||
- '4000'
|
||||
anthropic-ratelimit-requests-remaining:
|
||||
- '3999'
|
||||
anthropic-ratelimit-requests-reset:
|
||||
- '2026-02-09T20:18:35Z'
|
||||
anthropic-ratelimit-tokens-limit:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
|
||||
anthropic-ratelimit-tokens-remaining:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
|
||||
anthropic-ratelimit-tokens-reset:
|
||||
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
request-id:
|
||||
- REQUEST-ID-XXX
|
||||
strict-transport-security:
|
||||
- STS-XXX
|
||||
x-envoy-upstream-service-time:
|
||||
- '5639'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,110 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are a precise assistant
|
||||
that creates structured summaries of agent conversations. You preserve critical
|
||||
context needed for seamless task continuation."}, {"role": "user", "content":
|
||||
"Analyze the following conversation and create a structured summary that preserves
|
||||
all information needed to continue the task seamlessly.\n\n<conversation>\n[USER]:
|
||||
Research the latest developments in large language models. Focus on architecture
|
||||
improvements and training techniques.\n\n[ASSISTANT]: I''ll research the latest
|
||||
developments in large language models. Based on my knowledge, recent advances
|
||||
include:\n1. Mixture of Experts (MoE) architectures\n2. Improved attention mechanisms
|
||||
like Flash Attention\n3. Better training data curation techniques\n4. Constitutional
|
||||
AI and RLHF improvements\n\n[USER]: Can you go deeper on the MoE architectures?
|
||||
What are the key papers?\n\n[ASSISTANT]: Key papers on Mixture of Experts:\n-
|
||||
Switch Transformers (Google, 2021) - simplified MoE routing\n- GShard - scaling
|
||||
to 600B parameters\n- Mixtral (Mistral AI) - open-source MoE model\nThe main
|
||||
advantage is computational efficiency: only a subset of experts is activated
|
||||
per token.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task
|
||||
Overview**: What is the agent trying to accomplish?\n2. **Current State**: What
|
||||
has been completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}], "stream": false, "temperature": 0}'
|
||||
headers:
|
||||
Accept:
|
||||
- application/json
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1849'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
api-key:
|
||||
- X-API-KEY-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
x-ms-client-request-id:
|
||||
- X-MS-CLIENT-REQUEST-ID-XXX
|
||||
method: POST
|
||||
uri: https://fake-azure-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2024-12-01-preview
|
||||
response:
|
||||
body:
|
||||
string: '{"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"protected_material_code":{"filtered":false,"detected":false},"protected_material_text":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"finish_reason":"stop","index":0,"logprobs":null,"message":{"annotations":[],"content":"\u003csummary\u003e\n1.
|
||||
**Task Overview**: The user has requested research on the latest developments
|
||||
in large language models, specifically focusing on architecture improvements
|
||||
and training techniques.\n\n2. **Current State**: The assistant has provided
|
||||
an initial overview of recent advances in large language models, including
|
||||
Mixture of Experts (MoE) architectures, improved attention mechanisms, better
|
||||
training data curation techniques, and advancements in Constitutional AI and
|
||||
Reinforcement Learning from Human Feedback (RLHF).\n\n3. **Important Discoveries**:
|
||||
\n - Recent advances in large language models include:\n 1. Mixture
|
||||
of Experts (MoE) architectures\n 2. Improved attention mechanisms like
|
||||
Flash Attention\n 3. Better training data curation techniques\n 4.
|
||||
Constitutional AI and RLHF improvements\n - Key papers on Mixture of Experts:\n -
|
||||
Switch Transformers (Google, 2021) - simplified MoE routing\n - GShard
|
||||
- scaling to 600B parameters\n - Mixtral (Mistral AI) - open-source MoE
|
||||
model\n - The main advantage of MoE architectures is computational efficiency,
|
||||
as only a subset of experts is activated per token.\n\n4. **Next Steps**:
|
||||
The assistant should delve deeper into the Mixture of Experts architectures,
|
||||
potentially summarizing the key findings and implications from the identified
|
||||
papers.\n\n5. **Context to Preserve**: \n - Key papers: \n - Switch
|
||||
Transformers (Google, 2021)\n - GShard\n - Mixtral (Mistral AI)\n -
|
||||
Focus on computational efficiency of MoE architectures.\n\u003c/summary\u003e","refusal":null,"role":"assistant"}}],"created":1770849953,"id":"chatcmpl-D8DFx1H1zzEerW5H0BWfuwmio2sz1","model":"gpt-4o-mini-2024-07-18","object":"chat.completion","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}],"system_fingerprint":"fp_f97eff32c5","usage":{"completion_tokens":328,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":0,"rejected_prediction_tokens":0},"prompt_tokens":368,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":696}}
|
||||
|
||||
'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2786'
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 11 Feb 2026 22:45:56 GMT
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
apim-request-id:
|
||||
- APIM-REQUEST-ID-XXX
|
||||
azureml-model-session:
|
||||
- AZUREML-MODEL-SESSION-XXX
|
||||
x-accel-buffering:
|
||||
- 'no'
|
||||
x-content-type-options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
x-ms-client-request-id:
|
||||
- X-MS-CLIENT-REQUEST-ID-XXX
|
||||
x-ms-deployment-name:
|
||||
- gpt-4o-mini
|
||||
x-ms-rai-invoked:
|
||||
- 'true'
|
||||
x-ms-region:
|
||||
- X-MS-REGION-XXX
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,103 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "Analyze the following conversation and
|
||||
create a structured summary that preserves all information needed to continue
|
||||
the task seamlessly.\n\n<conversation>\n[USER]: Research the latest developments
|
||||
in large language models. Focus on architecture improvements and training techniques.\n\n[ASSISTANT]:
|
||||
I''ll research the latest developments in large language models. Based on my
|
||||
knowledge, recent advances include:\n1. Mixture of Experts (MoE) architectures\n2.
|
||||
Improved attention mechanisms like Flash Attention\n3. Better training data
|
||||
curation techniques\n4. Constitutional AI and RLHF improvements\n\n[USER]: Can
|
||||
you go deeper on the MoE architectures? What are the key papers?\n\n[ASSISTANT]:
|
||||
Key papers on Mixture of Experts:\n- Switch Transformers (Google, 2021) - simplified
|
||||
MoE routing\n- GShard - scaling to 600B parameters\n- Mixtral (Mistral AI) -
|
||||
open-source MoE model\nThe main advantage is computational efficiency: only
|
||||
a subset of experts is activated per token.\n</conversation>\n\nCreate a summary
|
||||
with these sections:\n1. **Task Overview**: What is the agent trying to accomplish?\n2.
|
||||
**Current State**: What has been completed so far? What step is the agent on?\n3.
|
||||
**Important Discoveries**: Key facts, data, tool results, or findings that must
|
||||
not be lost.\n4. **Next Steps**: What should the agent do next based on the
|
||||
conversation?\n5. **Context to Preserve**: Any specific values, names, URLs,
|
||||
code snippets, or details referenced in the conversation.\n\nWrap your entire
|
||||
summary in <summary> tags.\n\n<summary>\n[Your structured summary here]\n</summary>"}],
|
||||
"role": "user"}], "systemInstruction": {"parts": [{"text": "You are a precise
|
||||
assistant that creates structured summaries of agent conversations. You preserve
|
||||
critical context needed for seamless task continuation."}], "role": "user"},
|
||||
"generationConfig": {"temperature": 0.0}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1895'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- generativelanguage.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.49.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"parts\":
|
||||
[\n {\n \"text\": \"```xml\\n\\u003csummary\\u003e\\n**Task
|
||||
Overview**: Research the latest developments in large language models, focusing
|
||||
on architecture improvements and training techniques.\\n\\n**Current State**:
|
||||
The agent has identified several key areas of advancement in LLMs: Mixture
|
||||
of Experts (MoE) architectures, improved attention mechanisms (Flash Attention),
|
||||
better training data curation, and Constitutional AI/RLHF improvements. The
|
||||
user has requested a deeper dive into MoE architectures. The agent has provided
|
||||
an initial overview of MoE architectures and listed some key papers.\\n\\n**Important
|
||||
Discoveries**:\\n* Key MoE papers: Switch Transformers (Google, 2021), GShard,
|
||||
Mixtral (Mistral AI).\\n* MoE advantage: Computational efficiency through
|
||||
selective activation of experts.\\n\\n**Next Steps**: Continue researching
|
||||
MoE architectures based on the user's request for more detail. The agent should
|
||||
elaborate further on the listed papers and potentially find more recent or
|
||||
relevant publications.\\n\\n**Context to Preserve**:\\n* Focus areas: Architecture
|
||||
improvements and training techniques for LLMs.\\n* Specific architectures:
|
||||
Mixture of Experts (MoE), Flash Attention.\\n* Training techniques: Data
|
||||
curation, Constitutional AI, RLHF.\\n* Specific papers: Switch Transformers
|
||||
(Google, 2021), GShard, Mixtral (Mistral AI).\\n\\u003c/summary\\u003e\\n```\"\n
|
||||
\ }\n ],\n \"role\": \"model\"\n },\n \"finishReason\":
|
||||
\"STOP\",\n \"avgLogprobs\": -0.14186729703630721\n }\n ],\n \"usageMetadata\":
|
||||
{\n \"promptTokenCount\": 373,\n \"candidatesTokenCount\": 280,\n \"totalTokenCount\":
|
||||
653,\n \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 373\n }\n ],\n \"candidatesTokensDetails\":
|
||||
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 280\n
|
||||
\ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash\",\n \"responseId\":
|
||||
\"GEGKabP3OcGH-8YPzZCj2Ao\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:18:35 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Server-Timing:
|
||||
- gfet4t7; dur=2310
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,148 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Research
|
||||
the latest developments in large language models. Focus on architecture improvements
|
||||
and training techniques.\n\n[ASSISTANT]: I''ll research the latest developments
|
||||
in large language models. Based on my knowledge, recent advances include:\n1.
|
||||
Mixture of Experts (MoE) architectures\n2. Improved attention mechanisms like
|
||||
Flash Attention\n3. Better training data curation techniques\n4. Constitutional
|
||||
AI and RLHF improvements\n\n[USER]: Can you go deeper on the MoE architectures?
|
||||
What are the key papers?\n\n[ASSISTANT]: Key papers on Mixture of Experts:\n-
|
||||
Switch Transformers (Google, 2021) - simplified MoE routing\n- GShard - scaling
|
||||
to 600B parameters\n- Mixtral (Mistral AI) - open-source MoE model\nThe main
|
||||
advantage is computational efficiency: only a subset of experts is activated
|
||||
per token.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task
|
||||
Overview**: What is the agent trying to accomplish?\n2. **Current State**: What
|
||||
has been completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1844'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7RxGISdQet8JsWImiwzHQ2S9gSD4\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668126,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
agent is tasked with researching the latest developments in large language
|
||||
models, specifically focusing on architecture improvements and training techniques.\\n\\n2.
|
||||
**Current State**: The agent has identified several recent advances in large
|
||||
language models, including Mixture of Experts (MoE) architectures, improved
|
||||
attention mechanisms, better training data curation techniques, and advancements
|
||||
in Constitutional AI and Reinforcement Learning from Human Feedback (RLHF).\\n\\n3.
|
||||
**Important Discoveries**: \\n - Recent advances in large language models
|
||||
include:\\n 1. Mixture of Experts (MoE) architectures\\n 2. Improved
|
||||
attention mechanisms like Flash Attention\\n 3. Better training data curation
|
||||
techniques\\n 4. Constitutional AI and RLHF improvements\\n - Key papers
|
||||
on Mixture of Experts:\\n - Switch Transformers (Google, 2021) - simplified
|
||||
MoE routing\\n - GShard - scaling to 600B parameters\\n - Mixtral
|
||||
(Mistral AI) - open-source MoE model\\n - The main advantage of MoE architectures
|
||||
is computational efficiency, as only a subset of experts is activated per
|
||||
token.\\n\\n4. **Next Steps**: The agent should delve deeper into the Mixture
|
||||
of Experts architectures, reviewing the key papers mentioned and summarizing
|
||||
their contributions and implications for large language models.\\n\\n5. **Context
|
||||
to Preserve**: \\n - Key papers: \\n - Switch Transformers (Google,
|
||||
2021)\\n - GShard\\n - Mixtral (Mistral AI)\\n - Focus on computational
|
||||
efficiency of MoE architectures.\\n</summary>\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 368,\n \"completion_tokens\":
|
||||
328,\n \"total_tokens\": 696,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:15:32 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '5395'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,145 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are a precise assistant that
|
||||
creates structured summaries of agent conversations. You preserve critical context
|
||||
needed for seamless task continuation."},{"role":"user","content":"Analyze the
|
||||
following conversation and create a structured summary that preserves all information
|
||||
needed to continue the task seamlessly.\n\n<conversation>\n[USER]: Research
|
||||
the latest developments in large language models. Focus on architecture improvements
|
||||
and training techniques.\n\n[ASSISTANT]: I''ll research the latest developments
|
||||
in large language models. Based on my knowledge, recent advances include:\n1.
|
||||
Mixture of Experts (MoE) architectures\n2. Improved attention mechanisms like
|
||||
Flash Attention\n3. Better training data curation techniques\n4. Constitutional
|
||||
AI and RLHF improvements\n\n[USER]: Can you go deeper on the MoE architectures?
|
||||
What are the key papers?\n\n[ASSISTANT]: Key papers on Mixture of Experts:\n-
|
||||
Switch Transformers (Google, 2021) - simplified MoE routing\n- GShard - scaling
|
||||
to 600B parameters\n- Mixtral (Mistral AI) - open-source MoE model\nThe main
|
||||
advantage is computational efficiency: only a subset of experts is activated
|
||||
per token.\n</conversation>\n\nCreate a summary with these sections:\n1. **Task
|
||||
Overview**: What is the agent trying to accomplish?\n2. **Current State**: What
|
||||
has been completed so far? What step is the agent on?\n3. **Important Discoveries**:
|
||||
Key facts, data, tool results, or findings that must not be lost.\n4. **Next
|
||||
Steps**: What should the agent do next based on the conversation?\n5. **Context
|
||||
to Preserve**: Any specific values, names, URLs, code snippets, or details referenced
|
||||
in the conversation.\n\nWrap your entire summary in <summary> tags.\n\n<summary>\n[Your
|
||||
structured summary here]\n</summary>"}],"model":"gpt-4o-mini","temperature":0}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1844'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D7RxM4n36QoACHrC0QocV1pXIwvtD\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1770668132,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"<summary>\\n1. **Task Overview**: The
|
||||
user has requested research on the latest developments in large language models,
|
||||
specifically focusing on architecture improvements and training techniques.\\n\\n2.
|
||||
**Current State**: The assistant has identified several recent advances in
|
||||
large language models, including Mixture of Experts (MoE) architectures, improved
|
||||
attention mechanisms, better training data curation techniques, and advancements
|
||||
in Constitutional AI and Reinforcement Learning from Human Feedback (RLHF).\\n\\n3.
|
||||
**Important Discoveries**: \\n - Key papers on Mixture of Experts (MoE)
|
||||
architectures:\\n - \\\"Switch Transformers\\\" (Google, 2021) - simplified
|
||||
MoE routing.\\n - \\\"GShard\\\" - scaling to 600B parameters.\\n -
|
||||
\\\"Mixtral\\\" (Mistral AI) - open-source MoE model.\\n - The main advantage
|
||||
of MoE architectures is computational efficiency, as only a subset of experts
|
||||
is activated per token.\\n\\n4. **Next Steps**: The assistant should delve
|
||||
deeper into the Mixture of Experts architectures, potentially summarizing
|
||||
the findings from the key papers mentioned.\\n\\n5. **Context to Preserve**:
|
||||
\\n - Key papers: \\\"Switch Transformers,\\\" \\\"GShard,\\\" \\\"Mixtral.\\\"\\n
|
||||
\ - Notable organizations: Google, Mistral AI.\\n - Focus areas: MoE architectures,
|
||||
computational efficiency.\\n</summary>\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 368,\n \"completion_tokens\":
|
||||
275,\n \"total_tokens\": 643,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_f4ae844694\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 09 Feb 2026 20:15:36 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '4188'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
set-cookie:
|
||||
- SET-COOKIE-XXX
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,6 +1,8 @@
|
||||
import os
|
||||
import unittest
|
||||
from unittest.mock import ANY, MagicMock, patch
|
||||
from unittest.mock import ANY, AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
|
||||
@@ -68,37 +70,6 @@ class TestPlusAPI(unittest.TestCase):
|
||||
)
|
||||
self.assertEqual(response, mock_response)
|
||||
|
||||
@patch("crewai.cli.plus_api.PlusAPI._make_request")
|
||||
def test_get_agent(self, mock_make_request):
|
||||
mock_response = MagicMock()
|
||||
mock_make_request.return_value = mock_response
|
||||
|
||||
response = self.api.get_agent("test_agent_handle")
|
||||
mock_make_request.assert_called_once_with(
|
||||
"GET", "/crewai_plus/api/v1/agents/test_agent_handle"
|
||||
)
|
||||
self.assertEqual(response, mock_response)
|
||||
|
||||
@patch("crewai.cli.plus_api.Settings")
|
||||
@patch("requests.Session.request")
|
||||
def test_get_agent_with_org_uuid(self, mock_make_request, mock_settings_class):
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.org_uuid = self.org_uuid
|
||||
mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
|
||||
mock_settings_class.return_value = mock_settings
|
||||
# re-initialize Client
|
||||
self.api = PlusAPI(self.api_key)
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_make_request.return_value = mock_response
|
||||
|
||||
response = self.api.get_agent("test_agent_handle")
|
||||
|
||||
self.assert_request_with_org_id(
|
||||
mock_make_request, "GET", "/crewai_plus/api/v1/agents/test_agent_handle"
|
||||
)
|
||||
self.assertEqual(response, mock_response)
|
||||
|
||||
@patch("crewai.cli.plus_api.PlusAPI._make_request")
|
||||
def test_get_tool(self, mock_make_request):
|
||||
mock_response = MagicMock()
|
||||
@@ -338,3 +309,49 @@ class TestPlusAPI(unittest.TestCase):
|
||||
custom_api.base_url,
|
||||
"https://custom-url-from-env.com",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch("httpx.AsyncClient")
|
||||
async def test_get_agent(mock_async_client_class):
|
||||
api = PlusAPI("test_api_key")
|
||||
mock_response = MagicMock()
|
||||
mock_client_instance = AsyncMock()
|
||||
mock_client_instance.get.return_value = mock_response
|
||||
mock_async_client_class.return_value.__aenter__.return_value = mock_client_instance
|
||||
|
||||
response = await api.get_agent("test_agent_handle")
|
||||
|
||||
mock_client_instance.get.assert_called_once_with(
|
||||
f"{api.base_url}/crewai_plus/api/v1/agents/test_agent_handle",
|
||||
headers=api.headers,
|
||||
)
|
||||
assert response == mock_response
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch("httpx.AsyncClient")
|
||||
@patch("crewai.cli.plus_api.Settings")
|
||||
async def test_get_agent_with_org_uuid(mock_settings_class, mock_async_client_class):
|
||||
org_uuid = "test-org-uuid"
|
||||
mock_settings = MagicMock()
|
||||
mock_settings.org_uuid = org_uuid
|
||||
mock_settings.enterprise_base_url = os.getenv("CREWAI_PLUS_URL")
|
||||
mock_settings_class.return_value = mock_settings
|
||||
|
||||
api = PlusAPI("test_api_key")
|
||||
|
||||
mock_response = MagicMock()
|
||||
mock_client_instance = AsyncMock()
|
||||
mock_client_instance.get.return_value = mock_response
|
||||
mock_async_client_class.return_value.__aenter__.return_value = mock_client_instance
|
||||
|
||||
response = await api.get_agent("test_agent_handle")
|
||||
|
||||
mock_client_instance.get.assert_called_once_with(
|
||||
f"{api.base_url}/crewai_plus/api/v1/agents/test_agent_handle",
|
||||
headers=api.headers,
|
||||
)
|
||||
assert "X-Crewai-Organization-Id" in api.headers
|
||||
assert api.headers["X-Crewai-Organization-Id"] == org_uuid
|
||||
assert response == mock_response
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
"""Test for version management."""
|
||||
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from crewai import __version__
|
||||
from crewai.cli.version import (
|
||||
_find_latest_non_yanked_version,
|
||||
_get_cache_file,
|
||||
_is_cache_valid,
|
||||
_is_version_yanked,
|
||||
get_crewai_version,
|
||||
get_latest_version_from_pypi,
|
||||
is_current_version_yanked,
|
||||
is_newer_version_available,
|
||||
)
|
||||
|
||||
@@ -19,10 +23,8 @@ def test_dynamic_versioning_consistency() -> None:
|
||||
cli_version = get_crewai_version()
|
||||
package_version = __version__
|
||||
|
||||
# Both should return the same version string
|
||||
assert cli_version == package_version
|
||||
|
||||
# Version should not be empty
|
||||
assert package_version is not None
|
||||
assert len(package_version.strip()) > 0
|
||||
|
||||
@@ -63,12 +65,18 @@ class TestVersionChecking:
|
||||
def test_get_latest_version_from_pypi_success(
|
||||
self, mock_urlopen: MagicMock, mock_exists: MagicMock
|
||||
) -> None:
|
||||
"""Test successful PyPI version fetch."""
|
||||
# Mock cache not existing to force fetch from PyPI
|
||||
"""Test successful PyPI version fetch uses releases data."""
|
||||
mock_exists.return_value = False
|
||||
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"2.0.0": [{"yanked": False}],
|
||||
"2.1.0": [{"yanked": True, "yanked_reason": "bad release"}],
|
||||
}
|
||||
mock_response = MagicMock()
|
||||
mock_response.read.return_value = b'{"info": {"version": "2.0.0"}}'
|
||||
mock_response.read.return_value = json.dumps(
|
||||
{"info": {"version": "2.1.0"}, "releases": releases}
|
||||
).encode()
|
||||
mock_urlopen.return_value.__enter__.return_value = mock_response
|
||||
|
||||
version = get_latest_version_from_pypi()
|
||||
@@ -82,7 +90,6 @@ class TestVersionChecking:
|
||||
"""Test PyPI version fetch failure."""
|
||||
from urllib.error import URLError
|
||||
|
||||
# Mock cache not existing to force fetch from PyPI
|
||||
mock_exists.return_value = False
|
||||
|
||||
mock_urlopen.side_effect = URLError("Network error")
|
||||
@@ -133,18 +140,247 @@ class TestVersionChecking:
|
||||
assert latest is None
|
||||
|
||||
|
||||
class TestFindLatestNonYankedVersion:
|
||||
"""Test _find_latest_non_yanked_version helper."""
|
||||
|
||||
def test_skips_yanked_versions(self) -> None:
|
||||
"""Test that yanked versions are skipped."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"2.0.0": [{"yanked": True}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "1.0.0"
|
||||
|
||||
def test_returns_highest_non_yanked(self) -> None:
|
||||
"""Test that the highest non-yanked version is returned."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"1.5.0": [{"yanked": False}],
|
||||
"2.0.0": [{"yanked": True}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "1.5.0"
|
||||
|
||||
def test_returns_none_when_all_yanked(self) -> None:
|
||||
"""Test that None is returned when all versions are yanked."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": True}],
|
||||
"2.0.0": [{"yanked": True}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) is None
|
||||
|
||||
def test_skips_prerelease_versions(self) -> None:
|
||||
"""Test that pre-release versions are skipped."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"2.0.0a1": [{"yanked": False}],
|
||||
"2.0.0rc1": [{"yanked": False}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "1.0.0"
|
||||
|
||||
def test_skips_versions_with_empty_files(self) -> None:
|
||||
"""Test that versions with no files are skipped."""
|
||||
releases: dict[str, list[dict[str, bool]]] = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"2.0.0": [],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "1.0.0"
|
||||
|
||||
def test_handles_invalid_version_strings(self) -> None:
|
||||
"""Test that invalid version strings are skipped."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"not-a-version": [{"yanked": False}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "1.0.0"
|
||||
|
||||
def test_partially_yanked_files_not_considered_yanked(self) -> None:
|
||||
"""Test that a version with some non-yanked files is not yanked."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": False}],
|
||||
"2.0.0": [{"yanked": True}, {"yanked": False}],
|
||||
}
|
||||
assert _find_latest_non_yanked_version(releases) == "2.0.0"
|
||||
|
||||
|
||||
class TestIsVersionYanked:
|
||||
"""Test _is_version_yanked helper."""
|
||||
|
||||
def test_non_yanked_version(self) -> None:
|
||||
"""Test a non-yanked version returns False."""
|
||||
releases = {"1.0.0": [{"yanked": False}]}
|
||||
is_yanked, reason = _is_version_yanked("1.0.0", releases)
|
||||
assert is_yanked is False
|
||||
assert reason == ""
|
||||
|
||||
def test_yanked_version_with_reason(self) -> None:
|
||||
"""Test a yanked version returns True with reason."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": True, "yanked_reason": "critical bug"}],
|
||||
}
|
||||
is_yanked, reason = _is_version_yanked("1.0.0", releases)
|
||||
assert is_yanked is True
|
||||
assert reason == "critical bug"
|
||||
|
||||
def test_yanked_version_without_reason(self) -> None:
|
||||
"""Test a yanked version returns True with empty reason."""
|
||||
releases = {"1.0.0": [{"yanked": True}]}
|
||||
is_yanked, reason = _is_version_yanked("1.0.0", releases)
|
||||
assert is_yanked is True
|
||||
assert reason == ""
|
||||
|
||||
def test_unknown_version(self) -> None:
|
||||
"""Test an unknown version returns False."""
|
||||
releases = {"1.0.0": [{"yanked": False}]}
|
||||
is_yanked, reason = _is_version_yanked("9.9.9", releases)
|
||||
assert is_yanked is False
|
||||
assert reason == ""
|
||||
|
||||
def test_partially_yanked_files(self) -> None:
|
||||
"""Test a version with mixed yanked/non-yanked files is not yanked."""
|
||||
releases = {
|
||||
"1.0.0": [{"yanked": True}, {"yanked": False}],
|
||||
}
|
||||
is_yanked, reason = _is_version_yanked("1.0.0", releases)
|
||||
assert is_yanked is False
|
||||
assert reason == ""
|
||||
|
||||
def test_multiple_yanked_files_picks_first_reason(self) -> None:
|
||||
"""Test that the first available reason is returned."""
|
||||
releases = {
|
||||
"1.0.0": [
|
||||
{"yanked": True, "yanked_reason": ""},
|
||||
{"yanked": True, "yanked_reason": "second reason"},
|
||||
],
|
||||
}
|
||||
is_yanked, reason = _is_version_yanked("1.0.0", releases)
|
||||
assert is_yanked is True
|
||||
assert reason == "second reason"
|
||||
|
||||
|
||||
class TestIsCurrentVersionYanked:
|
||||
"""Test is_current_version_yanked public function."""
|
||||
|
||||
@patch("crewai.cli.version.get_crewai_version")
|
||||
@patch("crewai.cli.version._get_cache_file")
|
||||
def test_reads_from_valid_cache(
|
||||
self, mock_cache_file: MagicMock, mock_version: MagicMock, tmp_path: Path
|
||||
) -> None:
|
||||
"""Test reading yanked status from a valid cache."""
|
||||
mock_version.return_value = "1.0.0"
|
||||
cache_file = tmp_path / "version_cache.json"
|
||||
cache_data = {
|
||||
"version": "2.0.0",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"current_version": "1.0.0",
|
||||
"current_version_yanked": True,
|
||||
"current_version_yanked_reason": "bad release",
|
||||
}
|
||||
cache_file.write_text(json.dumps(cache_data))
|
||||
mock_cache_file.return_value = cache_file
|
||||
|
||||
is_yanked, reason = is_current_version_yanked()
|
||||
assert is_yanked is True
|
||||
assert reason == "bad release"
|
||||
|
||||
@patch("crewai.cli.version.get_crewai_version")
|
||||
@patch("crewai.cli.version._get_cache_file")
|
||||
def test_not_yanked_from_cache(
|
||||
self, mock_cache_file: MagicMock, mock_version: MagicMock, tmp_path: Path
|
||||
) -> None:
|
||||
"""Test non-yanked status from a valid cache."""
|
||||
mock_version.return_value = "2.0.0"
|
||||
cache_file = tmp_path / "version_cache.json"
|
||||
cache_data = {
|
||||
"version": "2.0.0",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"current_version": "2.0.0",
|
||||
"current_version_yanked": False,
|
||||
"current_version_yanked_reason": "",
|
||||
}
|
||||
cache_file.write_text(json.dumps(cache_data))
|
||||
mock_cache_file.return_value = cache_file
|
||||
|
||||
is_yanked, reason = is_current_version_yanked()
|
||||
assert is_yanked is False
|
||||
assert reason == ""
|
||||
|
||||
@patch("crewai.cli.version.get_latest_version_from_pypi")
|
||||
@patch("crewai.cli.version.get_crewai_version")
|
||||
@patch("crewai.cli.version._get_cache_file")
|
||||
def test_triggers_fetch_on_stale_cache(
|
||||
self,
|
||||
mock_cache_file: MagicMock,
|
||||
mock_version: MagicMock,
|
||||
mock_fetch: MagicMock,
|
||||
tmp_path: Path,
|
||||
) -> None:
|
||||
"""Test that a stale cache triggers a re-fetch."""
|
||||
mock_version.return_value = "1.0.0"
|
||||
cache_file = tmp_path / "version_cache.json"
|
||||
old_time = datetime.now() - timedelta(hours=25)
|
||||
cache_data = {
|
||||
"version": "2.0.0",
|
||||
"timestamp": old_time.isoformat(),
|
||||
"current_version": "1.0.0",
|
||||
"current_version_yanked": True,
|
||||
"current_version_yanked_reason": "old reason",
|
||||
}
|
||||
cache_file.write_text(json.dumps(cache_data))
|
||||
mock_cache_file.return_value = cache_file
|
||||
|
||||
fresh_cache = {
|
||||
"version": "2.0.0",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"current_version": "1.0.0",
|
||||
"current_version_yanked": False,
|
||||
"current_version_yanked_reason": "",
|
||||
}
|
||||
|
||||
def write_fresh_cache() -> str:
|
||||
cache_file.write_text(json.dumps(fresh_cache))
|
||||
return "2.0.0"
|
||||
|
||||
mock_fetch.side_effect = lambda: write_fresh_cache()
|
||||
|
||||
is_yanked, reason = is_current_version_yanked()
|
||||
assert is_yanked is False
|
||||
mock_fetch.assert_called_once()
|
||||
|
||||
@patch("crewai.cli.version.get_latest_version_from_pypi")
|
||||
@patch("crewai.cli.version.get_crewai_version")
|
||||
@patch("crewai.cli.version._get_cache_file")
|
||||
def test_returns_false_on_fetch_failure(
|
||||
self,
|
||||
mock_cache_file: MagicMock,
|
||||
mock_version: MagicMock,
|
||||
mock_fetch: MagicMock,
|
||||
tmp_path: Path,
|
||||
) -> None:
|
||||
"""Test that fetch failure returns not yanked."""
|
||||
mock_version.return_value = "1.0.0"
|
||||
cache_file = tmp_path / "version_cache.json"
|
||||
mock_cache_file.return_value = cache_file
|
||||
mock_fetch.return_value = None
|
||||
|
||||
is_yanked, reason = is_current_version_yanked()
|
||||
assert is_yanked is False
|
||||
assert reason == ""
|
||||
|
||||
|
||||
class TestConsoleFormatterVersionCheck:
|
||||
"""Test version check display in ConsoleFormatter."""
|
||||
|
||||
@patch("crewai.events.utils.console_formatter.is_current_version_yanked")
|
||||
@patch("crewai.events.utils.console_formatter.is_newer_version_available")
|
||||
@patch.dict("os.environ", {"CI": ""})
|
||||
def test_version_message_shows_when_update_available_and_verbose(
|
||||
self, mock_check: MagicMock
|
||||
self, mock_check: MagicMock, mock_yanked: MagicMock
|
||||
) -> None:
|
||||
"""Test version message shows when update available and verbose enabled."""
|
||||
from crewai.events.utils.console_formatter import ConsoleFormatter
|
||||
|
||||
mock_check.return_value = (True, "1.0.0", "2.0.0")
|
||||
mock_yanked.return_value = (False, "")
|
||||
|
||||
formatter = ConsoleFormatter(verbose=True)
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
@@ -165,14 +401,16 @@ class TestConsoleFormatterVersionCheck:
|
||||
formatter._show_version_update_message_if_needed()
|
||||
mock_print.assert_not_called()
|
||||
|
||||
@patch("crewai.events.utils.console_formatter.is_current_version_yanked")
|
||||
@patch("crewai.events.utils.console_formatter.is_newer_version_available")
|
||||
def test_version_message_hides_when_no_update_available(
|
||||
self, mock_check: MagicMock
|
||||
self, mock_check: MagicMock, mock_yanked: MagicMock
|
||||
) -> None:
|
||||
"""Test version message hidden when no update available."""
|
||||
from crewai.events.utils.console_formatter import ConsoleFormatter
|
||||
|
||||
mock_check.return_value = (False, "2.0.0", "2.0.0")
|
||||
mock_yanked.return_value = (False, "")
|
||||
|
||||
formatter = ConsoleFormatter(verbose=True)
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
@@ -208,3 +446,60 @@ class TestConsoleFormatterVersionCheck:
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
formatter._show_version_update_message_if_needed()
|
||||
mock_print.assert_not_called()
|
||||
|
||||
@patch("crewai.events.utils.console_formatter.is_current_version_yanked")
|
||||
@patch("crewai.events.utils.console_formatter.is_newer_version_available")
|
||||
@patch.dict("os.environ", {"CI": ""})
|
||||
def test_yanked_warning_shows_when_version_is_yanked(
|
||||
self, mock_check: MagicMock, mock_yanked: MagicMock
|
||||
) -> None:
|
||||
"""Test yanked warning panel shows when current version is yanked."""
|
||||
from crewai.events.utils.console_formatter import ConsoleFormatter
|
||||
|
||||
mock_check.return_value = (False, "1.0.0", "1.0.0")
|
||||
mock_yanked.return_value = (True, "critical bug")
|
||||
|
||||
formatter = ConsoleFormatter(verbose=True)
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
formatter._show_version_update_message_if_needed()
|
||||
assert mock_print.call_count == 2
|
||||
panel = mock_print.call_args_list[0][0][0]
|
||||
assert "Yanked Version" in panel.title
|
||||
assert "critical bug" in str(panel.renderable)
|
||||
|
||||
@patch("crewai.events.utils.console_formatter.is_current_version_yanked")
|
||||
@patch("crewai.events.utils.console_formatter.is_newer_version_available")
|
||||
@patch.dict("os.environ", {"CI": ""})
|
||||
def test_yanked_warning_shows_without_reason(
|
||||
self, mock_check: MagicMock, mock_yanked: MagicMock
|
||||
) -> None:
|
||||
"""Test yanked warning panel shows even without a reason."""
|
||||
from crewai.events.utils.console_formatter import ConsoleFormatter
|
||||
|
||||
mock_check.return_value = (False, "1.0.0", "1.0.0")
|
||||
mock_yanked.return_value = (True, "")
|
||||
|
||||
formatter = ConsoleFormatter(verbose=True)
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
formatter._show_version_update_message_if_needed()
|
||||
assert mock_print.call_count == 2
|
||||
panel = mock_print.call_args_list[0][0][0]
|
||||
assert "Yanked Version" in panel.title
|
||||
assert "Reason:" not in str(panel.renderable)
|
||||
|
||||
@patch("crewai.events.utils.console_formatter.is_current_version_yanked")
|
||||
@patch("crewai.events.utils.console_formatter.is_newer_version_available")
|
||||
@patch.dict("os.environ", {"CI": ""})
|
||||
def test_both_update_and_yanked_warning_show(
|
||||
self, mock_check: MagicMock, mock_yanked: MagicMock
|
||||
) -> None:
|
||||
"""Test both update and yanked panels show when applicable."""
|
||||
from crewai.events.utils.console_formatter import ConsoleFormatter
|
||||
|
||||
mock_check.return_value = (True, "1.0.0", "2.0.0")
|
||||
mock_yanked.return_value = (True, "security issue")
|
||||
|
||||
formatter = ConsoleFormatter(verbose=True)
|
||||
with patch.object(formatter.console, "print") as mock_print:
|
||||
formatter._show_version_update_message_if_needed()
|
||||
assert mock_print.call_count == 4
|
||||
|
||||
@@ -177,4 +177,40 @@ class TestTriggeredByScope:
|
||||
raise ValueError("test error")
|
||||
except ValueError:
|
||||
pass
|
||||
assert get_triggering_event_id() is None
|
||||
assert get_triggering_event_id() is None
|
||||
|
||||
|
||||
def test_agent_scope_preserved_after_tool_error_event() -> None:
|
||||
from crewai.events import crewai_event_bus
|
||||
from crewai.events.types.tool_usage_events import (
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
|
||||
push_event_scope("crew-1", "crew_kickoff_started")
|
||||
push_event_scope("task-1", "task_started")
|
||||
push_event_scope("agent-1", "agent_execution_started")
|
||||
|
||||
crewai_event_bus.emit(
|
||||
None,
|
||||
ToolUsageStartedEvent(
|
||||
tool_name="test_tool",
|
||||
tool_args={},
|
||||
agent_key="test_agent",
|
||||
)
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
None,
|
||||
ToolUsageErrorEvent(
|
||||
tool_name="test_tool",
|
||||
tool_args={},
|
||||
agent_key="test_agent",
|
||||
error=ValueError("test error"),
|
||||
)
|
||||
)
|
||||
|
||||
crewai_event_bus.flush()
|
||||
|
||||
assert get_current_parent_id() == "agent-1"
|
||||
|
||||
|
||||
@@ -990,3 +990,134 @@ def test_anthropic_agent_kickoff_structured_output_with_tools():
|
||||
assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
|
||||
assert result.pydantic.operation, "Operation should not be empty"
|
||||
assert result.pydantic.explanation, "Explanation should not be empty"
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_anthropic_cached_prompt_tokens():
|
||||
"""
|
||||
Test that Anthropic correctly extracts and tracks cached_prompt_tokens
|
||||
from cache_read_input_tokens. Uses cache_control to enable prompt caching
|
||||
and sends the same large prompt twice so the second call hits the cache.
|
||||
"""
|
||||
# Anthropic requires cache_control blocks and >=1024 tokens for caching
|
||||
padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
|
||||
system_msg = f"You are a helpful assistant. {padding}"
|
||||
|
||||
llm = LLM(model="anthropic/claude-sonnet-4-5-20250929")
|
||||
|
||||
def _ephemeral_user(text: str):
|
||||
return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]
|
||||
|
||||
# First call: creates the cache
|
||||
llm.call([
|
||||
{"role": "system", "content": system_msg},
|
||||
{"role": "user", "content": _ephemeral_user("Say hello in one word.")},
|
||||
])
|
||||
|
||||
# Second call: same system prompt should hit the cache
|
||||
llm.call([
|
||||
{"role": "system", "content": system_msg},
|
||||
{"role": "user", "content": _ephemeral_user("Say goodbye in one word.")},
|
||||
])
|
||||
|
||||
usage = llm.get_token_usage_summary()
|
||||
assert usage.total_tokens > 0
|
||||
assert usage.prompt_tokens > 0
|
||||
assert usage.completion_tokens > 0
|
||||
assert usage.successful_requests == 2
|
||||
# The second call should have cached prompt tokens
|
||||
assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_anthropic_streaming_cached_prompt_tokens():
|
||||
"""
|
||||
Test that Anthropic streaming correctly extracts and tracks cached_prompt_tokens.
|
||||
"""
|
||||
padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
|
||||
system_msg = f"You are a helpful assistant. {padding}"
|
||||
|
||||
llm = LLM(model="anthropic/claude-sonnet-4-5-20250929", stream=True)
|
||||
|
||||
def _ephemeral_user(text: str):
|
||||
return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]
|
||||
|
||||
# First call: creates the cache
|
||||
llm.call([
|
||||
{"role": "system", "content": system_msg},
|
||||
{"role": "user", "content": _ephemeral_user("Say hello in one word.")},
|
||||
])
|
||||
|
||||
# Second call: same system prompt should hit the cache
|
||||
llm.call([
|
||||
{"role": "system", "content": system_msg},
|
||||
{"role": "user", "content": _ephemeral_user("Say goodbye in one word.")},
|
||||
])
|
||||
|
||||
usage = llm.get_token_usage_summary()
|
||||
assert usage.total_tokens > 0
|
||||
assert usage.successful_requests == 2
|
||||
# The second call should have cached prompt tokens
|
||||
assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_anthropic_cached_prompt_tokens_with_tools():
    """
    Test that Anthropic correctly tracks cached_prompt_tokens when tools are used.
    The large system prompt should be cached across tool-calling requests.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {filler}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    weather_tool = {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name",
                }
            },
            "required": ["location"],
        },
    }

    llm = LLM(model="anthropic/claude-sonnet-4-5-20250929")

    def _cache_marked(text: str):
        # Anthropic ephemeral cache_control marker on the user content block.
        return [{"type": "text", "text": text, "cache_control": {"type": "ephemeral"}}]

    # First call warms the prompt cache; the second identical system prompt
    # plus tools should then be served from it.
    for question in ("What is the weather in Tokyo?", "What is the weather in Paris?"):
        llm.call(
            [
                {"role": "system", "content": system_msg},
                {"role": "user", "content": _cache_marked(question)},
            ],
            tools=[weather_tool],
            available_functions={"get_weather": get_weather},
        )

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
|
||||
|
||||
@@ -102,7 +102,6 @@ def test_azure_tool_use_conversation_flow():
|
||||
# Verify that the API was called
|
||||
assert mock_complete.called
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("mock_azure_credentials")
|
||||
def test_azure_completion_module_is_imported():
|
||||
"""
|
||||
|
||||
@@ -42,65 +42,6 @@ def test_gemini_completion_is_used_when_gemini_provider():
|
||||
assert llm.provider == "gemini"
|
||||
assert llm.model == "gemini-2.0-flash-001"
|
||||
|
||||
|
||||
|
||||
|
||||
def test_gemini_tool_use_conversation_flow():
    """
    Test that the Gemini completion properly handles tool use conversation flow
    """
    from unittest.mock import Mock, patch
    from crewai.llms.providers.gemini.completion import GeminiCompletion

    completion = GeminiCompletion(model="gemini-2.0-flash-001")

    def mock_weather_tool(location: str) -> str:
        return f"The weather in {location} is sunny and 75°F"

    available_functions = {"get_weather": mock_weather_tool}

    with patch.object(completion.client.models, "generate_content") as mock_generate:
        # Build a fake Gemini response whose single part carries a function call.
        fn_call = Mock()
        fn_call.name = "get_weather"
        fn_call.args = {"location": "San Francisco"}

        part = Mock()
        part.function_call = fn_call

        content = Mock()
        content.parts = [part]

        candidate = Mock()
        candidate.content = content

        fake_response = Mock()
        fake_response.candidates = [candidate]
        fake_response.text = "Based on the weather data, it's a beautiful day in San Francisco with sunny skies and 75°F temperature."
        fake_response.usage_metadata = Mock()
        fake_response.usage_metadata.prompt_token_count = 100
        fake_response.usage_metadata.candidates_token_count = 50
        fake_response.usage_metadata.total_token_count = 150

        mock_generate.return_value = fake_response

        result = completion.call(
            messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}],
            available_functions=available_functions,
        )

        # The mocked tool must have been executed and its output returned verbatim.
        assert result == "The weather in San Francisco is sunny and 75°F"

        # The Gemini client API must actually have been invoked.
        assert mock_generate.called
|
||||
|
||||
|
||||
def test_gemini_completion_module_is_imported():
|
||||
"""
|
||||
Test that the completion module is properly imported when using Google provider
|
||||
@@ -1114,3 +1055,97 @@ def test_gemini_structured_output_preserves_json_with_stop_word_patterns():
|
||||
assert "Action:" in result.action_taken
|
||||
assert "Observation:" in result.observation_result
|
||||
assert "Final Answer:" in result.final_answer
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_gemini_cached_prompt_tokens():
    """
    Test that Gemini correctly extracts and tracks cached_prompt_tokens
    from cached_content_token_count in the usage metadata.
    Sends two calls with the same large prompt to trigger caching.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {filler}"

    llm = LLM(model="google/gemini-2.5-flash")

    # Two calls with an identical large system prompt; the second may be
    # served from Gemini's implicit prompt cache.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    # cached_prompt_tokens should be populated (may be 0 if Gemini
    # doesn't cache for this particular request, but the field should exist)
    assert usage.cached_prompt_tokens >= 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_gemini_cached_prompt_tokens_with_tools():
    """
    Test that Gemini correctly tracks cached_prompt_tokens when tools are used.
    The large system prompt should be cached across tool-calling requests.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {filler}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    weather_tool = {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name",
                }
            },
            "required": ["location"],
        },
    }

    llm = LLM(model="google/gemini-2.5-flash")

    # Same system prompt + tool schema on both calls so the second one can
    # hit Gemini's prompt cache.
    for question in ("What is the weather in Tokyo?", "What is the weather in Paris?"):
        llm.call(
            [
                {"role": "system", "content": system_msg},
                {"role": "user", "content": question},
            ],
            tools=[weather_tool],
            available_functions={"get_weather": get_weather},
        )

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    # cached_prompt_tokens should be populated (may be 0 if Gemini
    # doesn't cache for this particular request, but the field should exist)
    assert usage.cached_prompt_tokens >= 0
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
from typing import Any
|
||||
from unittest.mock import patch, MagicMock
|
||||
import openai
|
||||
import pytest
|
||||
@@ -1578,3 +1579,379 @@ def test_openai_structured_output_preserves_json_with_stop_word_patterns():
|
||||
assert "Action:" in result.action_taken
|
||||
assert "Observation:" in result.observation_result
|
||||
assert "Final Answer:" in result.final_answer
|
||||
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_openai_completions_cached_prompt_tokens():
    """
    Test that the Chat Completions API correctly extracts and tracks
    cached_prompt_tokens from prompt_tokens_details.cached_tokens.
    Sends the same large prompt twice so the second call hits the cache.
    """
    # Large system prompt to cross OpenAI's prompt-caching threshold (>1024 tokens).
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {filler}"

    llm = OpenAICompletion(model="gpt-4.1")

    # First call creates the cache; the second identical prefix hits it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_openai_responses_api_cached_prompt_tokens():
    """
    Test that the Responses API correctly extracts and tracks
    cached_prompt_tokens from input_tokens_details.cached_tokens.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {filler}"

    llm = OpenAICompletion(model="gpt-4.1", api="responses")

    # First call creates the cache; the second identical system prompt hits it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.completion_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_openai_streaming_cached_prompt_tokens():
    """
    Test that streaming Chat Completions API correctly extracts and tracks
    cached_prompt_tokens.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant. {filler}"

    llm = OpenAICompletion(model="gpt-4.1", stream=True)

    # First call creates the cache; the second identical system prompt hits it.
    for user_msg in ("Say hello in one word.", "Say goodbye in one word."):
        llm.call([
            {"role": "system", "content": system_msg},
            {"role": "user", "content": user_msg},
        ])

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_openai_completions_cached_prompt_tokens_with_tools():
    """
    Test that the Chat Completions API correctly tracks cached_prompt_tokens
    when tools are used. The large system prompt should be cached across calls.
    """
    filler = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {filler}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    weather_tool = {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city name",
                }
            },
            "required": ["location"],
            "additionalProperties": False,
        },
    }

    llm = OpenAICompletion(model="gpt-4.1")

    # First call warms the cache; the second identical prefix + tools hits it.
    for question in ("What is the weather in Tokyo?", "What is the weather in Paris?"):
        llm.call(
            [
                {"role": "system", "content": system_msg},
                {"role": "user", "content": question},
            ],
            tools=[weather_tool],
            available_functions={"get_weather": get_weather},
        )

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.prompt_tokens > 0
    assert usage.successful_requests == 2
    # The second call should have cached prompt tokens
    assert usage.cached_prompt_tokens > 0
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_openai_responses_api_cached_prompt_tokens_with_tools():
    """
    Test that the Responses API correctly tracks cached_prompt_tokens
    when function tools are used.
    """
    padding = "This is padding text to ensure the prompt is large enough for caching. " * 80
    system_msg = f"You are a helpful assistant that uses tools. {padding}"

    def get_weather(location: str) -> str:
        return f"The weather in {location} is sunny and 72°F"

    tools = [
        {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name",
                    }
                },
                "required": ["location"],
            },
        }
    ]

    # FIX: was api='response' (singular), inconsistent with the Responses-API
    # selector "responses" used by test_openai_responses_api_cached_prompt_tokens.
    llm = OpenAICompletion(model="gpt-4.1", api="responses")

    # First call with tool: creates the cache
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Tokyo?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )

    # Second call: same system prompt + tools should hit cache
    llm.call(
        [
            {"role": "system", "content": system_msg},
            {"role": "user", "content": "What is the weather in Paris?"},
        ],
        tools=tools,
        available_functions={"get_weather": get_weather},
    )

    usage = llm.get_token_usage_summary()
    assert usage.total_tokens > 0
    assert usage.successful_requests == 2
    assert usage.cached_prompt_tokens > 0
|
||||
def test_openai_streaming_returns_tool_calls_without_available_functions():
    """Test that streaming returns tool calls list when available_functions is None.

    This mirrors the non-streaming path where tool_calls are returned for
    the executor to handle. Reproduces the bug where streaming with tool
    calls would return empty text instead of tool_calls when
    available_functions was not provided (as the crew executor does).
    """
    llm = LLM(model="openai/gpt-4o-mini", stream=True)

    def _tool_delta_chunk(call_id, fn_name, fn_args):
        # One streaming chunk carrying a partial tool-call delta.
        chunk = MagicMock()
        chunk.choices = [MagicMock()]
        chunk.choices[0].delta = MagicMock()
        chunk.choices[0].delta.content = None
        chunk.choices[0].delta.tool_calls = [MagicMock()]
        tool_delta = chunk.choices[0].delta.tool_calls[0]
        tool_delta.index = 0
        tool_delta.id = call_id
        tool_delta.function = MagicMock()
        tool_delta.function.name = fn_name
        tool_delta.function.arguments = fn_args
        chunk.choices[0].finish_reason = None
        chunk.usage = None
        chunk.id = "chatcmpl-1"
        return chunk

    # Arguments arrive split across two deltas; only the first carries id/name.
    first = _tool_delta_chunk("call_abc123", "calculator", '{"expr')
    second = _tool_delta_chunk(None, None, 'ession": "1+1"}')

    # Terminal chunk with finish_reason and usage, no tool deltas.
    final = MagicMock()
    final.choices = [MagicMock()]
    final.choices[0].delta = MagicMock()
    final.choices[0].delta.content = None
    final.choices[0].delta.tool_calls = None
    final.choices[0].finish_reason = "tool_calls"
    final.usage = MagicMock()
    final.usage.prompt_tokens = 10
    final.usage.completion_tokens = 5
    final.id = "chatcmpl-1"

    calculator_tool = {
        "type": "function",
        "function": {
            "name": "calculator",
            "description": "Calculate expression",
            "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
        },
    }

    with patch.object(
        llm.client.chat.completions,
        "create",
        return_value=iter([first, second, final]),
    ):
        result = llm.call(
            messages=[{"role": "user", "content": "Calculate 1+1"}],
            tools=[calculator_tool],
            available_functions=None,
        )

    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
    assert len(result) == 1
    assert result[0]["function"]["name"] == "calculator"
    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
    assert result[0]["id"] == "call_abc123"
    assert result[0]["type"] == "function"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_openai_async_streaming_returns_tool_calls_without_available_functions():
    """Test that async streaming returns tool calls list when available_functions is None.

    Same as the sync test but for the async path (_ahandle_streaming_completion).
    """
    llm = LLM(model="openai/gpt-4o-mini", stream=True)

    def _tool_delta_chunk(call_id, fn_name, fn_args):
        # One streaming chunk carrying a partial tool-call delta.
        chunk = MagicMock()
        chunk.choices = [MagicMock()]
        chunk.choices[0].delta = MagicMock()
        chunk.choices[0].delta.content = None
        chunk.choices[0].delta.tool_calls = [MagicMock()]
        tool_delta = chunk.choices[0].delta.tool_calls[0]
        tool_delta.index = 0
        tool_delta.id = call_id
        tool_delta.function = MagicMock()
        tool_delta.function.name = fn_name
        tool_delta.function.arguments = fn_args
        chunk.choices[0].finish_reason = None
        chunk.usage = None
        chunk.id = "chatcmpl-1"
        return chunk

    # Arguments arrive split across two deltas; only the first carries id/name.
    first = _tool_delta_chunk("call_abc123", "calculator", '{"expr')
    second = _tool_delta_chunk(None, None, 'ession": "1+1"}')

    # Terminal chunk with finish_reason and usage, no tool deltas.
    final = MagicMock()
    final.choices = [MagicMock()]
    final.choices[0].delta = MagicMock()
    final.choices[0].delta.content = None
    final.choices[0].delta.tool_calls = None
    final.choices[0].finish_reason = "tool_calls"
    final.usage = MagicMock()
    final.usage.prompt_tokens = 10
    final.usage.completion_tokens = 5
    final.id = "chatcmpl-1"

    class _FakeAsyncStream:
        """Async iterator that mimics OpenAI's async streaming response."""

        def __init__(self, chunks: list[Any]) -> None:
            self._chunks = chunks
            self._pos = 0

        def __aiter__(self) -> "_FakeAsyncStream":
            return self

        async def __anext__(self) -> Any:
            if self._pos >= len(self._chunks):
                raise StopAsyncIteration
            item = self._chunks[self._pos]
            self._pos += 1
            return item

    async def fake_create(**kwargs: Any) -> _FakeAsyncStream:
        return _FakeAsyncStream([first, second, final])

    calculator_tool = {
        "type": "function",
        "function": {
            "name": "calculator",
            "description": "Calculate expression",
            "parameters": {"type": "object", "properties": {"expression": {"type": "string"}}},
        },
    }

    with patch.object(
        llm.async_client.chat.completions, "create", side_effect=fake_create
    ):
        result = await llm.acall(
            messages=[{"role": "user", "content": "Calculate 1+1"}],
            tools=[calculator_tool],
            available_functions=None,
        )

    assert isinstance(result, list), f"Expected list of tool calls, got {type(result)}: {result}"
    assert len(result) == 1
    assert result[0]["function"]["name"] == "calculator"
    assert result[0]["function"]["arguments"] == '{"expression": "1+1"}'
    assert result[0]["id"] == "call_abc123"
    assert result[0]["type"] == "function"
|
||||
|
||||
@@ -308,6 +308,7 @@ def test_external_memory_search_events(
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"limit": 3,
|
||||
@@ -330,6 +331,7 @@ def test_external_memory_search_events(
|
||||
"parent_event_id": ANY,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"results": [],
|
||||
@@ -390,6 +392,7 @@ def test_external_memory_save_events(
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "saving value",
|
||||
"metadata": {"task": "test_task"},
|
||||
@@ -411,6 +414,7 @@ def test_external_memory_save_events(
|
||||
"parent_event_id": ANY,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "saving value",
|
||||
"metadata": {"task": "test_task"},
|
||||
|
||||
@@ -74,6 +74,7 @@ def test_long_term_memory_save_events(long_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test_task",
|
||||
"metadata": {"task": "test_task", "quality": 0.5},
|
||||
@@ -94,6 +95,7 @@ def test_long_term_memory_save_events(long_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test_task",
|
||||
"metadata": {
|
||||
@@ -153,6 +155,7 @@ def test_long_term_memory_search_events(long_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test query",
|
||||
"limit": 5,
|
||||
@@ -175,6 +178,7 @@ def test_long_term_memory_search_events(long_term_memory):
|
||||
"parent_event_id": ANY,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test query",
|
||||
"results": None,
|
||||
|
||||
@@ -85,6 +85,7 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"limit": 3,
|
||||
@@ -107,6 +108,7 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"results": [],
|
||||
@@ -164,6 +166,7 @@ def test_short_term_memory_save_events(short_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task"},
|
||||
@@ -185,6 +188,7 @@ def test_short_term_memory_save_events(short_term_memory):
|
||||
"parent_event_id": None,
|
||||
"previous_event_id": ANY,
|
||||
"triggered_by_event_id": None,
|
||||
"started_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task"},
|
||||
|
||||
@@ -2,6 +2,7 @@ from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from crewai.events.event_listener import event_listener
|
||||
from crewai.core.providers.human_input import SyncHumanInputProvider
|
||||
|
||||
|
||||
class TestFlowHumanInputIntegration:
|
||||
@@ -24,14 +25,9 @@ class TestFlowHumanInputIntegration:
|
||||
@patch("builtins.input", return_value="")
|
||||
def test_human_input_pauses_flow_updates(self, mock_input):
|
||||
"""Test that human input pauses Flow status updates."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import (
|
||||
CrewAgentExecutorMixin,
|
||||
)
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
provider = SyncHumanInputProvider()
|
||||
crew = MagicMock()
|
||||
crew._train = False
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
@@ -39,7 +35,7 @@ class TestFlowHumanInputIntegration:
|
||||
patch.object(formatter, "pause_live_updates") as mock_pause,
|
||||
patch.object(formatter, "resume_live_updates") as mock_resume,
|
||||
):
|
||||
result = executor._ask_human_input("Test result")
|
||||
result = provider._prompt_input(crew)
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
@@ -49,14 +45,9 @@ class TestFlowHumanInputIntegration:
|
||||
@patch("builtins.input", side_effect=["feedback", ""])
|
||||
def test_multiple_human_input_rounds(self, mock_input):
|
||||
"""Test multiple rounds of human input with Flow status management."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import (
|
||||
CrewAgentExecutorMixin,
|
||||
)
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
provider = SyncHumanInputProvider()
|
||||
crew = MagicMock()
|
||||
crew._train = False
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
@@ -75,10 +66,10 @@ class TestFlowHumanInputIntegration:
|
||||
formatter, "resume_live_updates", side_effect=track_resume
|
||||
),
|
||||
):
|
||||
result1 = executor._ask_human_input("Test result 1")
|
||||
result1 = provider._prompt_input(crew)
|
||||
assert result1 == "feedback"
|
||||
|
||||
result2 = executor._ask_human_input("Test result 2")
|
||||
result2 = provider._prompt_input(crew)
|
||||
assert result2 == ""
|
||||
|
||||
assert len(pause_calls) == 2
|
||||
@@ -103,14 +94,9 @@ class TestFlowHumanInputIntegration:
|
||||
|
||||
def test_pause_resume_exception_handling(self):
|
||||
"""Test that resume is called even if exception occurs during human input."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import (
|
||||
CrewAgentExecutorMixin,
|
||||
)
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = False
|
||||
executor._printer = MagicMock()
|
||||
provider = SyncHumanInputProvider()
|
||||
crew = MagicMock()
|
||||
crew._train = False
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
@@ -122,21 +108,16 @@ class TestFlowHumanInputIntegration:
|
||||
),
|
||||
):
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
executor._ask_human_input("Test result")
|
||||
provider._prompt_input(crew)
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
|
||||
def test_training_mode_human_input(self):
|
||||
"""Test human input in training mode."""
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import (
|
||||
CrewAgentExecutorMixin,
|
||||
)
|
||||
|
||||
executor = CrewAgentExecutorMixin()
|
||||
executor.crew = MagicMock()
|
||||
executor.crew._train = True
|
||||
executor._printer = MagicMock()
|
||||
provider = SyncHumanInputProvider()
|
||||
crew = MagicMock()
|
||||
crew._train = True
|
||||
|
||||
formatter = event_listener.formatter
|
||||
|
||||
@@ -146,7 +127,7 @@ class TestFlowHumanInputIntegration:
|
||||
patch.object(formatter.console, "print") as mock_console_print,
|
||||
patch("builtins.input", return_value="training feedback"),
|
||||
):
|
||||
result = executor._ask_human_input("Test result")
|
||||
result = provider._prompt_input(crew)
|
||||
|
||||
mock_pause.assert_called_once()
|
||||
mock_resume.assert_called_once()
|
||||
@@ -161,4 +142,4 @@ class TestFlowHumanInputIntegration:
|
||||
for call in call_args
|
||||
if call[0]
|
||||
)
|
||||
assert training_panel_found
|
||||
assert training_panel_found
|
||||
@@ -157,6 +157,176 @@ class TestMultiStepFlows:
|
||||
|
||||
assert execution_order == ["generate", "review", "finalize"]
|
||||
|
||||
def test_chained_router_feedback_steps(self):
    """Test that a router outcome can trigger another router method.

    Regression test: @listen("outcome") combined with @human_feedback(emit=...)
    creates a method that is both a listener and a router. The flow must find
    and execute it when the upstream router emits the matching outcome.
    """
    execution_order: list[str] = []

    class ChainedRouterFlow(Flow):
        @start()
        @human_feedback(
            message="First review:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
        )
        def draft(self):
            execution_order.append("draft")
            return "draft content"

        @listen("approved")
        @human_feedback(
            message="Final review:",
            emit=["publish", "revise"],
            llm="gpt-4o-mini",
        )
        def final_review(self, prev: HumanFeedbackResult):
            execution_order.append("final_review")
            return "final content"

        @listen("rejected")
        def on_rejected(self, prev: HumanFeedbackResult):
            execution_order.append("on_rejected")
            return "rejected"

        @listen("publish")
        def on_publish(self, prev: HumanFeedbackResult):
            execution_order.append("on_publish")
            return "published"

        @listen("revise")
        def on_revise(self, prev: HumanFeedbackResult):
            execution_order.append("on_revise")
            return "revised"

    flow = ChainedRouterFlow()

    # Stub out the human-in-the-loop pieces: raw feedback text and the
    # LLM collapse of that feedback to a routing outcome.
    feedback_patch = patch.object(
        flow, "_request_human_feedback", side_effect=["looks good", "ship it"]
    )
    outcome_patch = patch.object(
        flow, "_collapse_to_outcome", side_effect=["approved", "publish"]
    )
    with feedback_patch, outcome_patch:
        result = flow.kickoff()

    assert execution_order == ["draft", "final_review", "on_publish"]
    assert result == "published"
    assert len(flow.human_feedback_history) == 2
    assert flow.human_feedback_history[0].outcome == "approved"
    assert flow.human_feedback_history[1].outcome == "publish"
|
||||
|
||||
def test_chained_router_rejected_path(self):
|
||||
"""Test that a start-router outcome routes to a non-router listener."""
|
||||
execution_order: list[str] = []
|
||||
|
||||
class ChainedRouterFlow(Flow):
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Review:",
|
||||
emit=["approved", "rejected"],
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
def draft(self):
|
||||
execution_order.append("draft")
|
||||
return "draft"
|
||||
|
||||
@listen("approved")
|
||||
@human_feedback(
|
||||
message="Final:",
|
||||
emit=["publish", "revise"],
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
def final_review(self, prev: HumanFeedbackResult):
|
||||
execution_order.append("final_review")
|
||||
return "final"
|
||||
|
||||
@listen("rejected")
|
||||
def on_rejected(self, prev: HumanFeedbackResult):
|
||||
execution_order.append("on_rejected")
|
||||
return "rejected"
|
||||
|
||||
flow = ChainedRouterFlow()
|
||||
|
||||
with (
|
||||
patch.object(
|
||||
flow, "_request_human_feedback", return_value="bad"
|
||||
),
|
||||
patch.object(
|
||||
flow, "_collapse_to_outcome", return_value="rejected"
|
||||
),
|
||||
):
|
||||
result = flow.kickoff()
|
||||
|
||||
assert execution_order == ["draft", "on_rejected"]
|
||||
assert result == "rejected"
|
||||
assert len(flow.human_feedback_history) == 1
|
||||
assert flow.human_feedback_history[0].outcome == "rejected"
|
||||
|
||||
def test_router_and_non_router_listeners_for_same_outcome(self):
|
||||
"""Test that both router and non-router listeners fire for the same outcome."""
|
||||
execution_order: list[str] = []
|
||||
|
||||
class MixedListenerFlow(Flow):
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Review:",
|
||||
emit=["approved", "rejected"],
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
def draft(self):
|
||||
execution_order.append("draft")
|
||||
return "draft"
|
||||
|
||||
@listen("approved")
|
||||
@human_feedback(
|
||||
message="Final:",
|
||||
emit=["publish", "revise"],
|
||||
llm="gpt-4o-mini",
|
||||
)
|
||||
def router_listener(self, prev: HumanFeedbackResult):
|
||||
execution_order.append("router_listener")
|
||||
return "final"
|
||||
|
||||
@listen("approved")
|
||||
def plain_listener(self, prev: HumanFeedbackResult):
|
||||
execution_order.append("plain_listener")
|
||||
return "logged"
|
||||
|
||||
@listen("publish")
|
||||
def on_publish(self, prev: HumanFeedbackResult):
|
||||
execution_order.append("on_publish")
|
||||
return "published"
|
||||
|
||||
flow = MixedListenerFlow()
|
||||
|
||||
with (
|
||||
patch.object(
|
||||
flow,
|
||||
"_request_human_feedback",
|
||||
side_effect=["approve it", "publish it"],
|
||||
),
|
||||
patch.object(
|
||||
flow,
|
||||
"_collapse_to_outcome",
|
||||
side_effect=["approved", "publish"],
|
||||
),
|
||||
):
|
||||
flow.kickoff()
|
||||
|
||||
assert "draft" in execution_order
|
||||
assert "router_listener" in execution_order
|
||||
assert "plain_listener" in execution_order
|
||||
assert "on_publish" in execution_order
|
||||
|
||||
|
||||
class TestStateManagement:
|
||||
"""Tests for state management with human feedback."""
|
||||
|
||||
@@ -10,7 +10,9 @@ from crewai import Agent, Task
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.tool_usage_events import (
|
||||
ToolSelectionErrorEvent,
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageFinishedEvent,
|
||||
ToolUsageStartedEvent,
|
||||
ToolValidateInputErrorEvent,
|
||||
)
|
||||
from crewai.tools import BaseTool
|
||||
@@ -744,3 +746,78 @@ def test_tool_usage_finished_event_with_cached_result():
|
||||
assert isinstance(event.started_at, datetime.datetime)
|
||||
assert isinstance(event.finished_at, datetime.datetime)
|
||||
assert event.type == "tool_usage_finished"
|
||||
|
||||
|
||||
def test_tool_error_does_not_emit_finished_event():
|
||||
from crewai.tools.tool_calling import ToolCalling
|
||||
|
||||
class FailingTool(BaseTool):
|
||||
name: str = "Failing Tool"
|
||||
description: str = "A tool that always fails"
|
||||
|
||||
def _run(self, **kwargs) -> str:
|
||||
raise ValueError("Intentional failure")
|
||||
|
||||
failing_tool = FailingTool().to_structured_tool()
|
||||
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.key = "test_agent_key"
|
||||
mock_agent.role = "test_agent_role"
|
||||
mock_agent._original_role = "test_agent_role"
|
||||
mock_agent.verbose = False
|
||||
mock_agent.fingerprint = None
|
||||
mock_agent.i18n.tools.return_value = {"name": "Add Image"}
|
||||
mock_agent.i18n.errors.return_value = "Error: {error}"
|
||||
mock_agent.i18n.slice.return_value = "Available tools: {tool_names}"
|
||||
|
||||
mock_task = MagicMock()
|
||||
mock_task.delegations = 0
|
||||
mock_task.name = "Test Task"
|
||||
mock_task.description = "A test task"
|
||||
mock_task.id = "test-task-id"
|
||||
|
||||
mock_action = MagicMock()
|
||||
mock_action.tool = "failing_tool"
|
||||
mock_action.tool_input = "{}"
|
||||
|
||||
tool_usage = ToolUsage(
|
||||
tools_handler=MagicMock(cache=None, last_used_tool=None),
|
||||
tools=[failing_tool],
|
||||
task=mock_task,
|
||||
function_calling_llm=None,
|
||||
agent=mock_agent,
|
||||
action=mock_action,
|
||||
)
|
||||
|
||||
started_events = []
|
||||
error_events = []
|
||||
finished_events = []
|
||||
error_received = threading.Event()
|
||||
|
||||
@crewai_event_bus.on(ToolUsageStartedEvent)
|
||||
def on_started(source, event):
|
||||
if event.tool_name == "failing_tool":
|
||||
started_events.append(event)
|
||||
|
||||
@crewai_event_bus.on(ToolUsageErrorEvent)
|
||||
def on_error(source, event):
|
||||
if event.tool_name == "failing_tool":
|
||||
error_events.append(event)
|
||||
error_received.set()
|
||||
|
||||
@crewai_event_bus.on(ToolUsageFinishedEvent)
|
||||
def on_finished(source, event):
|
||||
if event.tool_name == "failing_tool":
|
||||
finished_events.append(event)
|
||||
|
||||
tool_calling = ToolCalling(tool_name="failing_tool", arguments={})
|
||||
tool_usage.use(calling=tool_calling, tool_string="Action: failing_tool")
|
||||
|
||||
assert error_received.wait(timeout=5), "Timeout waiting for error event"
|
||||
crewai_event_bus.flush()
|
||||
|
||||
assert len(started_events) >= 1, "Expected at least one ToolUsageStartedEvent"
|
||||
assert len(error_events) >= 1, "Expected at least one ToolUsageErrorEvent"
|
||||
assert len(finished_events) == 0, (
|
||||
"ToolUsageFinishedEvent should NOT be emitted after ToolUsageErrorEvent"
|
||||
)
|
||||
|
||||
@@ -2,13 +2,23 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
from unittest.mock import MagicMock, patch
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.utilities.agent_utils import convert_tools_to_openai_schema, summarize_messages
|
||||
from crewai.utilities.agent_utils import (
|
||||
_asummarize_chunks,
|
||||
_estimate_token_count,
|
||||
_extract_summary_tags,
|
||||
_format_messages_for_summary,
|
||||
_split_messages_into_chunks,
|
||||
convert_tools_to_openai_schema,
|
||||
summarize_messages,
|
||||
)
|
||||
|
||||
|
||||
class CalculatorInput(BaseModel):
|
||||
@@ -214,6 +224,17 @@ class TestConvertToolsToOpenaiSchema:
|
||||
assert max_results_prop["default"] == 10
|
||||
|
||||
|
||||
def _make_mock_i18n() -> MagicMock:
|
||||
"""Create a mock i18n with the new structured prompt keys."""
|
||||
mock_i18n = MagicMock()
|
||||
mock_i18n.slice.side_effect = lambda key: {
|
||||
"summarizer_system_message": "You are a precise assistant that creates structured summaries.",
|
||||
"summarize_instruction": "Summarize the conversation:\n{conversation}",
|
||||
"summary": "<summary>\n{merged_summary}\n</summary>\nContinue the task.",
|
||||
}.get(key, "")
|
||||
return mock_i18n
|
||||
|
||||
|
||||
class TestSummarizeMessages:
|
||||
"""Tests for summarize_messages function."""
|
||||
|
||||
@@ -229,26 +250,22 @@ class TestSummarizeMessages:
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "Summarized conversation about image analysis."
|
||||
|
||||
mock_i18n = MagicMock()
|
||||
mock_i18n.slice.side_effect = lambda key: {
|
||||
"summarizer_system_message": "Summarize the following.",
|
||||
"summarize_instruction": "Summarize: {group}",
|
||||
"summary": "Summary: {merged_summary}",
|
||||
}.get(key, "")
|
||||
mock_llm.call.return_value = "<summary>Summarized conversation about image analysis.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=mock_i18n,
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert len(messages) == 1
|
||||
assert messages[0]["role"] == "user"
|
||||
assert "files" in messages[0]
|
||||
assert messages[0]["files"] == mock_files
|
||||
# System message preserved + summary message = 2
|
||||
assert len(messages) == 2
|
||||
assert messages[0]["role"] == "system"
|
||||
summary_msg = messages[1]
|
||||
assert summary_msg["role"] == "user"
|
||||
assert "files" in summary_msg
|
||||
assert summary_msg["files"] == mock_files
|
||||
|
||||
def test_merges_files_from_multiple_user_messages(self) -> None:
|
||||
"""Test that files from multiple user messages are merged."""
|
||||
@@ -264,20 +281,13 @@ class TestSummarizeMessages:
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "Summarized conversation."
|
||||
|
||||
mock_i18n = MagicMock()
|
||||
mock_i18n.slice.side_effect = lambda key: {
|
||||
"summarizer_system_message": "Summarize the following.",
|
||||
"summarize_instruction": "Summarize: {group}",
|
||||
"summary": "Summary: {merged_summary}",
|
||||
}.get(key, "")
|
||||
mock_llm.call.return_value = "<summary>Summarized conversation.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=mock_i18n,
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert len(messages) == 1
|
||||
@@ -297,20 +307,13 @@ class TestSummarizeMessages:
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "A greeting exchange."
|
||||
|
||||
mock_i18n = MagicMock()
|
||||
mock_i18n.slice.side_effect = lambda key: {
|
||||
"summarizer_system_message": "Summarize the following.",
|
||||
"summarize_instruction": "Summarize: {group}",
|
||||
"summary": "Summary: {merged_summary}",
|
||||
}.get(key, "")
|
||||
mock_llm.call.return_value = "<summary>A greeting exchange.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=mock_i18n,
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert len(messages) == 1
|
||||
@@ -327,21 +330,595 @@ class TestSummarizeMessages:
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "Summary"
|
||||
|
||||
mock_i18n = MagicMock()
|
||||
mock_i18n.slice.side_effect = lambda key: {
|
||||
"summarizer_system_message": "Summarize.",
|
||||
"summarize_instruction": "Summarize: {group}",
|
||||
"summary": "Summary: {merged_summary}",
|
||||
}.get(key, "")
|
||||
mock_llm.call.return_value = "<summary>Summary</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=mock_i18n,
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert id(messages) == original_list_id
|
||||
assert len(messages) == 1
|
||||
|
||||
def test_preserves_system_messages(self) -> None:
|
||||
"""Test that system messages are preserved and not summarized."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "You are a research assistant."},
|
||||
{"role": "user", "content": "Find information about AI."},
|
||||
{"role": "assistant", "content": "I found several resources on AI."},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "<summary>User asked about AI, assistant found resources.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert len(messages) == 2
|
||||
assert messages[0]["role"] == "system"
|
||||
assert messages[0]["content"] == "You are a research assistant."
|
||||
assert messages[1]["role"] == "user"
|
||||
|
||||
def test_formats_conversation_with_role_labels(self) -> None:
|
||||
"""Test that the LLM receives role-labeled conversation text."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "System prompt."},
|
||||
{"role": "user", "content": "Hello there"},
|
||||
{"role": "assistant", "content": "Hi! How can I help?"},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "<summary>Greeting exchange.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
# Check what was passed to llm.call
|
||||
call_args = mock_llm.call.call_args[0][0]
|
||||
user_msg_content = call_args[1]["content"]
|
||||
assert "[USER]:" in user_msg_content
|
||||
assert "[ASSISTANT]:" in user_msg_content
|
||||
# System content should NOT appear in summarization input
|
||||
assert "System prompt." not in user_msg_content
|
||||
|
||||
def test_extracts_summary_from_tags(self) -> None:
|
||||
"""Test that <summary> tags are extracted from LLM response."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "Do something."},
|
||||
{"role": "assistant", "content": "Done."},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "Here is the summary:\n<summary>The extracted summary content.</summary>\nExtra text."
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert "The extracted summary content." in messages[0]["content"]
|
||||
|
||||
def test_handles_tool_messages(self) -> None:
|
||||
"""Test that tool messages are properly formatted in summarization."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "Search for Python."},
|
||||
{"role": "assistant", "content": None, "tool_calls": [
|
||||
{"function": {"name": "web_search", "arguments": '{"query": "Python"}'}}
|
||||
]},
|
||||
{"role": "tool", "content": "Python is a programming language.", "name": "web_search"},
|
||||
{"role": "assistant", "content": "Python is a programming language."},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
mock_llm.call.return_value = "<summary>User searched for Python info.</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
# Verify the conversation text sent to LLM contains tool labels
|
||||
call_args = mock_llm.call.call_args[0][0]
|
||||
user_msg_content = call_args[1]["content"]
|
||||
assert "[TOOL_RESULT (web_search)]:" in user_msg_content
|
||||
|
||||
def test_only_system_messages_no_op(self) -> None:
|
||||
"""Test that only system messages results in no-op (no summarization)."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "system", "content": "Additional system instructions."},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 1000
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
# No LLM call should have been made
|
||||
mock_llm.call.assert_not_called()
|
||||
# System messages should remain untouched
|
||||
assert len(messages) == 2
|
||||
assert messages[0]["content"] == "You are a helpful assistant."
|
||||
assert messages[1]["content"] == "Additional system instructions."
|
||||
|
||||
|
||||
class TestFormatMessagesForSummary:
|
||||
"""Tests for _format_messages_for_summary helper."""
|
||||
|
||||
def test_skips_system_messages(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "System prompt"},
|
||||
{"role": "user", "content": "Hello"},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "System prompt" not in result
|
||||
assert "[USER]: Hello" in result
|
||||
|
||||
def test_formats_user_and_assistant(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "Question"},
|
||||
{"role": "assistant", "content": "Answer"},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "[USER]: Question" in result
|
||||
assert "[ASSISTANT]: Answer" in result
|
||||
|
||||
def test_formats_tool_messages(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "tool", "content": "Result data", "name": "search_tool"},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "[TOOL_RESULT (search_tool)]:" in result
|
||||
assert "Result data" in result
|
||||
|
||||
def test_handles_none_content_with_tool_calls(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "assistant", "content": None, "tool_calls": [
|
||||
{"function": {"name": "calculator", "arguments": "{}"}}
|
||||
]},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "[Called tools: calculator]" in result
|
||||
|
||||
def test_handles_none_content_without_tool_calls(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "assistant", "content": None},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "[ASSISTANT]:" in result
|
||||
|
||||
def test_handles_multimodal_content(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": [
|
||||
{"type": "text", "text": "Describe this image"},
|
||||
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}
|
||||
]},
|
||||
]
|
||||
result = _format_messages_for_summary(messages)
|
||||
assert "[USER]: Describe this image" in result
|
||||
|
||||
def test_empty_messages(self) -> None:
|
||||
result = _format_messages_for_summary([])
|
||||
assert result == ""
|
||||
|
||||
|
||||
class TestExtractSummaryTags:
|
||||
"""Tests for _extract_summary_tags helper."""
|
||||
|
||||
def test_extracts_content_from_tags(self) -> None:
|
||||
text = "Preamble\n<summary>The actual summary.</summary>\nPostamble"
|
||||
assert _extract_summary_tags(text) == "The actual summary."
|
||||
|
||||
def test_handles_multiline_content(self) -> None:
|
||||
text = "<summary>\nLine 1\nLine 2\nLine 3\n</summary>"
|
||||
result = _extract_summary_tags(text)
|
||||
assert "Line 1" in result
|
||||
assert "Line 2" in result
|
||||
assert "Line 3" in result
|
||||
|
||||
def test_falls_back_when_no_tags(self) -> None:
|
||||
text = "Just a plain summary without tags."
|
||||
assert _extract_summary_tags(text) == text
|
||||
|
||||
def test_handles_empty_string(self) -> None:
|
||||
assert _extract_summary_tags("") == ""
|
||||
|
||||
def test_extracts_first_match(self) -> None:
|
||||
text = "<summary>First</summary> text <summary>Second</summary>"
|
||||
assert _extract_summary_tags(text) == "First"
|
||||
|
||||
|
||||
class TestSplitMessagesIntoChunks:
|
||||
"""Tests for _split_messages_into_chunks helper."""
|
||||
|
||||
def test_single_chunk_when_under_limit(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "Hello"},
|
||||
{"role": "assistant", "content": "Hi"},
|
||||
]
|
||||
chunks = _split_messages_into_chunks(messages, max_tokens=1000)
|
||||
assert len(chunks) == 1
|
||||
assert len(chunks[0]) == 2
|
||||
|
||||
def test_splits_at_message_boundaries(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "A" * 100}, # ~25 tokens
|
||||
{"role": "assistant", "content": "B" * 100}, # ~25 tokens
|
||||
{"role": "user", "content": "C" * 100}, # ~25 tokens
|
||||
]
|
||||
# max_tokens=30 should cause splits
|
||||
chunks = _split_messages_into_chunks(messages, max_tokens=30)
|
||||
assert len(chunks) == 3
|
||||
|
||||
def test_excludes_system_messages(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "System prompt"},
|
||||
{"role": "user", "content": "Hello"},
|
||||
]
|
||||
chunks = _split_messages_into_chunks(messages, max_tokens=1000)
|
||||
assert len(chunks) == 1
|
||||
# The system message should not be in any chunk
|
||||
for chunk in chunks:
|
||||
for msg in chunk:
|
||||
assert msg.get("role") != "system"
|
||||
|
||||
def test_empty_messages(self) -> None:
|
||||
chunks = _split_messages_into_chunks([], max_tokens=1000)
|
||||
assert chunks == []
|
||||
|
||||
def test_only_system_messages(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "system", "content": "System prompt"},
|
||||
]
|
||||
chunks = _split_messages_into_chunks(messages, max_tokens=1000)
|
||||
assert chunks == []
|
||||
|
||||
def test_handles_none_content(self) -> None:
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "assistant", "content": None},
|
||||
{"role": "user", "content": "Follow up"},
|
||||
]
|
||||
chunks = _split_messages_into_chunks(messages, max_tokens=1000)
|
||||
assert len(chunks) == 1
|
||||
assert len(chunks[0]) == 2
|
||||
|
||||
|
||||
class TestEstimateTokenCount:
|
||||
"""Tests for _estimate_token_count helper."""
|
||||
|
||||
def test_empty_string(self) -> None:
|
||||
assert _estimate_token_count("") == 0
|
||||
|
||||
def test_short_string(self) -> None:
|
||||
assert _estimate_token_count("hello") == 1 # 5 // 4 = 1
|
||||
|
||||
def test_longer_string(self) -> None:
|
||||
assert _estimate_token_count("a" * 100) == 25 # 100 // 4 = 25
|
||||
|
||||
def test_approximation_is_conservative(self) -> None:
|
||||
# For English text, actual token count is typically lower than char/4
|
||||
text = "The quick brown fox jumps over the lazy dog."
|
||||
estimated = _estimate_token_count(text)
|
||||
assert estimated > 0
|
||||
assert estimated == len(text) // 4
|
||||
|
||||
|
||||
class TestParallelSummarization:
|
||||
"""Tests for parallel chunk summarization via asyncio."""
|
||||
|
||||
def _make_messages_for_n_chunks(self, n: int) -> list[dict[str, Any]]:
|
||||
"""Build a message list that will produce exactly *n* chunks.
|
||||
|
||||
Each message has 400 chars (~100 tokens). With max_tokens=100 returned
|
||||
by the mock LLM, each message lands in its own chunk.
|
||||
"""
|
||||
msgs: list[dict[str, Any]] = []
|
||||
for i in range(n):
|
||||
msgs.append({"role": "user", "content": f"msg-{i} " + "x" * 400})
|
||||
return msgs
|
||||
|
||||
def test_multiple_chunks_use_acall(self) -> None:
|
||||
"""When there are multiple chunks, summarize_messages should use
|
||||
llm.acall (parallel) instead of llm.call (sequential)."""
|
||||
messages = self._make_messages_for_n_chunks(3)
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 100 # force multiple chunks
|
||||
mock_llm.acall = AsyncMock(
|
||||
side_effect=[
|
||||
"<summary>Summary chunk 1</summary>",
|
||||
"<summary>Summary chunk 2</summary>",
|
||||
"<summary>Summary chunk 3</summary>",
|
||||
]
|
||||
)
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
# acall should have been awaited once per chunk
|
||||
assert mock_llm.acall.await_count == 3
|
||||
# sync call should NOT have been used for chunk summarization
|
||||
mock_llm.call.assert_not_called()
|
||||
|
||||
def test_single_chunk_uses_sync_call(self) -> None:
|
||||
"""When there is only one chunk, summarize_messages should use
|
||||
the sync llm.call path (no async overhead)."""
|
||||
messages: list[dict[str, Any]] = [
|
||||
{"role": "user", "content": "Short message"},
|
||||
{"role": "assistant", "content": "Short reply"},
|
||||
]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 100_000
|
||||
mock_llm.call.return_value = "<summary>Short summary</summary>"
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
mock_llm.call.assert_called_once()
|
||||
|
||||
def test_parallel_results_preserve_order(self) -> None:
|
||||
"""Summaries must appear in the same order as the original chunks,
|
||||
regardless of which async call finishes first."""
|
||||
messages = self._make_messages_for_n_chunks(3)
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 100
|
||||
|
||||
# Simulate varying latencies — chunk 2 finishes before chunk 0
|
||||
async def _delayed_acall(msgs: Any, **kwargs: Any) -> str:
|
||||
user_content = msgs[1]["content"]
|
||||
if "msg-0" in user_content:
|
||||
await asyncio.sleep(0.05)
|
||||
return "<summary>Summary-A</summary>"
|
||||
elif "msg-1" in user_content:
|
||||
return "<summary>Summary-B</summary>" # fastest
|
||||
else:
|
||||
await asyncio.sleep(0.02)
|
||||
return "<summary>Summary-C</summary>"
|
||||
|
||||
mock_llm.acall = _delayed_acall
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
# The final summary message should have A, B, C in order
|
||||
summary_content = messages[-1]["content"]
|
||||
pos_a = summary_content.index("Summary-A")
|
||||
pos_b = summary_content.index("Summary-B")
|
||||
pos_c = summary_content.index("Summary-C")
|
||||
assert pos_a < pos_b < pos_c
|
||||
|
||||
def test_asummarize_chunks_returns_ordered_results(self) -> None:
|
||||
"""Direct test of the async helper _asummarize_chunks."""
|
||||
chunk_a: list[dict[str, Any]] = [{"role": "user", "content": "Chunk A"}]
|
||||
chunk_b: list[dict[str, Any]] = [{"role": "user", "content": "Chunk B"}]
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.acall = AsyncMock(
|
||||
side_effect=[
|
||||
"<summary>Result A</summary>",
|
||||
"<summary>Result B</summary>",
|
||||
]
|
||||
)
|
||||
|
||||
results = asyncio.run(
|
||||
_asummarize_chunks(
|
||||
chunks=[chunk_a, chunk_b],
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
)
|
||||
|
||||
assert len(results) == 2
|
||||
assert results[0]["content"] == "Result A"
|
||||
assert results[1]["content"] == "Result B"
|
||||
|
||||
@patch("crewai.utilities.agent_utils.is_inside_event_loop", return_value=True)
|
||||
def test_works_inside_existing_event_loop(self, _mock_loop: Any) -> None:
|
||||
"""When called from inside a running event loop (e.g. a Flow),
|
||||
the ThreadPoolExecutor fallback should still work."""
|
||||
messages = self._make_messages_for_n_chunks(2)
|
||||
|
||||
mock_llm = MagicMock()
|
||||
mock_llm.get_context_window_size.return_value = 100
|
||||
mock_llm.acall = AsyncMock(
|
||||
side_effect=[
|
||||
"<summary>Flow summary 1</summary>",
|
||||
"<summary>Flow summary 2</summary>",
|
||||
]
|
||||
)
|
||||
|
||||
summarize_messages(
|
||||
messages=messages,
|
||||
llm=mock_llm,
|
||||
callbacks=[],
|
||||
i18n=_make_mock_i18n(),
|
||||
)
|
||||
|
||||
assert mock_llm.acall.await_count == 2
|
||||
# Verify the merged summary made it into messages
|
||||
assert "Flow summary 1" in messages[-1]["content"]
|
||||
assert "Flow summary 2" in messages[-1]["content"]
|
||||
|
||||
|
||||
def _build_long_conversation() -> list[dict[str, Any]]:
|
||||
"""Build a multi-turn conversation that produces multiple chunks at max_tokens=200.
|
||||
|
||||
Each non-system message is ~100-140 estimated tokens (400-560 chars),
|
||||
so a max_tokens of 200 yields roughly 3 chunks from 6 messages.
|
||||
"""
|
||||
return [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful research assistant.",
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": (
|
||||
"Tell me about the history of the Python programming language. "
|
||||
"Who created it, when was it first released, and what were the "
|
||||
"main design goals? Please provide a detailed overview covering "
|
||||
"the major milestones from its inception through Python 3."
|
||||
),
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": (
|
||||
"Python was created by Guido van Rossum and first released in 1991. "
|
||||
"The main design goals were code readability and simplicity. Key milestones: "
|
||||
"Python 1.0 (1994) introduced functional programming tools like lambda and map. "
|
||||
"Python 2.0 (2000) added list comprehensions and garbage collection. "
|
||||
"Python 3.0 (2008) was a major backward-incompatible release that fixed "
|
||||
"fundamental design flaws. Python 2 reached end-of-life in January 2020."
|
||||
),
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": (
|
||||
"What about the async/await features? When were they introduced "
|
||||
"and how do they compare to similar features in JavaScript and C#? "
|
||||
"Also explain the Global Interpreter Lock and its implications."
|
||||
),
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": (
|
||||
"Async/await was introduced in Python 3.5 (PEP 492, 2015). "
|
||||
"Unlike JavaScript which is single-threaded by design, Python's asyncio "
|
||||
"is an opt-in framework. C# introduced async/await in 2012 (C# 5.0) and "
|
||||
"was a major inspiration for Python's implementation. "
|
||||
"The GIL (Global Interpreter Lock) is a mutex that protects access to "
|
||||
"Python objects, preventing multiple threads from executing Python bytecodes "
|
||||
"simultaneously. This means CPU-bound multithreaded programs don't benefit "
|
||||
"from multiple cores. PEP 703 proposes making the GIL optional in CPython."
|
||||
),
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": (
|
||||
"Explain the Python package ecosystem. How does pip work, what is PyPI, "
|
||||
"and what are virtual environments? Compare pip with conda and uv."
|
||||
),
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": (
|
||||
"PyPI (Python Package Index) is the official repository hosting 400k+ packages. "
|
||||
"pip is the standard package installer that downloads from PyPI. "
|
||||
"Virtual environments (venv) create isolated Python installations to avoid "
|
||||
"dependency conflicts between projects. conda is a cross-language package manager "
|
||||
"popular in data science that can manage non-Python dependencies. "
|
||||
"uv is a new Rust-based tool that is 10-100x faster than pip and aims to replace "
|
||||
"pip, pip-tools, and virtualenv with a single unified tool."
|
||||
),
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
class TestParallelSummarizationVCR:
    """VCR-backed integration tests covering the parallel summarization path.

    A real LLM is used while ``get_context_window_size`` is patched to a tiny
    value, forcing the conversation into multiple chunks and exercising the
    asyncio.gather + acall fan-out.

    To record cassettes:
        PYTEST_VCR_RECORD_MODE=all uv run pytest lib/crewai/tests/utilities/test_agent_utils.py::TestParallelSummarizationVCR -v
    """

    @pytest.mark.vcr()
    def test_parallel_summarize_openai(self) -> None:
        """Parallel summarization with gpt-4o-mini yields a valid summary."""
        from crewai.llm import LLM
        from crewai.utilities.i18n import I18N

        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_long_conversation()

        system_content_before = messages[0]["content"]

        # A 200-token window guarantees the conversation splits into chunks.
        with patch.object(type(llm), "get_context_window_size", return_value=200):
            # Sanity check: the chunker really produces more than one chunk.
            non_system = [m for m in messages if m.get("role") != "system"]
            chunks = _split_messages_into_chunks(non_system, max_tokens=200)
            assert len(chunks) > 1, f"Expected multiple chunks, got {len(chunks)}"

            summarize_messages(
                messages=messages,
                llm=llm,
                callbacks=[],
                i18n=i18n,
            )

        # The system message must survive untouched.
        assert messages[0]["role"] == "system"
        assert messages[0]["content"] == system_content_before

        # The summary lands as the trailing user message.
        tail = messages[-1]
        assert tail["role"] == "user"
        assert len(tail["content"]) > 0

    @pytest.mark.vcr()
    def test_parallel_summarize_preserves_files(self) -> None:
        """File attachments survive the parallel summarization path."""
        from crewai.llm import LLM
        from crewai.utilities.i18n import I18N

        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_long_conversation()

        fake_file = MagicMock()
        messages[1]["files"] = {"report.pdf": fake_file}

        with patch.object(type(llm), "get_context_window_size", return_value=200):
            summarize_messages(
                messages=messages,
                llm=llm,
                callbacks=[],
                i18n=i18n,
            )

        tail = messages[-1]
        assert tail["role"] == "user"
        assert "files" in tail
        assert "report.pdf" in tail["files"]
|
||||
|
||||
284
lib/crewai/tests/utilities/test_summarize_integration.py
Normal file
284
lib/crewai/tests/utilities/test_summarize_integration.py
Normal file
@@ -0,0 +1,284 @@
|
||||
"""
|
||||
Integration tests for structured context compaction (summarize_messages).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.crew import Crew
|
||||
from crewai.llm import LLM
|
||||
from crewai.task import Task
|
||||
from crewai.utilities.agent_utils import summarize_messages
|
||||
from crewai.utilities.i18n import I18N
|
||||
|
||||
|
||||
def _build_conversation_messages(
|
||||
*, include_system: bool = True, include_files: bool = False
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Build a realistic multi-turn conversation for summarization tests."""
|
||||
messages: list[dict[str, Any]] = []
|
||||
|
||||
if include_system:
|
||||
messages.append(
|
||||
{
|
||||
"role": "system",
|
||||
"content": (
|
||||
"You are a research assistant specializing in AI topics. "
|
||||
"Your goal is to find accurate, up-to-date information."
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
user_msg: dict[str, Any] = {
|
||||
"role": "user",
|
||||
"content": (
|
||||
"Research the latest developments in large language models. "
|
||||
"Focus on architecture improvements and training techniques."
|
||||
),
|
||||
}
|
||||
if include_files:
|
||||
user_msg["files"] = {"reference.pdf": MagicMock()}
|
||||
messages.append(user_msg)
|
||||
|
||||
messages.append(
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": (
|
||||
"I'll research the latest developments in large language models. "
|
||||
"Based on my knowledge, recent advances include:\n"
|
||||
"1. Mixture of Experts (MoE) architectures\n"
|
||||
"2. Improved attention mechanisms like Flash Attention\n"
|
||||
"3. Better training data curation techniques\n"
|
||||
"4. Constitutional AI and RLHF improvements"
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
messages.append(
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Can you go deeper on the MoE architectures? What are the key papers?",
|
||||
}
|
||||
)
|
||||
|
||||
messages.append(
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": (
|
||||
"Key papers on Mixture of Experts:\n"
|
||||
"- Switch Transformers (Google, 2021) - simplified MoE routing\n"
|
||||
"- GShard - scaling to 600B parameters\n"
|
||||
"- Mixtral (Mistral AI) - open-source MoE model\n"
|
||||
"The main advantage is computational efficiency: "
|
||||
"only a subset of experts is activated per token."
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
return messages
|
||||
|
||||
|
||||
class TestSummarizeDirectOpenAI:
    """Direct summarize_messages tests against OpenAI models."""

    @pytest.mark.vcr()
    def test_summarize_direct_openai(self) -> None:
        """gpt-4o-mini summarization leaves the system message intact."""
        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_conversation_messages(include_system=True)

        system_before = messages[0]["content"]

        summarize_messages(
            messages=messages,
            llm=llm,
            callbacks=[],
            i18n=i18n,
        )

        # The original system prompt must survive unchanged.
        assert len(messages) >= 2
        assert messages[0]["role"] == "system"
        assert messages[0]["content"] == system_before

        # The summary arrives as a user message wrapped in <summary> tags.
        tail = messages[-1]
        assert tail["role"] == "user"
        assert len(tail["content"]) > 0
        assert "<summary>" in tail["content"]
        assert "</summary>" in tail["content"]
|
||||
|
||||
|
||||
class TestSummarizeDirectAnthropic:
    """Direct summarize_messages tests against Anthropic models."""

    @pytest.mark.vcr()
    def test_summarize_direct_anthropic(self) -> None:
        """claude-3-5-haiku produces a <summary>-wrapped user message."""
        llm = LLM(model="anthropic/claude-3-5-haiku-latest", temperature=0)
        i18n = I18N()
        messages = _build_conversation_messages(include_system=True)

        summarize_messages(
            messages=messages,
            llm=llm,
            callbacks=[],
            i18n=i18n,
        )

        # System message preserved; summary appended as the final user turn.
        assert len(messages) >= 2
        assert messages[0]["role"] == "system"
        tail = messages[-1]
        assert tail["role"] == "user"
        assert len(tail["content"]) > 0
        assert "<summary>" in tail["content"]
        assert "</summary>" in tail["content"]
|
||||
|
||||
|
||||
class TestSummarizeDirectGemini:
    """Direct summarize_messages tests against Gemini models."""

    @pytest.mark.vcr()
    def test_summarize_direct_gemini(self) -> None:
        """gemini-2.0-flash produces a <summary>-wrapped user message."""
        llm = LLM(model="gemini/gemini-2.0-flash", temperature=0)
        i18n = I18N()
        messages = _build_conversation_messages(include_system=True)

        summarize_messages(
            messages=messages,
            llm=llm,
            callbacks=[],
            i18n=i18n,
        )

        # System message preserved; summary appended as the final user turn.
        assert len(messages) >= 2
        assert messages[0]["role"] == "system"
        tail = messages[-1]
        assert tail["role"] == "user"
        assert len(tail["content"]) > 0
        assert "<summary>" in tail["content"]
        assert "</summary>" in tail["content"]
|
||||
|
||||
|
||||
class TestSummarizeDirectAzure:
    """Direct summarize_messages tests against Azure-hosted models."""

    @pytest.mark.vcr()
    def test_summarize_direct_azure(self) -> None:
        """azure/gpt-4o-mini produces a <summary>-wrapped user message."""
        llm = LLM(model="azure/gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_conversation_messages(include_system=True)

        summarize_messages(
            messages=messages,
            llm=llm,
            callbacks=[],
            i18n=i18n,
        )

        # System message preserved; summary appended as the final user turn.
        assert len(messages) >= 2
        assert messages[0]["role"] == "system"
        tail = messages[-1]
        assert tail["role"] == "user"
        assert len(tail["content"]) > 0
        assert "<summary>" in tail["content"]
        assert "</summary>" in tail["content"]
|
||||
|
||||
|
||||
class TestCrewKickoffCompaction:
    """Compaction triggered through Crew.kickoff() with a tiny context window."""

    @pytest.mark.vcr()
    def test_crew_kickoff_compaction_openai(self) -> None:
        """kickoff() completes when context_window_size forces compaction."""
        llm = LLM(model="gpt-4o-mini", temperature=0)
        # Shrink the window so compaction is likely to kick in during the run.
        llm.context_window_size = 500

        researcher = Agent(
            role="Researcher",
            goal="Find information about Python programming",
            backstory="You are an expert researcher.",
            llm=llm,
            verbose=False,
            max_iter=2,
        )

        research_task = Task(
            description="What is Python? Give a brief answer.",
            expected_output="A short description of Python.",
            agent=researcher,
        )

        crew = Crew(agents=[researcher], tasks=[research_task], verbose=False)

        # Whether compaction actually fires depends on real response sizes;
        # the assertion only verifies the code path completes without crashing.
        result = crew.kickoff()
        assert result is not None
|
||||
|
||||
|
||||
class TestAgentExecuteTaskCompaction:
    """Compaction triggered through Agent.execute_task()."""

    @pytest.mark.vcr()
    def test_agent_execute_task_compaction(self) -> None:
        """execute_task() completes with a small context_window_size."""
        llm = LLM(model="gpt-4o-mini", temperature=0)
        # Small window to make compaction likely during execution.
        llm.context_window_size = 500

        writer = Agent(
            role="Writer",
            goal="Write concise content",
            backstory="You are a skilled writer.",
            llm=llm,
            verbose=False,
            max_iter=2,
        )

        writing_task = Task(
            description="Write one sentence about the sun.",
            expected_output="A single sentence about the sun.",
            agent=writer,
        )

        result = writer.execute_task(task=writing_task)
        assert result is not None
|
||||
|
||||
|
||||
class TestSummarizePreservesFiles:
    """File attachments must survive a real summarization round-trip."""

    @pytest.mark.vcr()
    def test_summarize_preserves_files_integration(self) -> None:
        """File references carry over onto the summary message."""
        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_conversation_messages(
            include_system=True, include_files=True
        )

        summarize_messages(
            messages=messages,
            llm=llm,
            callbacks=[],
            i18n=i18n,
        )

        # System prompt must remain untouched.
        assert messages[0]["role"] == "system"

        # Summary message carries both the <summary> block and the files.
        tail = messages[-1]
        assert "<summary>" in tail["content"]
        assert "</summary>" in tail["content"]
        assert "files" in tail
        assert "reference.pdf" in tail["files"]
|
||||
Reference in New Issue
Block a user