refactor: update reasoning handling in Agent class

This commit modifies the Agent class to conditionally call the handle_reasoning function based on the executor class being used. The legacy CrewAgentExecutor will continue to utilize handle_reasoning, while the new AgentExecutor will manage planning internally. Additionally, the PlanningConfig class has been referenced in the documentation to clarify its role in enabling or disabling planning. Tests have been updated to reflect these changes and ensure proper functionality.
This commit is contained in:
lorenzejay
2026-02-02 16:27:39 -08:00
parent 50b9b42de9
commit 861da95aad
21 changed files with 1556 additions and 747 deletions

View File

@@ -32,6 +32,7 @@ from crewai.agent.utils import (
format_task_with_context,
get_knowledge_config,
handle_knowledge_retrieval,
handle_reasoning,
prepare_tools,
process_tool_results,
save_last_messages,
@@ -389,7 +390,11 @@ class Agent(BaseAgent):
ValueError: If the max execution time is not a positive integer.
RuntimeError: If the agent execution fails for other reasons.
"""
# Note: Planning is now handled inside AgentExecutor.generate_plan()
# Only call handle_reasoning for legacy CrewAgentExecutor
# For AgentExecutor, planning is handled in AgentExecutor.generate_plan()
if self.executor_class is not AgentExecutor:
handle_reasoning(self, task)
self._inject_date_to_task(task)
if self.tools_handler:
@@ -624,7 +629,10 @@ class Agent(BaseAgent):
ValueError: If the max execution time is not a positive integer.
RuntimeError: If the agent execution fails for other reasons.
"""
# Note: Planning is now handled inside AgentExecutor.generate_plan()
if not isinstance(self.executor_class, AgentExecutor):
handle_reasoning(
self, task
) # we need this till CrewAgentExecutor migrates to AgentExecutor
self._inject_date_to_task(task)
if self.tools_handler:

View File

@@ -11,8 +11,10 @@ class PlanningConfig(BaseModel):
This allows users to customize the planning behavior including prompts,
iteration limits, and the LLM used for planning.
Note: To disable planning, either omit planning_config or set planning=False
on the Agent. The presence of a PlanningConfig enables planning.
Attributes:
enabled: Whether planning is enabled. Defaults to True.
max_attempts: Maximum number of planning refinement attempts.
If None, will continue until the agent indicates readiness.
max_steps: Maximum number of steps in the generated plan.

View File

@@ -28,13 +28,20 @@ if TYPE_CHECKING:
def handle_reasoning(agent: Agent, task: Task) -> None:
"""Handle the reasoning process for an agent before task execution.
"""Handle the reasoning/planning process for an agent before task execution.
This function checks if planning is enabled for the agent and, if so,
creates a plan that gets appended to the task description.
Note: This function is used by CrewAgentExecutor (legacy path).
For AgentExecutor, planning is handled in AgentExecutor.generate_plan().
Args:
agent: The agent performing the task.
task: The task to execute.
"""
if not agent.reasoning:
# Check if planning is enabled using the planning_enabled property
if not getattr(agent, "planning_enabled", False):
return
try:
@@ -43,13 +50,13 @@ def handle_reasoning(agent: Agent, task: Task) -> None:
AgentReasoningOutput,
)
reasoning_handler = AgentReasoning(task=task, agent=agent)
reasoning_output: AgentReasoningOutput = (
reasoning_handler.handle_agent_reasoning()
planning_handler = AgentReasoning(agent=agent, task=task)
planning_output: AgentReasoningOutput = (
planning_handler.handle_agent_reasoning()
)
task.description += f"\n\nReasoning Plan:\n{reasoning_output.plan.plan}"
task.description += f"\n\nPlanning:\n{planning_output.plan.plan}"
except Exception as e:
agent._logger.log("error", f"Error during reasoning process: {e!s}")
agent._logger.log("error", f"Error during planning: {e!s}")
def build_task_prompt_with_schema(task: Task, task_prompt: str, i18n: I18N) -> str:

View File

@@ -543,9 +543,9 @@ class TestAgentExecutorPlanning:
assert "6" in str(result)
@pytest.mark.vcr()
def test_planning_config_disabled_skips_planning(self):
"""Test that PlanningConfig(enabled=False) skips planning."""
from crewai import Agent, PlanningConfig
def test_planning_disabled_skips_planning(self):
"""Test that planning=False skips planning."""
from crewai import Agent
from crewai.llm import LLM
llm = LLM("gpt-4o-mini")
@@ -555,7 +555,7 @@ class TestAgentExecutorPlanning:
goal="Help solve simple math problems",
backstory="A helpful assistant",
llm=llm,
planning_config=PlanningConfig(enabled=False),
planning=False, # Explicitly disable planning
verbose=False,
)
@@ -586,7 +586,6 @@ class TestAgentExecutorPlanning:
# Should have planning_config created from reasoning=True
assert agent.planning_config is not None
assert agent.planning_config.enabled is True
assert agent.planning_enabled is True
@pytest.mark.vcr()

View File

@@ -1,169 +1,18 @@
"""Tests for planning/reasoning in agents."""
import json
import warnings
import pytest
from crewai import Agent, PlanningConfig, Task
from crewai.experimental.agent_executor import AgentExecutor
from crewai.llm import LLM
@pytest.fixture
def mock_llm_responses():
"""Fixture for mock LLM responses."""
return {
"ready": "I'll solve this simple math problem.\n\nREADY: I am ready to execute the task.\n\n",
"not_ready": "I need to think about derivatives.\n\nNOT READY: I need to refine my plan because I'm not sure about the derivative rules.",
"ready_after_refine": "I'll use the power rule for derivatives where d/dx(x^n) = n*x^(n-1).\n\nREADY: I am ready to execute the task.",
"execution": "4",
}
# =============================================================================
# Tests for PlanningConfig (new API)
# Tests for PlanningConfig configuration (no LLM calls needed)
# =============================================================================
def test_agent_with_planning_config(mock_llm_responses):
"""Test agent with PlanningConfig."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the planning feature",
backstory="I am a test agent created to verify the planning feature works correctly.",
llm=llm,
planning_config=PlanningConfig(),
verbose=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
agent=agent,
)
call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
# First call is for planning, subsequent calls are for execution
call_count[0] += 1
if call_count[0] == 1:
return mock_llm_responses["ready"]
return mock_llm_responses["execution"]
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == mock_llm_responses["execution"]
assert "Planning:" in task.description
def test_agent_with_planning_config_max_attempts(mock_llm_responses):
"""Test agent with PlanningConfig and max_attempts."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Test Agent",
goal="To test the planning feature",
backstory="I am a test agent created to verify the planning feature works correctly.",
llm=llm,
planning_config=PlanningConfig(max_attempts=2),
verbose=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Complex math task: What's the derivative of x²?",
expected_output="The answer should be a mathematical expression.",
agent=agent,
)
planning_call_count = [0]
total_call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
total_call_count[0] += 1
# First 2 calls are for planning (initial + refine)
if total_call_count[0] <= 2:
planning_call_count[0] += 1
if planning_call_count[0] == 1:
return mock_llm_responses["not_ready"]
return mock_llm_responses["ready_after_refine"]
return "2x"
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == "2x"
assert planning_call_count[0] == 2
assert "Planning:" in task.description
def test_agent_with_planning_config_custom_prompts():
"""Test agent with PlanningConfig using custom prompts."""
llm = LLM("gpt-3.5-turbo")
custom_system_prompt = "You are a specialized planner."
custom_plan_prompt = "Plan this task: {description}"
agent = Agent(
role="Test Agent",
goal="To test custom prompts",
backstory="I am a test agent.",
llm=llm,
planning_config=PlanningConfig(
system_prompt=custom_system_prompt,
plan_prompt=custom_plan_prompt,
max_steps=10,
),
verbose=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Simple task",
expected_output="Some output",
agent=agent,
)
captured_messages = []
def mock_llm_call(messages, *args, **kwargs):
captured_messages.extend(messages)
return "My plan.\n\nREADY: I am ready to execute the task."
agent.llm.call = mock_llm_call
# Just test that the agent is created properly
assert agent.planning_config is not None
assert agent.planning_config.system_prompt == custom_system_prompt
assert agent.planning_config.plan_prompt == custom_plan_prompt
assert agent.planning_config.max_steps == 10
def test_agent_with_planning_config_disabled():
"""Test agent with PlanningConfig disabled."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test disabled planning",
backstory="I am a test agent.",
llm=llm,
planning=False,
verbose=True,
)
# Planning should be disabled
assert agent.planning_enabled is False
def test_planning_config_default_values():
"""Test PlanningConfig default values."""
config = PlanningConfig()
@@ -195,9 +44,53 @@ def test_planning_config_custom_values():
assert config.llm == "gpt-4"
def test_agent_with_planning_config_custom_prompts():
"""Test agent with PlanningConfig using custom prompts."""
llm = LLM("gpt-4o-mini")
custom_system_prompt = "You are a specialized planner."
custom_plan_prompt = "Plan this task: {description}"
agent = Agent(
role="Test Agent",
goal="To test custom prompts",
backstory="I am a test agent.",
llm=llm,
planning_config=PlanningConfig(
system_prompt=custom_system_prompt,
plan_prompt=custom_plan_prompt,
max_steps=10,
),
verbose=False,
)
# Just test that the agent is created properly
assert agent.planning_config is not None
assert agent.planning_config.system_prompt == custom_system_prompt
assert agent.planning_config.plan_prompt == custom_plan_prompt
assert agent.planning_config.max_steps == 10
def test_agent_with_planning_config_disabled():
"""Test agent with PlanningConfig disabled."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Test Agent",
goal="To test disabled planning",
backstory="I am a test agent.",
llm=llm,
planning=False,
verbose=False,
)
# Planning should be disabled
assert agent.planning_enabled is False
def test_planning_enabled_property():
"""Test the planning_enabled property on Agent."""
llm = LLM("gpt-3.5-turbo")
llm = LLM("gpt-4o-mini")
# With planning_config enabled
agent_with_planning = Agent(
@@ -230,16 +123,16 @@ def test_planning_enabled_property():
# =============================================================================
# Tests for backward compatibility with reasoning=True
# Tests for backward compatibility with reasoning=True (no LLM calls)
# =============================================================================
def test_agent_with_reasoning_backward_compat(mock_llm_responses):
def test_agent_with_reasoning_backward_compat():
"""Test agent with reasoning=True (backward compatibility)."""
llm = LLM("gpt-4o-mini")
# This should emit a deprecation warning
with warnings.catch_warnings(record=True) as w:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
agent = Agent(
role="Test Agent",
@@ -247,11 +140,8 @@ def test_agent_with_reasoning_backward_compat(mock_llm_responses):
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
verbose=True,
verbose=False,
)
# Check that a deprecation warning was issued
# Note: The warning may or may not be captured depending on how pydantic handles it
# So we just verify the agent is created correctly
# Should have created a PlanningConfig internally
assert agent.planning_config is not None
@@ -269,7 +159,7 @@ def test_agent_with_reasoning_and_max_attempts_backward_compat():
llm=llm,
reasoning=True,
max_reasoning_attempts=5,
verbose=True,
verbose=False,
)
# Should have created a PlanningConfig with max_attempts
@@ -277,201 +167,179 @@ def test_agent_with_reasoning_and_max_attempts_backward_compat():
assert agent.planning_config.max_attempts == 5
def test_agent_with_reasoning_not_ready_initially(mock_llm_responses):
"""Test agent with reasoning that requires refinement (backward compat)."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
max_reasoning_attempts=2,
verbose=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Complex math task: What's the derivative of x²?",
expected_output="The answer should be a mathematical expression.",
agent=agent,
)
planning_call_count = [0]
total_call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
total_call_count[0] += 1
# First 2 calls are for planning (initial + refine)
if total_call_count[0] <= 2:
planning_call_count[0] += 1
if planning_call_count[0] == 1:
return mock_llm_responses["not_ready"]
return mock_llm_responses["ready_after_refine"]
return "2x"
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == "2x"
assert planning_call_count[0] == 2 # Should have made 2 planning calls
assert "Planning:" in task.description
def test_agent_with_reasoning_max_attempts_reached():
"""Test agent with reasoning that reaches max attempts without being ready."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
max_reasoning_attempts=2,
verbose=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Complex math task: Solve the Riemann hypothesis.",
expected_output="A proof or disproof of the hypothesis.",
agent=agent,
)
planning_call_count = [0]
total_call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
total_call_count[0] += 1
# First 2 calls are for planning (all will return NOT READY)
if total_call_count[0] <= 2:
planning_call_count[0] += 1
return f"Attempt {planning_call_count[0]}: I need more time to think.\n\nNOT READY: I need to refine my plan further."
return "This is an unsolved problem in mathematics."
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == "This is an unsolved problem in mathematics."
assert (
planning_call_count[0] == 2
) # Should have made exactly 2 planning calls (max_attempts)
assert "Planning:" in task.description
def test_agent_reasoning_error_handling():
"""Test error handling during the planning process."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
executor_class=AgentExecutor, # Use AgentExecutor for planning support
)
task = Task(
description="Task that will cause an error",
expected_output="Output that will never be generated",
agent=agent,
)
call_count = [0]
def mock_llm_call_error(*args, **kwargs):
call_count[0] += 1
if call_count[0] <= 2: # First calls are for planning
raise Exception("LLM error during planning")
return "Fallback execution result" # Return a value for task execution
agent.llm.call = mock_llm_call_error
result = agent.execute_task(task)
assert result == "Fallback execution result"
assert call_count[0] > 0 # Ensure we called the mock at least once
# =============================================================================
# Tests for function calling
# Tests for Agent.kickoff() with planning (uses AgentExecutor)
# =============================================================================
@pytest.mark.skip(reason="Test requires updates for native tool calling changes")
def test_agent_with_function_calling():
"""Test agent with planning using function calling."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the planning feature",
backstory="I am a test agent created to verify the planning feature works correctly.",
llm=llm,
planning_config=PlanningConfig(),
verbose=True,
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
agent=agent,
)
agent.llm.supports_function_calling = lambda: True
def mock_function_call(messages, *args, **kwargs):
if "tools" in kwargs:
return json.dumps(
{"plan": "I'll solve this simple math problem: 2+2=4.", "ready": True}
)
return "4"
agent.llm.call = mock_function_call
result = agent.execute_task(task)
assert result == "4"
assert "Planning:" in task.description
assert "I'll solve this simple math problem: 2+2=4." in task.description
@pytest.mark.skip(reason="Test requires updates for native tool calling changes")
def test_agent_with_function_calling_fallback():
"""Test agent with planning using function calling that falls back to text parsing."""
@pytest.mark.vcr()
def test_agent_kickoff_with_planning():
"""Test Agent.kickoff() with planning enabled generates a plan."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Test Agent",
goal="To test the planning feature",
backstory="I am a test agent created to verify the planning feature works correctly.",
role="Math Assistant",
goal="Help solve math problems step by step",
backstory="A helpful math tutor",
llm=llm,
planning_config=PlanningConfig(),
verbose=True,
planning_config=PlanningConfig(max_attempts=1),
verbose=False,
)
result = agent.kickoff("What is 15 + 27?")
assert result is not None
assert "42" in str(result)
@pytest.mark.vcr()
def test_agent_kickoff_without_planning():
"""Test Agent.kickoff() without planning skips plan generation."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Assistant",
goal="Help solve math problems",
backstory="A helpful assistant",
llm=llm,
# No planning_config = no planning
verbose=False,
)
result = agent.kickoff("What is 8 * 7?")
assert result is not None
assert "56" in str(result)
@pytest.mark.vcr()
def test_agent_kickoff_with_planning_disabled():
"""Test Agent.kickoff() with planning explicitly disabled via planning=False."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Assistant",
goal="Help solve math problems",
backstory="A helpful assistant",
llm=llm,
planning=False, # Explicitly disable planning
verbose=False,
)
result = agent.kickoff("What is 100 / 4?")
assert result is not None
assert "25" in str(result)
@pytest.mark.vcr()
def test_agent_kickoff_multi_step_task_with_planning():
"""Test Agent.kickoff() with a multi-step task that benefits from planning."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Tutor",
goal="Solve multi-step math problems",
backstory="An expert tutor who explains step by step",
llm=llm,
planning_config=PlanningConfig(max_attempts=1, max_steps=5),
verbose=False,
)
# Task requires: find primes, sum them, then double
result = agent.kickoff(
"Find the first 3 prime numbers, add them together, then multiply by 2."
)
assert result is not None
# First 3 primes: 2, 3, 5 -> sum = 10 -> doubled = 20
assert "20" in str(result)
# =============================================================================
# Tests for Agent.execute_task() with planning (uses CrewAgentExecutor)
# These test the legacy path via handle_reasoning()
# =============================================================================
@pytest.mark.vcr()
def test_agent_execute_task_with_planning():
"""Test Agent.execute_task() with planning via CrewAgentExecutor."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Assistant",
goal="Help solve math problems",
backstory="A helpful math tutor",
llm=llm,
planning_config=PlanningConfig(max_attempts=1),
verbose=False,
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
description="What is 9 + 11?",
expected_output="A number",
agent=agent,
)
agent.llm.supports_function_calling = lambda: True
result = agent.execute_task(task)
def mock_function_call(messages, *args, **kwargs):
if "tools" in kwargs:
return "Invalid JSON that will trigger fallback. READY: I am ready to execute the task."
return "4"
assert result is not None
assert "20" in str(result)
# Planning should be appended to task description
assert "Planning:" in task.description
agent.llm.call = mock_function_call
@pytest.mark.vcr()
def test_agent_execute_task_without_planning():
"""Test Agent.execute_task() without planning."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Assistant",
goal="Help solve math problems",
backstory="A helpful assistant",
llm=llm,
verbose=False,
)
task = Task(
description="What is 12 * 3?",
expected_output="A number",
agent=agent,
)
result = agent.execute_task(task)
assert result == "4"
assert result is not None
assert "36" in str(result)
# No planning should be added
assert "Planning:" not in task.description
@pytest.mark.vcr()
def test_agent_execute_task_with_planning_refine():
"""Test Agent.execute_task() with planning that requires refinement."""
llm = LLM("gpt-4o-mini")
agent = Agent(
role="Math Tutor",
goal="Solve complex math problems step by step",
backstory="An expert tutor",
llm=llm,
planning_config=PlanningConfig(max_attempts=2),
verbose=False,
)
task = Task(
description="Calculate the area of a circle with radius 5 (use pi = 3.14)",
expected_output="The area as a number",
agent=agent,
)
result = agent.execute_task(task)
assert result is not None
# Area = pi * r^2 = 3.14 * 25 = 78.5
assert "78" in str(result) or "79" in str(result)
assert "Planning:" in task.description
assert "Invalid JSON that will trigger fallback" in task.description

View File

@@ -55,19 +55,17 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyahpAp1QHu0DdYtVebYWRoTjAz\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076232,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTTAh68P65LybtqkwNI3p2HXcRv\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078147,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_ReffQXRmvwEFPixuZFQespq4\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"create_reasoning_plan\",\n
\ \"arguments\": \"{\\\"plan\\\":\\\"1. Identify the numbers to
add (2 and 2). 2. Perform the addition operation (2 + 2). 3. State the result
(4).\\\",\\\"ready\\\":true}\"\n }\n }\n ],\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
281,\n \"completion_tokens\": 54,\n \"total_tokens\": 335,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. **Action:**
Perform the addition operation. \\n **Tool:** None (manually calculate).\\n\\n2.
**Action:** State the result. \\n **Tool:** None (manually output).\\n\\nREADY:
I am ready to execute the task.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 281,\n \"completion_tokens\":
56,\n \"total_tokens\": 337,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
@@ -79,7 +77,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:34 GMT
- Tue, 03 Feb 2026 00:22:28 GMT
Server:
- cloudflare
Set-Cookie:
@@ -99,7 +97,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1341'
- '1165'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
@@ -169,16 +167,16 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyc31JtkvXJRAzJjgkXtTHHn1kR\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076234,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTVB9mdtq1YZrUVf1aSb6dVVQ8G\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078149,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem 2 + 2, we can
follow these steps:\\n\\n1. Start with the first number: 2.\\n2. Add the second
number: another 2.\\n\\nSo, when we combine them together:\\n2 + 2 = 4\\n\\nTherefore,
the answer is 4.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 54,\n \"completion_tokens\":
62,\n \"total_tokens\": 116,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
\"assistant\",\n \"content\": \"To solve the problem of 2 + 2, we simply
perform the addition:\\n\\n1. Start with the first number: 2\\n2. Add the
second number: + 2\\n3. Combine the two: 2 + 2 = 4\\n\\nTherefore, the answer
is 4.\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 54,\n \"completion_tokens\": 62,\n
\ \"total_tokens\": 116,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
@@ -191,7 +189,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:35 GMT
- Tue, 03 Feb 2026 00:22:30 GMT
Server:
- cloudflare
Strict-Transport-Security:
@@ -209,7 +207,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1283'
- '1300'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -42,8 +42,8 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyWvGJEtyPDSCnX2XwejaWrs49u\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076228,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTTFxQ75llVmJv0ee902FIjXE8p\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078147,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"3 + 3 equals 6.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
@@ -61,7 +61,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:28 GMT
- Tue, 03 Feb 2026 00:22:27 GMT
Server:
- cloudflare
Set-Cookie:
@@ -81,7 +81,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '317'
- '401'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -55,16 +55,15 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyXBOoUik3wsrFfXtqDz1DUX6mo\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076229,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTdqlxwWowSdLncBERFrCgxTvVj\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078157,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. **Perform
the Addition**: Calculate the sum of 7 and 7.\\n\\nREADY: I am ready to execute
the task.\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 281,\n \"completion_tokens\": 33,\n
\ \"total_tokens\": 314,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. Calculate
the sum of 7 and 7.\\n \\nREADY: I am ready to execute the task.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
281,\n \"completion_tokens\": 28,\n \"total_tokens\": 309,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
@@ -76,7 +75,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:30 GMT
- Tue, 03 Feb 2026 00:22:38 GMT
Server:
- cloudflare
Set-Cookie:
@@ -96,7 +95,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1242'
- '709'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
@@ -166,17 +165,15 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyZNzghoqg3nZdUC9buVZh1Y5J1\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076231,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTeB6Miecallw9SjSfLAXPjX2XD\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078158,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem, we simply need
to add the two numbers together.\\n\\n1. Start with the first number: 7\\n2.
Add the second number: + 7\\n3. Calculate the sum: \\n\\n \\\\(7 + 7 = 14\\\\)\\n\\nTherefore,
the answer is **14**.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 54,\n \"completion_tokens\":
64,\n \"total_tokens\": 118,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
\"assistant\",\n \"content\": \"To find the sum of 7 and 7, you simply
add the two numbers together:\\n\\n7 + 7 = 14\\n\\nSo, the answer is 14.\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
54,\n \"completion_tokens\": 35,\n \"total_tokens\": 89,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
@@ -188,7 +185,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:32 GMT
- Tue, 03 Feb 2026 00:22:38 GMT
Server:
- cloudflare
Strict-Transport-Security:
@@ -206,7 +203,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1221'
- '733'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -42,13 +42,13 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyXN89X6NeSJ8vEcgJil3w2xgLB\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076229,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTf8T2iADffpPCJBZhntLlaoaSy\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078159,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The sum of 5 + 5 is 10.\",\n \"refusal\":
\"assistant\",\n \"content\": \"5 + 5 equals 10.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
47,\n \"completion_tokens\": 12,\n \"total_tokens\": 59,\n \"prompt_tokens_details\":
47,\n \"completion_tokens\": 8,\n \"total_tokens\": 55,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
@@ -61,7 +61,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:29 GMT
- Tue, 03 Feb 2026 00:22:40 GMT
Server:
- cloudflare
Set-Cookie:
@@ -81,7 +81,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '448'
- '515'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -57,17 +57,17 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyH78UJ0T1OmoLcmVuQOtaKHjkE\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076213,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTWa7FxCHkHwHF25AYXXeJDBOuY\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078150,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. Identify
the first 3 prime numbers: 2, 3, and 5.\\n2. Calculate the sum of these prime
numbers: \\\\( 2 + 3 + 5 \\\\).\\n3. Multiply the sum by 2: \\\\( (2 + 3 +
5) \\\\times 2 \\\\).\\n\\nREADY: I am ready to execute the task.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
299,\n \"completion_tokens\": 81,\n \"total_tokens\": 380,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
the first 3 prime numbers: 2, 3, and 5.\\n2. Calculate the sum: \\\\(2 + 3
+ 5 = 10\\\\).\\n3. Multiply the sum by 2: \\\\(10 \\\\times 2 = 20\\\\).\\n\\nREADY:
I am ready to execute the task.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 299,\n \"completion_tokens\":
74,\n \"total_tokens\": 373,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
@@ -79,7 +79,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:15 GMT
- Tue, 03 Feb 2026 00:22:32 GMT
Server:
- cloudflare
Set-Cookie:
@@ -99,7 +99,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1605'
- '1716'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
@@ -170,25 +170,26 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyJEANwe4wyOgRyp06mcZkzkQRk\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076215,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTYJgCZf2oY7wiPMZmN4QEQhHb5\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078152,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem, we will break
it down into clear steps.\\n\\n### Step 1: Identify the first 3 prime numbers\\nPrime
numbers are natural numbers greater than 1 that have no positive divisors
other than 1 and themselves. The first three prime numbers are:\\n\\n- 2\\n-
3\\n- 5\\n\\n### Step 2: Calculate the sum of the first 3 prime numbers\\nNow
we need to add these three prime numbers together:\\n\\n\\\\[\\n2 + 3 + 5\\n\\\\]\\n\\nCalculating
that step-by-step:\\n\\n1. Add the first two prime numbers:\\n \\\\[\\n
\ 2 + 3 = 5\\n \\\\]\\n\\n2. Now add the result to the third prime number:\\n
\ \\\\[\\n 5 + 5 = 10\\n \\\\]\\n\\nSo, the sum of the first 3 prime
numbers is:\\n\\\\[\\n10\\n\\\\]\\n\\n### Step 3: Multiply the sum by 2\\nNow,
we take the sum (which is 10) and multiply it by 2:\\n\\n\\\\[\\n10 \\\\times
2\\n\\\\]\\n\\nCalculating this gives us:\\n\\\\[\\n10 \\\\times 2 = 20\\n\\\\]\\n\\n###
Final Result\\nThus, the final result of the entire operation is:\\n\\\\[\\n\\\\boxed{20}\\n\\\\]\",\n
\"assistant\",\n \"content\": \"To solve the problem, let's break it
down into two main steps: \\n\\n1. Calculate the sum of the first 3 prime
numbers.\\n2. Multiply the result of that sum by 2.\\n\\n### Step 1: Identify
the first 3 prime numbers\\nPrime numbers are natural numbers greater than
1 that have no positive divisors other than 1 and themselves. \\n\\nThe first
three prime numbers are:\\n- 2\\n- 3\\n- 5\\n\\n### Step 2: Calculate the
sum of the first 3 prime numbers\\nNow, we add these prime numbers together:\\n\\n\\\\[\\n2
+ 3 + 5\\n\\\\]\\n\\nCalculating this step-by-step:\\n- First, add 2 and 3:\\n
\ \\\\[\\n 2 + 3 = 5\\n \\\\]\\n \\n- Next, add this result to 5:\\n \\\\[\\n
\ 5 + 5 = 10\\n \\\\]\\n\\nSo, the sum of the first 3 prime numbers is \\\\(10\\\\).\\n\\n###
Step 3: Multiply the sum by 2\\nNext, we take the sum we calculated and multiply
it by 2:\\n\\n\\\\[\\n10 \\\\times 2\\n\\\\]\\n\\nCalculating this:\\n\\\\[\\n10
\\\\times 2 = 20\\n\\\\]\\n\\n### Final Answer\\nThus, the final result obtained
after performing all the steps is:\\n\\n\\\\[\\n\\\\boxed{20}\\n\\\\]\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
74,\n \"completion_tokens\": 270,\n \"total_tokens\": 344,\n \"prompt_tokens_details\":
74,\n \"completion_tokens\": 288,\n \"total_tokens\": 362,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
@@ -201,7 +202,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:20 GMT
- Tue, 03 Feb 2026 00:22:37 GMT
Server:
- cloudflare
Strict-Transport-Security:
@@ -219,7 +220,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '4629'
- '4751'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -1,6 +1,8 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Calculate 2 + 2\n\nThis is the expected criteria for your final answer: The result of the calculation\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
assistant\nYour personal goal is: Help solve simple math problems"},{"role":"user","content":"\nCurrent
Task: What is 5 + 5?\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
@@ -13,7 +15,7 @@ interactions:
connection:
- keep-alive
content-length:
- '797'
- '260'
content-type:
- application/json
host:
@@ -35,13 +37,22 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CjDsYJQa2tIYBbNloukSWecpsTvdK\",\n \"object\": \"chat.completion\",\n \"created\": 1764894146,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal Answer: The result of the calculation 2 + 2 is 4.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": 25,\n \"total_tokens\": 186,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_11f3029f6b\"\
\n}\n"
string: "{\n \"id\": \"chatcmpl-D4yXGD5IrieoUDSK5hDmJyA2gJtDc\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078382,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"5 + 5 equals 10.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
47,\n \"completion_tokens\": 8,\n \"total_tokens\": 55,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
@@ -50,7 +61,7 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 05 Dec 2025 00:22:27 GMT
- Tue, 03 Feb 2026 00:26:23 GMT
Server:
- cloudflare
Set-Cookie:
@@ -70,13 +81,11 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '516'
- '363'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '529'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:

View File

@@ -1,74 +1,4 @@
interactions:
- request:
body: '{"trace_id": "ded15959-7701-4de0-87c2-ba10536a939f", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.9.3", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2026-02-02T23:50:09.124595+00:00"}}'
headers:
Accept:
- '*/*'
Connection:
- keep-alive
Content-Length:
- '434'
Content-Type:
- application/json
User-Agent:
- X-USER-AGENT-XXX
X-Crewai-Organization-Id:
- 3433f0ee-8a94-4aa4-822b-2ac71aa38b18
X-Crewai-Version:
- 1.9.3
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Mon, 02 Feb 2026 23:50:09 GMT
cache-control:
- no-store
content-security-policy:
- CSP-FILTERED
expires:
- '0'
permissions-policy:
- PERMISSIONS-POLICY-XXX
pragma:
- no-cache
referrer-policy:
- REFERRER-POLICY-XXX
strict-transport-security:
- STS-XXX
vary:
- Accept
x-content-type-options:
- X-CONTENT-TYPE-XXX
x-frame-options:
- X-FRAME-OPTIONS-XXX
x-permitted-cross-domain-policies:
- X-PERMITTED-XXX
x-request-id:
- X-REQUEST-ID-XXX
x-runtime:
- X-RUNTIME-XXX
x-xss-protection:
- X-XSS-PROTECTION-XXX
status:
code: 401
message: Unauthorized
- request:
body: '{"messages":[{"role":"system","content":"You are a strategic planning assistant.
Create minimal, effective execution plans. Prefer fewer steps over more."},{"role":"user","content":"Create
@@ -126,16 +56,16 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyDGlTupuHxuRHCJKabPp1Fnwlz\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076209,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTN8fHOefyzzhvdUOHjxdFDR2HW\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078141,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"1. Calculate the Fahrenheit equivalent
of 100 degrees Celsius using the formula: \\\\( F = (C \\\\times \\\\frac{9}{5})
+ 32 \\\\).\\n2. Round the result to the nearest 10.\\n\\nREADY: I am ready
to execute the task.\",\n \"refusal\": null,\n \"annotations\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. Convert 100
degrees Celsius to Fahrenheit using the formula: \\\\( F = C \\\\times \\\\frac{9}{5}
+ 32 \\\\).\\n2. Round the Fahrenheit result to the nearest 10.\\n\\nREADY:
I am ready to execute the task.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
56,\n \"total_tokens\": 347,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
58,\n \"total_tokens\": 349,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
@@ -148,7 +78,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:10 GMT
- Tue, 03 Feb 2026 00:22:22 GMT
Server:
- cloudflare
Set-Cookie:
@@ -168,7 +98,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1058'
- '1089'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
@@ -239,18 +169,22 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4xyEjrq01KMkBu8Quv6q61sR3vWb\",\n \"object\":
\"chat.completion\",\n \"created\": 1770076210,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
string: "{\n \"id\": \"chatcmpl-D4yTPQewXDyPdYHI4dHPH7YGHcRge\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078143,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To convert degrees Celsius to degrees
Fahrenheit, you can use the formula:\\n\\n\\\\[ F = \\\\frac{9}{5}C + 32 \\\\]\\n\\nFor
100 degrees Celsius, the conversion would be:\\n\\n\\\\[ F = \\\\frac{9}{5}
\\\\times 100 + 32 \\\\]\\n\\\\[ F = 180 + 32 \\\\]\\n\\\\[ F = 212 \\\\]\\n\\nNow,
rounding 212 to the nearest 10 gives us 210.\\n\\nTherefore, the final result
is **210 degrees Fahrenheit**.\",\n \"refusal\": null,\n \"annotations\":
\"assistant\",\n \"content\": \"To convert degrees Celsius to Fahrenheit,
you can use the formula:\\n\\n\\\\[ F = \\\\left( C \\\\times \\\\frac{9}{5}
\\\\right) + 32 \\\\]\\n\\nPlugging in 100 degrees Celsius:\\n\\n\\\\[ F =
\\\\left( 100 \\\\times \\\\frac{9}{5} \\\\right) + 32 \\\\]\\n\\nCalculating
that step-by-step:\\n\\n1. Multiply 100 by 9: \\n \\\\[ 100 \\\\times 9
= 900 \\\\]\\n\\n2. Divide by 5:\\n \\\\[ 900 \\\\div 5 = 180 \\\\]\\n\\n3.
Add 32:\\n \\\\[ 180 + 32 = 212 \\\\]\\n\\nSo, 100 degrees Celsius is equal
to 212 degrees Fahrenheit.\\n\\nNow, rounding 212 to the nearest 10:\\n\\nThe
nearest multiple of 10 to 212 is 210.\\n\\nTherefore, the final result is
**210 degrees Fahrenheit**.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 63,\n \"completion_tokens\":
104,\n \"total_tokens\": 167,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
191,\n \"total_tokens\": 254,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
@@ -263,7 +197,7 @@ interactions:
Content-Type:
- application/json
Date:
- Mon, 02 Feb 2026 23:50:13 GMT
- Tue, 03 Feb 2026 00:22:26 GMT
Server:
- cloudflare
Strict-Transport-Security:
@@ -281,7 +215,7 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '2517'
- '3736'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:

View File

@@ -1,99 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Summarize the given context in one sentence\n\nThis is the expected criteria for your final answer: A one-sentence summary\nyou MUST return the actual complete content as the final answer, not a summary.\n\nThis is the context you''re working with:\nThe quick brown fox jumps over the lazy dog. This sentence contains every letter of the alphabet.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-3.5-turbo"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '963'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CjDtsaX0LJ0dzZz02KwKeRGYgazv1\",\n \"object\": \"chat.completion\",\n \"created\": 1764894228,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer\\n\\nFinal Answer: The quick brown fox jumps over the lazy dog. This sentence contains every letter of the alphabet.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 191,\n \"completion_tokens\": 30,\n \"total_tokens\": 221,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
: \"default\",\n \"system_fingerprint\": null\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 05 Dec 2025 00:23:49 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '506'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '559'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,231 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are a strategic planning assistant.
Create minimal, effective execution plans. Prefer fewer steps over more."},{"role":"user","content":"Create
a focused execution plan for the following task:\n\n## Task\nWhat is 9 + 11?\n\n##
Expected Output\nA number\n\n## Available Tools\nNo tools available\n\n## Instructions\nCreate
ONLY the essential steps needed to complete this task. Use the MINIMUM number
of steps required - do NOT pad your plan with unnecessary steps. Most tasks
need only 2-5 steps.\n\nFor each step:\n- State the specific action to take\n-
Specify which tool to use (if any)\n\nDo NOT include:\n- Setup or preparation
steps that are obvious\n- Verification steps unless critical\n- Documentation
or cleanup steps unless explicitly required\n- Generic steps like \"review results\"
or \"finalize output\"\n\nAfter your plan, state:\n- \"READY: I am ready to
execute the task.\" if the plan is complete\n- \"NOT READY: I need to refine
my plan because [reason].\" if you need more thinking"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_reasoning_plan","description":"Create
or refine a reasoning plan for a task","strict":true,"parameters":{"type":"object","properties":{"plan":{"type":"string","description":"The
detailed reasoning plan for the task."},"ready":{"type":"boolean","description":"Whether
the agent is ready to execute the task."}},"required":["plan","ready"],"additionalProperties":false}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1520'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yVACNTzZcghQRwt5kFYQ4HAvbgI\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078252,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"## Execution Plan\\n1. Calculate the
sum of 9 and 11.\\n \\nREADY: I am ready to execute the task.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
279,\n \"completion_tokens\": 28,\n \"total_tokens\": 307,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:24:13 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '951'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
math tutor\nYour personal goal is: Help solve math problems"},{"role":"user","content":"\nCurrent
Task: What is 9 + 11?\n\nPlanning:\n## Execution Plan\n1. Calculate the sum
of 9 and 11.\n \nREADY: I am ready to execute the task.\n\nThis is the expected
criteria for your final answer: A number\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '513'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yVBdTCKSdfcJYlIOX9BbzrObgFI\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078253,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"9 + 11 = 20\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
105,\n \"completion_tokens\": 7,\n \"total_tokens\": 112,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:24:13 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '477'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,243 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are a strategic planning assistant.
Create minimal, effective execution plans. Prefer fewer steps over more."},{"role":"user","content":"Create
a focused execution plan for the following task:\n\n## Task\nCalculate the area
of a circle with radius 5 (use pi = 3.14)\n\n## Expected Output\nThe area as
a number\n\n## Available Tools\nNo tools available\n\n## Instructions\nCreate
ONLY the essential steps needed to complete this task. Use the MINIMUM number
of steps required - do NOT pad your plan with unnecessary steps. Most tasks
need only 2-5 steps.\n\nFor each step:\n- State the specific action to take\n-
Specify which tool to use (if any)\n\nDo NOT include:\n- Setup or preparation
steps that are obvious\n- Verification steps unless critical\n- Documentation
or cleanup steps unless explicitly required\n- Generic steps like \"review results\"
or \"finalize output\"\n\nAfter your plan, state:\n- \"READY: I am ready to
execute the task.\" if the plan is complete\n- \"NOT READY: I need to refine
my plan because [reason].\" if you need more thinking"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_reasoning_plan","description":"Create
or refine a reasoning plan for a task","strict":true,"parameters":{"type":"object","properties":{"plan":{"type":"string","description":"The
detailed reasoning plan for the task."},"ready":{"type":"boolean","description":"Whether
the agent is ready to execute the task."}},"required":["plan","ready"],"additionalProperties":false}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1577'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yVCdA1csIzfoHSQvxkfrA4gDn4z\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078254,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"## Execution Plan\\n1. Multiply the
radius (5) by itself (5) to get the square of the radius.\\n2. Multiply the
squared radius by pi (3.14) to calculate the area.\\n\\nREADY: I am ready
to execute the task.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
54,\n \"total_tokens\": 347,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:24:15 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '845'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Math Tutor. An expert
tutor\nYour personal goal is: Solve complex math problems step by step"},{"role":"user","content":"\nCurrent
Task: Calculate the area of a circle with radius 5 (use pi = 3.14)\n\nPlanning:\n##
Execution Plan\n1. Multiply the radius (5) by itself (5) to get the square of
the radius.\n2. Multiply the squared radius by pi (3.14) to calculate the area.\n\nREADY:
I am ready to execute the task.\n\nThis is the expected criteria for your final
answer: The area as a number\nyou MUST return the actual complete content as
the final answer, not a summary.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '682'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yVDh2U2xx3qeYHcDQvbetOmVCxb\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078255,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To calculate the area of a circle with
a radius of 5, we will follow the steps outlined in the execution plan.\\n\\n1.
**Square the radius**:\\n \\\\[\\n 5 \\\\times 5 = 25\\n \\\\]\\n\\n2.
**Multiply the squared radius by pi (using \\\\(\\\\pi \\\\approx 3.14\\\\))**:\\n
\ \\\\[\\n \\\\text{Area} = \\\\pi \\\\times (\\\\text{radius})^2 = 3.14
\\\\times 25\\n \\\\]\\n\\n Now, let's perform the multiplication:\\n
\ \\\\[\\n 3.14 \\\\times 25 = 78.5\\n \\\\]\\n\\nThus, the area of the
circle is \\\\( \\\\boxed{78.5} \\\\).\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 147,\n \"completion_tokens\":
155,\n \"total_tokens\": 302,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:24:18 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '2228'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,99 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour personal goal is: test goal\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: dummy_tool\nTool Arguments: {''query'': {''description'': None, ''type'': ''str''}}\nTool Description: Useful for when you need to get a dummy result for a query.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [dummy_tool], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
Task: Use the dummy tool to get a result for ''test query''\n\nThis is the expected criteria for your final answer: The result from the dummy tool\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-3.5-turbo"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1381'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CjDrE1Z8bFQjjxI2vDPPKgtOTm28p\",\n \"object\": \"chat.completion\",\n \"created\": 1764894064,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"you should always think about what to do\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 289,\n \"completion_tokens\": 8,\n \"total_tokens\": 297,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": null\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 05 Dec 2025 00:21:05 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '379'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '399'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,6 +1,10 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour personal goal is: test goal\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Write a haiku about AI\n\nThis is the expected criteria for your final answer: A haiku (3 lines, 5-7-5 syllable pattern) about AI\nyou MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-3.5-turbo","max_tokens":50,"temperature":0.7}'
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
assistant\nYour personal goal is: Help solve math problems"},{"role":"user","content":"\nCurrent
Task: What is 12 * 3?\n\nThis is the expected criteria for your final answer:
A number\nyou MUST return the actual complete content as the final answer, not
a summary.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
@@ -13,7 +17,7 @@ interactions:
connection:
- keep-alive
content-length:
- '861'
- '400'
content-type:
- application/json
host:
@@ -35,13 +39,22 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CjDqr2BmEXQ08QzZKslTZJZ5vV9lo\",\n \"object\": \"chat.completion\",\n \"created\": 1764894041,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"I now can give a great answer\\n\\nFinal Answer: \\nIn circuits they thrive, \\nArtificial minds awake, \\nFuture's coded drive.\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 174,\n \"completion_tokens\": 29,\n \"total_tokens\": 203,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\"\
,\n \"system_fingerprint\": null\n}\n"
string: "{\n \"id\": \"chatcmpl-D4yVCw0CGLFmcVvniplwCCt8avtRb\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078254,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"12 * 3 = 36\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
75,\n \"completion_tokens\": 7,\n \"total_tokens\": 82,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
@@ -50,7 +63,7 @@ interactions:
Content-Type:
- application/json
Date:
- Fri, 05 Dec 2025 00:20:41 GMT
- Tue, 03 Feb 2026 00:24:14 GMT
Server:
- cloudflare
Set-Cookie:
@@ -70,13 +83,11 @@ interactions:
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '434'
- '331'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '456'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:

View File

@@ -0,0 +1,243 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are a strategic planning assistant.
Create minimal, effective execution plans. Prefer fewer steps over more."},{"role":"user","content":"Create
a focused execution plan for the following task:\n\n## Task\nFind the first
3 prime numbers, add them together, then multiply by 2.\n\n## Expected Output\nComplete
the task successfully\n\n## Available Tools\nNo tools available\n\n## Instructions\nCreate
ONLY the essential steps needed to complete this task. Use the MINIMUM number
of steps required - do NOT pad your plan with unnecessary steps. Most tasks
need only 2-5 steps.\n\nFor each step:\n- State the specific action to take\n-
Specify which tool to use (if any)\n\nDo NOT include:\n- Setup or preparation
steps that are obvious\n- Verification steps unless critical\n- Documentation
or cleanup steps unless explicitly required\n- Generic steps like \"review results\"
or \"finalize output\"\n\nAfter your plan, state:\n- \"READY: I am ready to
execute the task.\" if the plan is complete\n- \"NOT READY: I need to refine
my plan because [reason].\" if you need more thinking"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_reasoning_plan","description":"Create
or refine a reasoning plan for a task","strict":true,"parameters":{"type":"object","properties":{"plan":{"type":"string","description":"The
detailed reasoning plan for the task."},"ready":{"type":"boolean","description":"Whether
the agent is ready to execute the task."}},"required":["plan","ready"],"additionalProperties":false}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1597'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yU0MD5GfSUjRW0R4cBmFJ6Hcjbi\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078180,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"### Execution Plan\\n1. Identify the
first 3 prime numbers: 2, 3, and 5.\\n2. Add the prime numbers together: 2
+ 3 + 5 = 10.\\n3. Multiply the sum by 2: 10 * 2 = 20.\\n\\nREADY: I am ready
to execute the task.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
73,\n \"total_tokens\": 364,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:23:02 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1253'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Math Tutor. An expert
tutor who explains step by step\nYour personal goal is: Solve multi-step math
problems"},{"role":"user","content":"\nCurrent Task: Find the first 3 prime
numbers, add them together, then multiply by 2.\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '333'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yU2qY6Xqpkz2D5yVAwagQzuPpen\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078182,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem, let\u2019s go
through each step methodically.\\n\\n1. **Identify the first three prime numbers**:\\n
\ - **Prime numbers** are numbers greater than 1 that have no positive divisors
other than 1 and themselves.\\n - The first three prime numbers are:\\n
\ - 2\\n - 3\\n - 5\\n\\n2. **Add these prime numbers together**:\\n
\ - We add them together:\\n \\\\[\\n 2 + 3 + 5\\n \\\\]\\n -
Performing the addition step-by-step:\\n - First, add 2 and 3:\\n \\\\[\\n
\ 2 + 3 = 5\\n \\\\]\\n - Then add 5 to this result:\\n \\\\[\\n
\ 5 + 5 = 10\\n \\\\]\\n - So, the sum of the first three prime
numbers is **10**.\\n\\n3. **Multiply the sum by 2**:\\n - Now we multiply
the result by 2:\\n \\\\[\\n 10 \\\\times 2 = 20\\n \\\\]\\n \\nTherefore,
the final answer is **20**.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 62,\n \"completion_tokens\":
236,\n \"total_tokens\": 298,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:23:06 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '3846'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,238 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are a strategic planning assistant.
Create minimal, effective execution plans. Prefer fewer steps over more."},{"role":"user","content":"Create
a focused execution plan for the following task:\n\n## Task\nWhat is 15 + 27?\n\n##
Expected Output\nComplete the task successfully\n\n## Available Tools\nNo tools
available\n\n## Instructions\nCreate ONLY the essential steps needed to complete
this task. Use the MINIMUM number of steps required - do NOT pad your plan with
unnecessary steps. Most tasks need only 2-5 steps.\n\nFor each step:\n- State
the specific action to take\n- Specify which tool to use (if any)\n\nDo NOT
include:\n- Setup or preparation steps that are obvious\n- Verification steps
unless critical\n- Documentation or cleanup steps unless explicitly required\n-
Generic steps like \"review results\" or \"finalize output\"\n\nAfter your plan,
state:\n- \"READY: I am ready to execute the task.\" if the plan is complete\n-
\"NOT READY: I need to refine my plan because [reason].\" if you need more thinking"}],"model":"gpt-4o-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"create_reasoning_plan","description":"Create
or refine a reasoning plan for a task","strict":true,"parameters":{"type":"object","properties":{"plan":{"type":"string","description":"The
detailed reasoning plan for the task."},"ready":{"type":"boolean","description":"Whether
the agent is ready to execute the task."}},"required":["plan","ready"],"additionalProperties":false}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1543'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yTrm3GkzDX47DIcce9uA3iF8kFE\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078171,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"## Execution Plan\\n\\n1. Calculate
the sum of 15 and 27.\\n\\nREADY: I am ready to execute the task.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
281,\n \"completion_tokens\": 27,\n \"total_tokens\": 308,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:22:51 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '691'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
math tutor\nYour personal goal is: Help solve math problems step by step"},{"role":"user","content":"\nCurrent
Task: What is 15 + 27?\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '269'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yTrUOvExA9fTFDwYxvG4xEgRP6L\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078171,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem \\\\( 15 + 27
\\\\), we can follow these steps:\\n\\n1. **Align the numbers**: Write them
one on top of the other, aligned by their rightmost digits:\\n\\n ```\\n
\ 15\\n + 27\\n ```\\n\\n2. **Add the units place**: Start from the
rightmost digits (units place):\\n - \\\\( 5 + 7 = 12 \\\\)\\n - Write
down 2 and carry over 1.\\n\\n3. **Add the tens place**: Now, move to the
next column (tens place):\\n - \\\\( 1 + 2 + 1 \\\\) (the 1 is from the
carry) \\\\( = 4 \\\\)\\n\\n4. **Combine the results**: Now, combine the results
from the tens and units places:\\n - The result in the tens place is 4 and
in the units place is 2, giving us \\\\( 42 \\\\).\\n\\nTherefore, \\\\( 15
+ 27 = 42 \\\\).\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 50,\n \"completion_tokens\":
209,\n \"total_tokens\": 259,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:22:55 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '3263'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,110 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
assistant\nYour personal goal is: Help solve math problems"},{"role":"user","content":"\nCurrent
Task: What is 100 / 4?\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '255'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yU6mFapBLuCx4fJtYBup52dwwrs\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078186,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"To solve the problem 100 divided by
4, you can perform the division as follows:\\n\\n100 \xF7 4 = 25\\n\\nSo,
the answer is 25.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 46,\n \"completion_tokens\":
36,\n \"total_tokens\": 82,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:23:07 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1098'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,108 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. A helpful
assistant\nYour personal goal is: Help solve math problems"},{"role":"user","content":"\nCurrent
Task: What is 8 * 7?\n\nProvide your complete response:"}],"model":"gpt-4o-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '253'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D4yTqLFhGtfq2CyS2aPPhiZL4GjtQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1770078170,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"8 * 7 equals 56.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
46,\n \"completion_tokens\": 8,\n \"total_tokens\": 54,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_1590f93f9d\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 03 Feb 2026 00:22:50 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '443'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1