interactions:
- request:
    body: '{"messages":[{"role":"user","content":"Say ''Hello, World!''"}],"model":"gpt-3.5-turbo"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '86'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-CjDtzWPzQqgm1wwCHZghRvbsczxnW\",\n \"object\": \"chat.completion\",\n \"created\": 1764894235,\n \"model\": \"gpt-3.5-turbo-0125\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello, World!\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\": 4,\n \"total_tokens\": 17,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": null\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 05 Dec 2025 00:23:55 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '248'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '267'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
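# A minimal sketch (kept in YAML comments so the cassette above stays valid)
# of how a recording like this is typically produced and replayed with vcrpy
# and the OpenAI Python SDK. This is an assumption about the workflow, not
# something stated in the cassette itself; the cassette directory, file name,
# and test function below are hypothetical, and the header scrubbing shown
# here only approximates whatever filter produced the "-XXX" placeholders.
#
#   import vcr
#   from openai import OpenAI
#
#   my_vcr = vcr.VCR(
#       cassette_library_dir="tests/cassettes",  # hypothetical path
#       # Replace sensitive headers with placeholders, as seen above.
#       filter_headers=[("authorization", "AUTHORIZATION-XXX"),
#                       ("set-cookie", "SET-COOKIE-XXX")],
#   )
#
#   def test_hello_world():
#       # First run records the HTTP exchange; later runs replay it offline.
#       with my_vcr.use_cassette("hello_world.yaml"):  # hypothetical name
#           client = OpenAI()
#           resp = client.chat.completions.create(
#               model="gpt-3.5-turbo",
#               messages=[{"role": "user", "content": "Say 'Hello, World!'"}],
#           )
#           assert resp.choices[0].message.content == "Hello, World!"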