Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-22 06:18:14 +00:00
fix
@@ -7,8 +7,7 @@ when the LLM supports it, across multiple providers.
from __future__ import annotations

import os
from typing import Any
from unittest.mock import patch, MagicMock
from unittest.mock import patch

import pytest
from pydantic import BaseModel, Field
@@ -18,26 +17,6 @@ from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool


# Check for optional provider availability
try:
    import anthropic
    HAS_ANTHROPIC = True
except ImportError:
    HAS_ANTHROPIC = False

try:
    import google.genai
    HAS_GOOGLE_GENAI = True
except ImportError:
    HAS_GOOGLE_GENAI = False

try:
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False


class CalculatorInput(BaseModel):
    """Input schema for calculator tool."""

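For orientation, the calculator fixture these tests share wires a Pydantic schema into crewAI's BaseTool so that each provider can expose it through native tool calling. A minimal sketch of such a tool follows; the expression field and the eval-based body are assumptions, since the diff does not show the real implementation:

# Sketch only: field names and the _run body are assumptions, not the file's actual code.
from pydantic import BaseModel, Field

from crewai.tools.base_tool import BaseTool


class CalculatorInput(BaseModel):
    """Input schema for calculator tool."""

    expression: str = Field(..., description="Arithmetic expression, e.g. '15 * 8'")


class CalculatorTool(BaseTool):
    name: str = "calculator"
    description: str = "Evaluate a basic arithmetic expression and return the result."
    args_schema: type[BaseModel] = CalculatorInput

    def _run(self, expression: str) -> str:
        # eval is acceptable for a test fixture; never use it on untrusted input.
        return str(eval(expression))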
@@ -159,9 +138,6 @@ class TestOpenAINativeToolCalling:
# =============================================================================
# Anthropic Provider Tests
# =============================================================================


@pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
class TestAnthropicNativeToolCalling:
    """Tests for native tool calling with Anthropic models."""

@@ -235,42 +211,44 @@ class TestAnthropicNativeToolCalling:
# =============================================================================


@pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
class TestGeminiNativeToolCalling:
    """Tests for native tool calling with Gemini models."""

    @pytest.fixture(autouse=True)
    def mock_google_api_key(self):
        """Mock GOOGLE_API_KEY for tests."""
        with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
        if "GOOGLE_API_KEY" not in os.environ and "GEMINI_API_KEY" not in os.environ:
            with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
                yield
        else:
            yield

    @pytest.mark.vcr()
    def test_gemini_agent_with_native_tool_calling(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test Gemini agent can use native tool calling."""
        agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="gemini/gemini-2.0-flash-001"),
            verbose=False,
            max_iter=3,
        )
        with patch.dict(os.environ, {"GOOGLE_GENAI_USE_VERTEXAI": "true"}):
            agent = Agent(
                role="Math Assistant",
                goal="Help users with mathematical calculations",
                backstory="You are a helpful math assistant.",
                tools=[calculator_tool],
                llm=LLM(model="gemini/gemini-2.0-flash-exp"),
            )

        task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=agent,
        )
            task = Task(
                description="Calculate what is 15 * 8",
                expected_output="The result of the calculation",
                agent=agent,
            )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
            crew = Crew(agents=[agent], tasks=[task])
            result = crew.kickoff()

        assert result is not None
        assert result.raw is not None
            assert result is not None
            assert result.raw is not None

    def test_gemini_agent_kickoff_with_tools_mocked(
        self, calculator_tool: CalculatorTool
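Because the hunk above interleaves the old and new versions of the test body, the Vertex-backed variant is easier to read assembled on its own. The following is a sketch, under the assumption that the GOOGLE_GENAI_USE_VERTEXAI branch with gemini-2.0-flash-exp is the side that matches the cassette added later in this commit; it is not a verbatim copy of the file:

# Assembled sketch of the Vertex-backed test body (helper name is hypothetical).
import os
from unittest.mock import patch

from crewai import Agent, Crew, Task
from crewai.llm import LLM


def run_vertex_gemini_check(calculator_tool) -> None:
    with patch.dict(os.environ, {"GOOGLE_GENAI_USE_VERTEXAI": "true"}):
        agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="gemini/gemini-2.0-flash-exp"),
        )
        task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        assert result is not None
        assert result.raw is not None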
@@ -358,7 +336,6 @@ class TestAzureNativeToolCalling:
# =============================================================================


@pytest.mark.skipif(not HAS_BOTO3, reason="boto3 package not installed")
class TestBedrockNativeToolCalling:
    """Tests for native tool calling with AWS Bedrock models."""

@@ -417,7 +394,6 @@ class TestNativeToolCallingBehavior:
        assert hasattr(openai_llm, "supports_function_calling")
        assert openai_llm.supports_function_calling() is True

    @pytest.mark.skipif(not HAS_ANTHROPIC, reason="anthropic package not installed")
    def test_anthropic_supports_function_calling(self) -> None:
        """Test that Anthropic models support function calling."""
        with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
@@ -425,14 +401,10 @@ class TestNativeToolCallingBehavior:
            assert hasattr(llm, "supports_function_calling")
            assert llm.supports_function_calling() is True

    @pytest.mark.skipif(not HAS_GOOGLE_GENAI, reason="google-genai package not installed")
    def test_gemini_supports_function_calling(self) -> None:
        """Test that Gemini models support function calling."""
        # with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
        print("GOOGLE_API_KEY", os.getenv("GOOGLE_API_KEY"))
        llm = LLM(model="gemini/gemini-2.5-flash")
        assert hasattr(llm, "supports_function_calling")
        # Gemini uses supports_tools property
        assert llm.supports_function_calling() is True

@@ -0,0 +1,104 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task:
      Calculate what is 15 * 8\n\nThis is the expected criteria for your final answer:
      The result of the calculation\nyou MUST return the actual complete content as
      the final answer, not a summary.\n\nThis is VERY important to you, your job
      depends on it!"}],"model":"claude-3-5-haiku-20241022","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are Math Assistant. You are a helpful math assistant.\nYour personal goal is:
      Help users with mathematical calculations"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '548'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-3-5-haiku-20241022","id":"msg_01RVZdv4eF4cFt5DEYngV3fG","type":"message","role":"assistant","content":[{"type":"text","text":"120"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":93,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":5,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 21 Jan 2026 22:14:30 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-requests-limit:
      - '4000'
      anthropic-ratelimit-requests-remaining:
      - '3999'
      anthropic-ratelimit-requests-reset:
      - '2026-01-21T22:14:29Z'
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '547'
    status:
      code: 200
      message: OK
version: 1
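The cassette above is a standard VCR recording: tests marked with @pytest.mark.vcr() replay it instead of calling api.anthropic.com. A minimal sketch of replaying it directly with vcrpy, assuming vcrpy is installed and using a placeholder cassette path:

# Sketch: replay the recorded Anthropic interaction without a live API call.
# The cassette path below is a placeholder, not the repository's actual filename.
import vcr
from anthropic import Anthropic

with vcr.use_cassette("cassettes/anthropic_native_tool_calling.yaml"):
    client = Anthropic(api_key="test-key")  # never used for auth; the response comes from the cassette
    message = client.messages.create(
        model="claude-3-5-haiku-20241022",
        max_tokens=4096,
        messages=[{"role": "user", "content": "Calculate what is 15 * 8"}],
    )
    print(message.content[0].text)  # prints "120", per the recorded response above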
@@ -0,0 +1,68 @@
interactions:
- request:
    body: '{"contents": [{"parts": [{"text": "\nCurrent Task: Calculate what is 15
      * 8\n\nThis is the expected criteria for your final answer: The result of the
      calculation\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nThis is VERY important to you, your job depends on it!"}],
      "role": "user"}], "systemInstruction": {"parts": [{"text": "You are Math Assistant.
      You are a helpful math assistant.\nYour personal goal is: Help users with mathematical
      calculations"}], "role": "user"}, "generationConfig": {"stopSequences": ["\nObservation:"]}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - '*/*'
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      connection:
      - keep-alive
      content-length:
      - '568'
      content-type:
      - application/json
      host:
      - aiplatform.googleapis.com
      x-goog-api-client:
      - google-genai-sdk/1.60.0 gl-python/3.13.3
      x-goog-api-key:
      - X-GOOG-API-KEY-XXX
    method: POST
    uri: https://aiplatform.googleapis.com/v1/publishers/google/models/gemini-2.0-flash-exp:generateContent
  response:
    body:
      string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\":
        \"model\",\n \"parts\": [\n {\n \"text\": \"15
        * 8 = 120\\n\"\n }\n ]\n },\n \"finishReason\":
        \"STOP\",\n \"avgLogprobs\": -2.7798222039233554e-05\n }\n ],\n \"usageMetadata\":
        {\n \"promptTokenCount\": 83,\n \"candidatesTokenCount\": 11,\n \"totalTokenCount\":
        94,\n \"trafficType\": \"ON_DEMAND\",\n \"promptTokensDetails\": [\n
        \ {\n \"modality\": \"TEXT\",\n \"tokenCount\": 83\n }\n
        \ ],\n \"candidatesTokensDetails\": [\n {\n \"modality\":
        \"TEXT\",\n \"tokenCount\": 11\n }\n ]\n },\n \"modelVersion\":
        \"gemini-2.0-flash-exp\",\n \"createTime\": \"2026-01-21T22:59:20.440324Z\",\n
        \ \"responseId\": \"SFpxaYTwGpedmecPx-blkAc\"\n}\n"
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
      Content-Type:
      - application/json; charset=UTF-8
      Date:
      - Wed, 21 Jan 2026 22:59:20 GMT
      Server:
      - scaffolding on HTTPServer2
      Transfer-Encoding:
      - chunked
      Vary:
      - Origin
      - X-Origin
      - Referer
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      X-Frame-Options:
      - X-FRAME-OPTIONS-XXX
      X-XSS-Protection:
      - '0'
    status:
      code: 200
      message: OK
version: 1
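This recording corresponds to a google-genai generateContent call routed through the Vertex AI endpoint with API-key authentication (note the aiplatform.googleapis.com host and the x-goog-api-key header). Roughly equivalent client code, with a placeholder key and an abridged prompt; this is a sketch of the call shape, not crewAI internals:

# Sketch of the request recorded in this cassette.
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, api_key="test-key")  # placeholder key, express-mode Vertex access
response = client.models.generate_content(
    model="gemini-2.0-flash-exp",
    contents="Calculate what is 15 * 8",  # the recorded prompt includes extra task framing
    config=types.GenerateContentConfig(
        system_instruction="You are Math Assistant. You are a helpful math assistant.",
        stop_sequences=["\nObservation:"],
    ),
)
print(response.text)  # the recorded answer is "15 * 8 = 120"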
@@ -0,0 +1,112 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Calculator. You calculate
      things.\nYour personal goal is: Perform calculations efficiently"},{"role":"user","content":"\nCurrent
      Task: What is 100 / 4?\n\nThis is the expected criteria for your final answer:
      The result\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nThis is VERY important to you, your job depends on it!"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '432'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0alG1p0E0pPIPKoxeoRrMsS82mHf\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769033682,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"100 / 4 = 25\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        81,\n \"completion_tokens\": 7,\n \"total_tokens\": 88,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 21 Jan 2026 22:14:43 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '341'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '356'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,113 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are
      a helpful math assistant.\nYour personal goal is: Help users with mathematical
      calculations"},{"role":"user","content":"\nCurrent Task: Calculate what is 15
      * 8\n\nThis is the expected criteria for your final answer: The result of the
      calculation\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nThis is VERY important to you, your job depends on it!"}],"model":"gpt-4o-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '484'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0alFPT31hH4rYzeeQIJVSalV7wn3\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769033681,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"15 * 8 = 120\",\n \"refusal\":
        null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
        \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
        91,\n \"completion_tokens\": 7,\n \"total_tokens\": 98,\n \"prompt_tokens_details\":
        {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_8bbc38b4db\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Wed, 21 Jan 2026 22:14:42 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '388'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '410'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
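Both OpenAI cassettes in this commit record plain chat completions against gpt-4o-mini, one per test agent. Roughly equivalent client code, with an abridged prompt; this is a sketch of the recorded call shape, not crewAI internals:

# Sketch of the request behind the two OpenAI cassettes.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are Math Assistant. You are a helpful math assistant."},
        {"role": "user", "content": "Calculate what is 15 * 8"},
    ],
)
print(completion.choices[0].message.content)  # the recording returns "15 * 8 = 120"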