mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 00:28:31 +00:00
feat: enhance OpenAICompletion class with additional client parameters (#3701)
* feat: enhance OpenAICompletion class with additional client parameters - Added support for default_headers, default_query, and client_params in the OpenAICompletion class. - Refactored client initialization to use a dedicated method for client parameter retrieval. - Introduced new test cases to validate the correct usage of OpenAICompletion with various parameters. * fix: correct test case for unsupported OpenAI model - Updated the test_openai.py to ensure that the LLM instance is created before calling the method, maintaining proper error handling for unsupported models. - This change ensures that the test accurately checks for the NotFoundError when an invalid model is specified. * fix: enhance error handling in OpenAICompletion class - Added specific exception handling for NotFoundError and APIConnectionError in the OpenAICompletion class to provide clearer error messages and improve logging. - Updated the test case for unsupported models to ensure it raises a ValueError with the appropriate message when a non-existent model is specified. - This change improves the robustness of the OpenAI API integration and enhances the clarity of error reporting. * fix: improve test for unsupported OpenAI model handling - Refactored the test case in test_openai.py to create the LLM instance after mocking the OpenAI client, ensuring proper error handling for unsupported models. - This change enhances the clarity of the test by accurately checking for ValueError when a non-existent model is specified, aligning with recent improvements in error handling for the OpenAICompletion class.
This commit is contained in:
@@ -10,7 +10,7 @@ from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
)
|
||||
from openai import OpenAI
|
||||
from openai import APIConnectionError, NotFoundError, OpenAI
|
||||
from openai.types.chat import ChatCompletion, ChatCompletionChunk
|
||||
from openai.types.chat.chat_completion import Choice
|
||||
from openai.types.chat.chat_completion_chunk import ChoiceDelta
|
||||
@@ -33,6 +33,9 @@ class OpenAICompletion(BaseLLM):
|
||||
project: str | None = None,
|
||||
timeout: float | None = None,
|
||||
max_retries: int = 2,
|
||||
default_headers: dict[str, str] | None = None,
|
||||
default_query: dict[str, Any] | None = None,
|
||||
client_params: dict[str, Any] | None = None,
|
||||
temperature: float | None = None,
|
||||
top_p: float | None = None,
|
||||
frequency_penalty: float | None = None,
|
||||
@@ -44,8 +47,8 @@ class OpenAICompletion(BaseLLM):
|
||||
response_format: dict[str, Any] | type[BaseModel] | None = None,
|
||||
logprobs: bool | None = None,
|
||||
top_logprobs: int | None = None,
|
||||
reasoning_effort: str | None = None, # For o1 models
|
||||
provider: str | None = None, # Add provider parameter
|
||||
reasoning_effort: str | None = None,
|
||||
provider: str | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""Initialize OpenAI chat completion client."""
|
||||
@@ -53,6 +56,16 @@ class OpenAICompletion(BaseLLM):
|
||||
if provider is None:
|
||||
provider = kwargs.pop("provider", "openai")
|
||||
|
||||
# Client configuration attributes
|
||||
self.organization = organization
|
||||
self.project = project
|
||||
self.max_retries = max_retries
|
||||
self.default_headers = default_headers
|
||||
self.default_query = default_query
|
||||
self.client_params = client_params
|
||||
self.timeout = timeout
|
||||
self.base_url = base_url
|
||||
|
||||
super().__init__(
|
||||
model=model,
|
||||
temperature=temperature,
|
||||
@@ -63,15 +76,10 @@ class OpenAICompletion(BaseLLM):
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
self.client = OpenAI(
|
||||
api_key=api_key or os.getenv("OPENAI_API_KEY"),
|
||||
base_url=base_url,
|
||||
organization=organization,
|
||||
project=project,
|
||||
timeout=timeout,
|
||||
max_retries=max_retries,
|
||||
)
|
||||
client_config = self._get_client_params()
|
||||
self.client = OpenAI(**client_config)
|
||||
|
||||
# Completion parameters
|
||||
self.top_p = top_p
|
||||
self.frequency_penalty = frequency_penalty
|
||||
self.presence_penalty = presence_penalty
|
||||
@@ -83,10 +91,35 @@ class OpenAICompletion(BaseLLM):
|
||||
self.logprobs = logprobs
|
||||
self.top_logprobs = top_logprobs
|
||||
self.reasoning_effort = reasoning_effort
|
||||
self.timeout = timeout
|
||||
self.is_o1_model = "o1" in model.lower()
|
||||
self.is_gpt4_model = "gpt-4" in model.lower()
|
||||
|
||||
def _get_client_params(self) -> dict[str, Any]:
|
||||
"""Get OpenAI client parameters."""
|
||||
|
||||
if self.api_key is None:
|
||||
self.api_key = os.getenv("OPENAI_API_KEY")
|
||||
if self.api_key is None:
|
||||
raise ValueError("OPENAI_API_KEY is required")
|
||||
|
||||
base_params = {
|
||||
"api_key": self.api_key,
|
||||
"organization": self.organization,
|
||||
"project": self.project,
|
||||
"base_url": self.base_url,
|
||||
"timeout": self.timeout,
|
||||
"max_retries": self.max_retries,
|
||||
"default_headers": self.default_headers,
|
||||
"default_query": self.default_query,
|
||||
}
|
||||
|
||||
client_params = {k: v for k, v in base_params.items() if v is not None}
|
||||
|
||||
if self.client_params:
|
||||
client_params.update(self.client_params)
|
||||
|
||||
return client_params
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: str | list[dict[str, str]],
|
||||
@@ -207,7 +240,6 @@ class OpenAICompletion(BaseLLM):
|
||||
"api_key",
|
||||
"base_url",
|
||||
"timeout",
|
||||
"max_retries",
|
||||
}
|
||||
|
||||
return {k: v for k, v in params.items() if k not in crewai_specific_params}
|
||||
@@ -306,10 +338,31 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
if usage.get("total_tokens", 0) > 0:
|
||||
logging.info(f"OpenAI API usage: {usage}")
|
||||
except NotFoundError as e:
|
||||
error_msg = f"Model {self.model} not found: {e}"
|
||||
logging.error(error_msg)
|
||||
self._emit_call_failed_event(
|
||||
error=error_msg, from_task=from_task, from_agent=from_agent
|
||||
)
|
||||
raise ValueError(error_msg) from e
|
||||
except APIConnectionError as e:
|
||||
error_msg = f"Failed to connect to OpenAI API: {e}"
|
||||
logging.error(error_msg)
|
||||
self._emit_call_failed_event(
|
||||
error=error_msg, from_task=from_task, from_agent=from_agent
|
||||
)
|
||||
raise ConnectionError(error_msg) from e
|
||||
except Exception as e:
|
||||
# Handle context length exceeded and other errors
|
||||
if is_context_length_exceeded(e):
|
||||
logging.error(f"Context window exceeded: {e}")
|
||||
raise LLMContextLengthExceededError(str(e)) from e
|
||||
|
||||
error_msg = f"OpenAI API call failed: {e!s}"
|
||||
logging.error(error_msg)
|
||||
self._emit_call_failed_event(
|
||||
error=error_msg, from_task=from_task, from_agent=from_agent
|
||||
)
|
||||
raise e from e
|
||||
|
||||
return content
|
||||
|
||||
227
lib/crewai/tests/cassettes/test_openai_completion_call.yaml
Normal file
227
lib/crewai/tests/cassettes/test_openai_completion_call.yaml
Normal file
@@ -0,0 +1,227 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
|
||||
"gpt-4o", "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '102'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nVwy+7CVdpd1+XxBCXbUSB7ggBFpFrj1JvDgeY08K1ar/
|
||||
HSXpNl1YJC4+zJs3fu/NPCUAwmixBqEqyar2dvT+04dNEzbOvXOrzz+228PHGS5pu/k6u7//ItKW
|
||||
QftHVPzMulVUe4tsyPWwCigZ26njxTy7W00n2bIDatJoW1rpeTSl0SSbTEfZcpTNz8SKjMIo1vAt
|
||||
AQB46t5WotP4S6whS58rNcYoSxTrSxOACGTbipAxmsjSsUgHUJFjdJ3qLVpLb2B3U8NjExkk+EBl
|
||||
kHUKkWAHmtwNQyUPCAWiNa6MKewb7hgVBgTpNASU+ghMUKH1cKTmFrb0E5R0sINeQlsFJi2Pb6+l
|
||||
BCyaKNskXGPtFSCdI5Ztkl0ID2fkdLFtqfSB9vEPqiiMM7HKA8pIrrUYmbzo0FMC8NDF27xITPhA
|
||||
teec6Tt2343v+nFi2OcATlZnkImlHerTSfrKtFwjS2Pj1XqEkqpCPTCHXcpGG7oCkivPf4t5bXbv
|
||||
27jyf8YPgFLoGXXuA2qjXhoe2gK21/6vtkvGnWARMRyMwpwNhnYPGgvZ2P4QRTxGxjovjCsx+GD6
|
||||
ayx8rvbFeLGczeYLkZyS3wAAAP//AwCZQodJlgMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 98e23dd86b0c4705-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 13 Oct 2025 22:23:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=wwEqnpcIZyBbBZ_COqrhykwhzQkjmXMsXhNFYjtokPs-1760394210-1.0.1.1-8gJdrt5_Ak6dIqzZox1X9WYI1a7OgSgwaiJdWzz3egks.yw87Cm9__k5K.j4aXQFrUQt7b3OBkTuyrhIysP_CtKEqT5ap_Gc6vH4XqNYXVw;
|
||||
path=/; expires=Mon, 13-Oct-25 22:53:30 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=MTZb.IlikCEE87xU.hPEMy_FZxe7wdzqB_xM1BQOjQs-1760394210023-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1252'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1451'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999993'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_bfe85ec6f9514d3093d79765a87c6c7b
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
|
||||
"gpt-4o", "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '102'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=wwEqnpcIZyBbBZ_COqrhykwhzQkjmXMsXhNFYjtokPs-1760394210-1.0.1.1-8gJdrt5_Ak6dIqzZox1X9WYI1a7OgSgwaiJdWzz3egks.yw87Cm9__k5K.j4aXQFrUQt7b3OBkTuyrhIysP_CtKEqT5ap_Gc6vH4XqNYXVw;
|
||||
_cfuvid=MTZb.IlikCEE87xU.hPEMy_FZxe7wdzqB_xM1BQOjQs-1760394210023-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNa9tAEL3rV0z3kosc5I/Iji8lFIJNPyBQSqEEsd4dSZusdpbdUVoT
|
||||
/N+LJMdy2hR62cO8ebPvvZnnBEAYLdYgVC1ZNd5OPtx9+qym7d3+a/4N69I9OpVtbubfP97efrkR
|
||||
aceg3QMqfmFdKmq8RTbkBlgFlIzd1Okyz+bXi3yV90BDGm1HqzxPFjSZZbPFJFtNsvxIrMkojGIN
|
||||
PxIAgOf+7SQ6jb/EGrL0pdJgjLJCsT41AYhAtqsIGaOJLB2LdAQVOUbXq96gtfQOthcNPLSRQYIP
|
||||
VAXZpBAJtqDJXTDU8gmhRLTGVTGFXcs9o8aAIJ2GgFLvgQlqtB721F7Chn6Ckg62MEjoqsCk5f79
|
||||
uZSAZRtll4RrrT0DpHPEskuyD+H+iBxOti1VPtAu/kEVpXEm1kVAGcl1FiOTFz16SADu+3jbV4kJ
|
||||
H6jxXDA9Yv/ddD6ME+M+R3B2fQSZWNqxvpilb0wrNLI0Np6tRyipatQjc9ylbLWhMyA58/y3mLdm
|
||||
D76Nq/5n/AgohZ5RFz6gNuq14bEtYHft/2o7ZdwLFhHDk1FYsMHQ7UFjKVs7HKKI+8jYFKVxFQYf
|
||||
zHCNpS/UrpwuV1dX+VIkh+Q3AAAA//8DAISwErWWAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 98e249852df117c4-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 13 Oct 2025 22:31:27 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '512'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '670'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999993'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6d219ed625a24c38895b896c9e13dcef
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,129 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Research Assistant.
|
||||
You are a helpful research assistant.\nYour personal goal is: Find information
|
||||
about the population of Tokyo\nTo give my best complete final answer to the
|
||||
task respond using the exact following format:\n\nThought: I now can give a
|
||||
great answer\nFinal Answer: Your final answer must be the great and the most
|
||||
complete as possible, it must be outcome described.\n\nI MUST use these formats,
|
||||
my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Find information
|
||||
about the population of Tokyo\n\nThis is the expected criteria for your final
|
||||
answer: The population of Tokyo is 10 million\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o", "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '927'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFTbahsxEH33Vwx6Xgdf0sT2Wwi09AKlkFJoG8xYmt2dRqtRJa0dN+Tf
|
||||
i2Qndi6Fvixoz5yjc0Yj3Q0AFBu1AKVbTLrzdnj55RNtPm7N+s+cv9t6Mp/dXpL99vXz5fbig6oy
|
||||
Q1a/SKcH1omWzltKLG4H60CYKKuOz89G0/mb2WRSgE4M2UxrfBqeynAympwOR7Ph6GxPbIU1RbWA
|
||||
HwMAgLvyzRadoVu1gFH18KejGLEhtXgsAlBBbP6jMEaOCV1S1QHU4hK54vqqlb5p0wLeg5MNaHTQ
|
||||
8JoAocnWAV3cUAD46d6yQwsXZb2Aq5bAi+8t5rAgNVzJzVYqYKdtb9g1sJLUAqcIHaUgXiwndICB
|
||||
ENAZSC3BZArRk2a0sMFgIqQWE3R4Q9D7UqHJpYAWNKdtBRwhcuO4Zo0u2S1YDA2FTHMwHkHH1rK4
|
||||
E7iI2VIW6CQmCJR1wGDCamc0S4mjJ1Ulj/Qxb8YUgV3BNhKsqWDDqS3rd+VMw17nIudpcZ0T47OW
|
||||
yJoCTM8fbIEn8ZaqHDCXc3pl75fNOrZxUhr/om2H9m9a1m3mFQ797nmNNmeXGnDfxRbLAWvpVuzI
|
||||
HJsu/adbTWQizJ8ZPzmeoUB1HzGPsOutPQLQOUlFrUzv9R65f5xXK40PsorPqKpmx7FdBsIoLs9m
|
||||
TOJVQe8HANflXvRPRl35IJ1PyyQ3VLYbn093eupwE4/Q8dkeTZLQHoDJbF69Irg0lJBtPLpaSqNu
|
||||
yRyoh3uIvWE5AgZHsV/aeU17F51d8z/yB0Br8onM0gcyrJ9GPpQFyi/Vv8oe21wMq0hhzZqWiSnk
|
||||
ozBUY293j4iK25ioW9bsGgo+8O4lqf2SVlM91avZqVGD+8FfAAAA//8DAFlnuIlSBQAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 98e26542adbbce40-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Mon, 13 Oct 2025 22:50:26 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=ZOY3aTF4ZQGyq1Ai5bME5tI2L4FUKjdaM76hKUktVgg-1760395826-1.0.1.1-6MNmhofBsqJxHCGxkDDtTbJUi9JDiJwdeBOsfQEvrMTovTmf8eAYxjskKbAxY0ZicvPhqx2bOD64cOAPUfREUiFdzz1oh3uKuy4_AL9Vma0;
|
||||
path=/; expires=Mon, 13-Oct-25 23:20:26 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=ETABAP9icJoaIxhFazEUuSnHhwqlBentj3YJUS501.w-1760395826352-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '3572'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '3756'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-project-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999798'
|
||||
x-ratelimit-reset-project-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3676b4edd10244929526ceb64a623a88
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,133 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Research Assistant.
|
||||
You are a helpful research assistant.\nYour personal goal is: Find information
|
||||
about the population of Tokyo\nTo give my best complete final answer to the
|
||||
task respond using the exact following format:\n\nThought: I now can give a
|
||||
great answer\nFinal Answer: Your final answer must be the great and the most
|
||||
complete as possible, it must be outcome described.\n\nI MUST use these formats,
|
||||
my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Find information
|
||||
about the population of Tokyo\n\nThis is the expected criteria for your final
|
||||
answer: The population of Tokyo is 10 million\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '932'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xUTY8bNwy9+1cQcx4vbK+93vXNDdompyJF0Bb5gEFrODPMSqJASXa8wf73QmN7
|
||||
7W1ToJcBxMdHvscR9X0EUHFTraAyPSbjgh2/ef9+8eanj7M/lr8vzduv+eHP/c9/rZ8+2vXTnKu6
|
||||
MGT7lUw6s26MuGApsfgjbJQwUak6Xd5N5tPZ/exuAJw0ZAutC2k8l7Fjz+PZZDYfT5bj6f2J3Qsb
|
||||
itUKPo0AAL4P36LTN/StWsGkPkccxYgdVauXJIBKxZZIhTFyTOhTVV9AIz6RH6S/Ay97MOih4x0B
|
||||
QldkA/q4JwX47H9hjxbWw3kF6wjSgjuAxZgghwYTAXv4zSTZksJsMrutIfUEQUK2WMZRGB/k8SDA
|
||||
ETAElW/sMJE9wHQOjq0tSYEk2KFWYRvySdECKmEN+54tDfFfh6Hqqd76jJoe2BubG4oQs6pk37Dv
|
||||
ICi1ZFJWijX0GAEhJuw60gF9JVF2pHC7PAuqweFjyeI0dHYS05EhOYKjpBLEckI/iDwL34va5gY+
|
||||
FA+cDlfeUyTblhEoedl7aqAVLWHY8VbRJzDZFqk1NLwjjQRkxIs71IC+gcid55ZNyeysbNEC+9Zm
|
||||
8oaODa/8tNwV0+DwAK3NJuXyo5pMkAR2qFxMtGiSaJmY6QEjOO50oNeQdYuen06n0r4hJ51i6NlA
|
||||
UvJNrGGbE3TkSdHaQ10mpeSQfQTxVKyXiVjUjsplKSUBu86Ko2OfeDJiDzfX11OpzRHLivhs7RWA
|
||||
3ks6MstifDkhzy+rYKULKtv4D2rVsufYb5Qwii/XPiYJ1YA+jwC+DCuXX21RFVRcSJskjzS0my5v
|
||||
j/Wqy6ZfobPFCU2S0F6A2cN9/YOCm4YSso1XW1sZND01F+plxTE3LFfA6Mr2v+X8qPbROvvu/5S/
|
||||
AMZQSNRsglLD5rXlS5pSeQn/K+1lzIPgKpLu2NAmMWn5FQ21mO3xfariISZym5Z9RxqUj49UGzaL
|
||||
uwm2d7RYPFSj59HfAAAA//8DAB8kWOqyBQAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 98e404605874fad2-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 14 Oct 2025 03:33:48 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=o5Vy5q.qstP73vjTrIb7GX6EjMltWq26Vk1ctm8rrcQ-1760412828-1.0.1.1-6PmDQhWH5.60C02WBN9ENJiBEZ0hYXY1YJ6TKxTAflRETSCaMVA2j1.xE2KPFpUrsSsmbkopxQ1p2NYmLzuRy08dingIYyz5HZGz8ghl.nM;
|
||||
path=/; expires=Tue, 14-Oct-25 04:03:48 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=TkrzMwZH3VZy7i4ED_kVxlx4MUrHeXnluoFfmeqTT2w-1760412828927-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '2644'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '2793'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999797'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999797'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_5c4fad6d3e4743d1a43ab65bd333b477
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
409
lib/crewai/tests/llms/openai/test_openai.py
Normal file
409
lib/crewai/tests/llms/openai/test_openai.py
Normal file
@@ -0,0 +1,409 @@
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
from unittest.mock import patch, MagicMock
|
||||
import openai
|
||||
import pytest
|
||||
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.providers.openai.completion import OpenAICompletion
|
||||
from crewai.crew import Crew
|
||||
from crewai.agent import Agent
|
||||
from crewai.task import Task
|
||||
from crewai.cli.constants import DEFAULT_LLM_MODEL
|
||||
|
||||
def test_openai_completion_is_used_when_openai_provider():
|
||||
"""
|
||||
Test that OpenAICompletion from completion.py is used when LLM uses provider 'openai'
|
||||
"""
|
||||
llm = LLM(model="openai/gpt-4o")
|
||||
|
||||
assert llm.__class__.__name__ == "OpenAICompletion"
|
||||
assert llm.provider == "openai"
|
||||
assert llm.model == "gpt-4o"
|
||||
|
||||
|
||||
def test_openai_completion_is_used_when_no_provider_prefix():
|
||||
"""
|
||||
Test that OpenAICompletion is used when no provider prefix is given (defaults to openai)
|
||||
"""
|
||||
llm = LLM(model="gpt-4o")
|
||||
|
||||
from crewai.llms.providers.openai.completion import OpenAICompletion
|
||||
assert isinstance(llm, OpenAICompletion)
|
||||
assert llm.provider == "openai"
|
||||
assert llm.model == "gpt-4o"
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_openai_is_default_provider_without_explicit_llm_set_on_agent():
|
||||
"""
|
||||
Test that OpenAI is the default provider when no explicit LLM is set on the agent
|
||||
"""
|
||||
agent = Agent(
|
||||
role="Research Assistant",
|
||||
goal="Find information about the population of Tokyo",
|
||||
backstory="You are a helpful research assistant.",
|
||||
)
|
||||
task = Task(
|
||||
description="Find information about the population of Tokyo",
|
||||
expected_output="The population of Tokyo is 10 million",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
crew.kickoff()
|
||||
assert crew.agents[0].llm.__class__.__name__ == "OpenAICompletion"
|
||||
assert crew.agents[0].llm.model == DEFAULT_LLM_MODEL
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def test_openai_completion_module_is_imported():
|
||||
"""
|
||||
Test that the completion module is properly imported when using OpenAI provider
|
||||
"""
|
||||
module_name = "crewai.llms.providers.openai.completion"
|
||||
|
||||
# Remove module from cache if it exists
|
||||
if module_name in sys.modules:
|
||||
del sys.modules[module_name]
|
||||
|
||||
# Create LLM instance - this should trigger the import
|
||||
LLM(model="openai/gpt-4o")
|
||||
|
||||
# Verify the module was imported
|
||||
assert module_name in sys.modules
|
||||
completion_mod = sys.modules[module_name]
|
||||
assert isinstance(completion_mod, types.ModuleType)
|
||||
|
||||
# Verify the class exists in the module
|
||||
assert hasattr(completion_mod, 'OpenAICompletion')
|
||||
|
||||
|
||||
def test_fallback_to_litellm_when_native_fails():
|
||||
"""
|
||||
Test that LLM falls back to LiteLLM when native OpenAI completion fails
|
||||
"""
|
||||
# Mock the _get_native_provider to return a failing class
|
||||
with patch('crewai.llm.LLM._get_native_provider') as mock_get_provider:
|
||||
|
||||
class FailingCompletion:
|
||||
def __init__(self, *args, **kwargs):
|
||||
raise Exception("Native SDK failed")
|
||||
|
||||
mock_get_provider.return_value = FailingCompletion
|
||||
|
||||
# This should fall back to LiteLLM
|
||||
llm = LLM(model="openai/gpt-4o")
|
||||
|
||||
# Check that it's using LiteLLM
|
||||
assert hasattr(llm, 'is_litellm')
|
||||
assert llm.is_litellm == True
|
||||
|
||||
|
||||
def test_openai_completion_initialization_parameters():
|
||||
"""
|
||||
Test that OpenAICompletion is initialized with correct parameters
|
||||
"""
|
||||
llm = LLM(
|
||||
model="openai/gpt-4o",
|
||||
temperature=0.7,
|
||||
max_tokens=1000,
|
||||
api_key="test-key"
|
||||
)
|
||||
|
||||
from crewai.llms.providers.openai.completion import OpenAICompletion
|
||||
assert isinstance(llm, OpenAICompletion)
|
||||
assert llm.model == "gpt-4o"
|
||||
assert llm.temperature == 0.7
|
||||
assert llm.max_tokens == 1000
|
||||
|
||||
def test_openai_completion_call():
|
||||
"""
|
||||
Test that OpenAICompletion call method works
|
||||
"""
|
||||
llm = LLM(model="openai/gpt-4o")
|
||||
|
||||
# Mock the call method on the instance
|
||||
with patch.object(llm, 'call', return_value="Hello! I'm ready to help.") as mock_call:
|
||||
result = llm.call("Hello, how are you?")
|
||||
|
||||
assert result == "Hello! I'm ready to help."
|
||||
mock_call.assert_called_once_with("Hello, how are you?")
|
||||
|
||||
|
||||
def test_openai_completion_called_during_crew_execution():
|
||||
"""
|
||||
Test that OpenAICompletion.call is actually invoked when running a crew
|
||||
"""
|
||||
# Create the LLM instance first
|
||||
openai_llm = LLM(model="openai/gpt-4o")
|
||||
|
||||
# Mock the call method on the specific instance
|
||||
with patch.object(openai_llm, 'call', return_value="Tokyo has 14 million people.") as mock_call:
|
||||
|
||||
# Create agent with explicit LLM configuration
|
||||
agent = Agent(
|
||||
role="Research Assistant",
|
||||
goal="Find population info",
|
||||
backstory="You research populations.",
|
||||
llm=openai_llm,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Find Tokyo population",
|
||||
expected_output="Population number",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
# Verify mock was called
|
||||
assert mock_call.called
|
||||
assert "14 million" in str(result)
|
||||
|
||||
|
||||
def test_openai_completion_call_arguments():
|
||||
"""
|
||||
Test that OpenAICompletion.call is invoked with correct arguments
|
||||
"""
|
||||
# Create LLM instance first (like working tests)
|
||||
openai_llm = LLM(model="openai/gpt-4o")
|
||||
|
||||
# Mock the instance method (like working tests)
|
||||
with patch.object(openai_llm, 'call') as mock_call:
|
||||
mock_call.return_value = "Task completed successfully."
|
||||
|
||||
agent = Agent(
|
||||
role="Test Agent",
|
||||
goal="Complete a simple task",
|
||||
backstory="You are a test agent.",
|
||||
llm=openai_llm # Use same instance
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Say hello world",
|
||||
expected_output="Hello world",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
crew.kickoff()
|
||||
|
||||
# Verify call was made
|
||||
assert mock_call.called
|
||||
|
||||
# Check the arguments passed to the call method
|
||||
call_args = mock_call.call_args
|
||||
assert call_args is not None
|
||||
|
||||
# The first argument should be the messages
|
||||
messages = call_args[0][0] # First positional argument
|
||||
assert isinstance(messages, (str, list))
|
||||
|
||||
# Verify that the task description appears in the messages
|
||||
if isinstance(messages, str):
|
||||
assert "hello world" in messages.lower()
|
||||
elif isinstance(messages, list):
|
||||
message_content = str(messages).lower()
|
||||
assert "hello world" in message_content
|
||||
|
||||
|
||||
def test_multiple_openai_calls_in_crew():
    """
    Test that OpenAICompletion.call is invoked multiple times for multiple tasks
    """
    llm = LLM(model="openai/gpt-4o")

    # Patch the instance method so both tasks route through the same mock.
    with patch.object(llm, 'call') as mocked_call:
        mocked_call.return_value = "Task completed."

        worker = Agent(
            role="Multi-task Agent",
            goal="Complete multiple tasks",
            backstory="You can handle multiple tasks.",
            llm=llm,  # same patched instance
        )

        first = Task(
            description="First task",
            expected_output="First result",
            agent=worker,
        )
        second = Task(
            description="Second task",
            expected_output="Second result",
            agent=worker,
        )

        Crew(agents=[worker], tasks=[first, second]).kickoff()

        # Each task should trigger at least one LLM invocation.
        assert mocked_call.call_count >= 2

        # Every recorded invocation must carry a non-empty message payload.
        for recorded in mocked_call.call_args_list:
            assert len(recorded[0]) > 0
            assert recorded[0][0] is not None
def test_openai_completion_with_tools():
    """
    Test that OpenAICompletion.call is invoked with tools when agent has tools
    """
    from crewai.tools import tool

    @tool
    def sample_tool(query: str) -> str:
        """A sample tool for testing"""
        return f"Tool result for: {query}"

    llm = LLM(model="openai/gpt-4o")

    # Patch the instance method (not the class) so this agent's calls hit the mock.
    with patch.object(llm, 'call') as mocked_call:
        mocked_call.return_value = "Task completed with tools."

        tool_agent = Agent(
            role="Tool User",
            goal="Use tools to complete tasks",
            backstory="You can use tools.",
            llm=llm,  # same patched instance
            tools=[sample_tool],
        )

        tool_task = Task(
            description="Use the sample tool",
            expected_output="Tool usage result",
            agent=tool_agent,
        )

        Crew(agents=[tool_agent], tasks=[tool_task]).kickoff()

        assert mocked_call.called

        recorded = mocked_call.call_args
        kwargs_passed = recorded[1] if len(recorded) > 1 else {}

        # Only check tool forwarding when the call actually included tools.
        if 'tools' in kwargs_passed:
            assert kwargs_passed['tools'] is not None
            assert len(kwargs_passed['tools']) > 0
@pytest.mark.vcr(filter_headers=["authorization"])
def test_openai_completion_call_returns_usage_metrics():
    """
    Test that OpenAICompletion.call returns usage metrics
    """
    researcher = Agent(
        role="Research Assistant",
        goal="Find information about the population of Tokyo",
        backstory="You are a helpful research assistant.",
        llm=LLM(model="openai/gpt-4o"),
        verbose=True,
    )

    research_task = Task(
        description="Find information about the population of Tokyo",
        expected_output="The population of Tokyo is 10 million",
        agent=researcher,
    )

    output = Crew(agents=[researcher], tasks=[research_task]).kickoff()

    # Exact token counts are pinned to the recorded VCR cassette for this test.
    usage = output.token_usage
    assert usage is not None
    assert usage.total_tokens == 289
    assert usage.prompt_tokens == 173
    assert usage.completion_tokens == 116
    assert usage.successful_requests == 1
    assert usage.cached_prompt_tokens == 0
def test_openai_raises_error_when_model_not_supported():
    """Test that OpenAICompletion raises ValueError when model not supported"""
    with patch('crewai.llms.providers.openai.completion.OpenAI') as mock_openai_class:
        fake_client = MagicMock()
        mock_openai_class.return_value = fake_client

        # Simulate the OpenAI API rejecting an unknown model name.
        fake_client.chat.completions.create.side_effect = openai.NotFoundError(
            message="The model `model-doesnt-exist` does not exist",
            response=MagicMock(),
            body={},
        )

        # LLM is created after the client class is mocked so the provider
        # wraps the fake client.
        llm = LLM(model="openai/model-doesnt-exist")

        # The provider is expected to translate NotFoundError into a
        # ValueError whose message matches "Model ... not found".
        with pytest.raises(ValueError, match="Model.*not found"):
            llm.call("Hello")
def test_openai_client_setup_with_extra_arguments():
    """
    Test that OpenAICompletion is initialized with correct parameters
    """
    llm = LLM(
        model="openai/gpt-4o",
        temperature=0.7,
        max_tokens=1000,
        top_p=0.5,
        max_retries=3,
        timeout=30,
    )

    # Sampling parameters live on the LLM wrapper itself.
    assert llm.temperature == 0.7
    assert llm.max_tokens == 1000
    assert llm.top_p == 0.5

    # Transport-level options are forwarded to the underlying client.
    assert llm.client.max_retries == 3
    assert llm.client.timeout == 30

    # Stub the completions endpoint and confirm the stored parameters
    # are forwarded on an actual call.
    fake_message = MagicMock(content="test response", tool_calls=None)
    fake_response = MagicMock(
        choices=[MagicMock(message=fake_message)],
        usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30),
    )
    with patch.object(llm.client.chat.completions, 'create') as mocked_create:
        mocked_create.return_value = fake_response

        llm.call("Hello")

        forwarded = mocked_create.call_args[1]  # keyword arguments
        assert forwarded['temperature'] == 0.7
        assert forwarded['max_tokens'] == 1000
        assert forwarded['top_p'] == 0.5
        assert forwarded['model'] == 'gpt-4o'
def test_extra_arguments_are_passed_to_openai_completion():
    """
    Test that extra arguments are passed to OpenAICompletion
    """
    llm = LLM(model="openai/gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)

    stub_reply = MagicMock(
        choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
        usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30),
    )
    with patch.object(llm.client.chat.completions, 'create') as mocked_create:
        mocked_create.return_value = stub_reply

        llm.call("Hello, how are you?")

        assert mocked_create.called
        sent = mocked_create.call_args[1]

        # Sampling knobs and the resolved model name must reach the API call.
        assert sent['temperature'] == 0.7
        assert sent['max_tokens'] == 1000
        assert sent['top_p'] == 0.5
        assert sent['model'] == 'gpt-4o'
Reference in New Issue
Block a user