add tests

Brandon Hancock
2025-03-03 13:21:43 -05:00
parent 143832bd8b
commit 26e6106fe2
5 changed files with 758 additions and 0 deletions


@@ -0,0 +1,170 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Tell me a short joke"}], "model":
      "gpt-3.5-turbo", "stop": [], "stream": true}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '121'
      content-type:
      - application/json
      cookie:
      - _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.65.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.65.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        couldn"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''t"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        the"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        bicycle"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        stand"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        up"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        by"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        itself"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        Because"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        it"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        was"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        two"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"-t"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"ired"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aE2TDl9ZbKx2fXoVatoMDnErNm","object":"chat.completion.chunk","created":1741025614,"model":"gpt-3.5-turbo-0125","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-RAY:
      - 91ab1bcbad95bcda-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 03 Mar 2025 18:13:34 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=Jydtg8l0yjWRI2vKmejdq.C1W.sasIwEbTrV2rUt6V0-1741025614-1.0.1.1-Af3gmq.j2ecn9QEa3aCVY09QU4VqoW2GTk9AjvzPA.jyAZlwhJd4paniSt3kSusH0tryW03iC8uaX826hb2xzapgcfSm6Jdh_eWh_BMCh_8;
        path=/; expires=Mon, 03-Mar-25 18:43:34 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=5wzaJSCvT1p1Eazad55wDvp1JsgxrlghhmmU9tx0fMs-1741025614868-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '127'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '50000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '49999978'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_2a2a04977ace88fdd64cf570f80c0202
    status:
      code: 200
      message: OK
version: 1
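The recorded body above is a server-sent-event stream whose per-chunk content deltas concatenate to the final completion, which is what the streaming tests below assert. A minimal sketch of how such a body can be reassembled; the helper name is illustrative (not part of this commit), and it assumes the decoded HTTP body with one event per data: line:

import json

def reassemble_sse(body: str) -> str:
    """Concatenate the content deltas of a recorded chat-completion stream."""
    parts = []
    for line in body.splitlines():
        line = line.strip()
        if not line.startswith("data:") or line == "data: [DONE]":
            continue
        event = json.loads(line[len("data:"):])
        delta = event["choices"][0]["delta"]
        parts.append(delta.get("content") or "")  # role/final chunks carry no content
    return "".join(parts)

Applied to this cassette, the joined deltas read: "Why couldn't the bicycle stand up by itself? Because it was two-tired!"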


@@ -0,0 +1,104 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Tell me a short joke"}], "model":
      "gpt-3.5-turbo", "stop": [], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '122'
      content-type:
      - application/json
      cookie:
      - _cfuvid=5wzaJSCvT1p1Eazad55wDvp1JsgxrlghhmmU9tx0fMs-1741025614868-0.0.1.1-604800000;
        __cf_bm=Jydtg8l0yjWRI2vKmejdq.C1W.sasIwEbTrV2rUt6V0-1741025614-1.0.1.1-Af3gmq.j2ecn9QEa3aCVY09QU4VqoW2GTk9AjvzPA.jyAZlwhJd4paniSt3kSusH0tryW03iC8uaX826hb2xzapgcfSm6Jdh_eWh_BMCh_8
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.65.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.65.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFLBahsxEL3vVwy65GIbr2M7xZdAKCW+tacWSlm00uyuUq1GkUZNjfG/
        F60d74am0IsO8+Y93nuaYwEgjBY7EKqTrHpv5w93a/npm/myT/72kVpOH58/q+fHKNfrvRKzzKD6
        CRW/shaKem+RDbkzrAJKxqxa3q3L5WqzLTcD0JNGm2mt5/ntYjPnFGqaL8vV5sLsyCiMYgffCwCA
        4/Bmj07jb7GD5ex10mOMskWxuy4BiEA2T4SM0USWjsVsBBU5RjfY/todQFGy2t0wcIdQG3VQFiGT
        NCQP9QEMR7TNPewZXmQEfiFgE1AvpqIBmxRlDuWStZf56erSUusD1fGCX+eNcSZ2VUAZyWVHkcmL
        AT0VAD+GNtKbgMIH6j1XTD/RZcFydZYTY/8TcHsBmVjacb76MHtHrdLI0tg4aVMoqTrUI3OsXiZt
        aAIUk8x/m3lP+5zbuPZ/5EdAKfSMuvIBtVFvA49rAfN1/mvt2vFgWEQMv4zCig2G/A8aG5ns+W5E
        PETGvmqMazH4YIbjGf75VPwBAAD//wMAjEHT5jsDAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 91ab1bce28f6bcda-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Mon, 03 Mar 2025 18:13:35 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '307'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '50000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '49999978'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_2969fea72b69e352ff742a363ffcf465
    status:
      code: 200
      message: OK
version: 1
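This non-streaming cassette stores the response body as a YAML !!binary blob because the server answered with Content-Encoding: gzip; base64-decoding and decompressing it yields ordinary chat-completion JSON. A small sketch using only the standard library (the function name is illustrative):

import base64
import gzip
import json

def decode_cassette_body(blob: str) -> dict:
    """Decode a recorded !!binary response body: base64 -> gzip -> JSON."""
    raw = gzip.decompress(base64.b64decode(blob))
    return json.loads(raw)

The decoded payload has the usual shape, e.g. {"id": "chatcmpl-...", "choices": [{"message": {"content": "..."}}], ...}.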


@@ -0,0 +1,144 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Process this text with the sample
      tool: ''Hello, world!''"}], "model": "gpt-4", "stop": [], "stream": true, "tools":
      [{"type": "function", "function": {"name": "sample_tool", "description": "A
      sample tool that processes text", "parameters": {"type": "object", "properties":
      {"text": {"type": "string", "description": "The text to process"}}, "required":
      ["text"]}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '408'
      content-type:
      - application/json
      cookie:
      - _cfuvid=5wzaJSCvT1p1Eazad55wDvp1JsgxrlghhmmU9tx0fMs-1741025614868-0.0.1.1-604800000;
        __cf_bm=Jydtg8l0yjWRI2vKmejdq.C1W.sasIwEbTrV2rUt6V0-1741025614-1.0.1.1-Af3gmq.j2ecn9QEa3aCVY09QU4VqoW2GTk9AjvzPA.jyAZlwhJd4paniSt3kSusH0tryW03iC8uaX826hb2xzapgcfSm6Jdh_eWh_BMCh_8
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.65.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.65.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_CvJXBBDO8uDFZ9bdZkZfsmLb","type":"function","function":{"name":"sample_tool","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\n"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"
        "}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"
        \""}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"text"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"
        \""}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Hello"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":","}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"
        world"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"!\"\n"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-B74aFEEmqXpcnfB7af27OFRQZ5mpq","object":"chat.completion.chunk","created":1741025615,"model":"gpt-4-0613","service_tier":"default","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 91ab1bd2d868bcda-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 03 Mar 2025 18:13:36 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '552'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '1000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '999968'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - req_541a74020ec89786d0bef89a013fdc78
    status:
      code: 200
      message: OK
version: 1
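In this tool-call stream, the deltas carry fragments of tool_calls[0].function.arguments rather than content; joined in order they form the JSON arguments {"text": "Hello, world!"} (modulo whitespace). A sketch of the accumulation step over already-parsed events; the helper name is illustrative:

import json

def accumulate_tool_args(events: list[dict]) -> dict:
    """Join streamed tool-call argument fragments and parse the result."""
    fragments = []
    for event in events:
        delta = event["choices"][0]["delta"]
        for call in delta.get("tool_calls") or []:  # final chunk has an empty delta
            fragments.append(call["function"].get("arguments") or "")
    return json.loads("".join(fragments))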


@@ -0,0 +1,109 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Process this text with the sample
      tool: ''Hello, world!''"}], "model": "gpt-4", "stop": [], "stream": false, "tools":
      [{"type": "function", "function": {"name": "sample_tool", "description": "A
      sample tool that processes text", "parameters": {"type": "object", "properties":
      {"text": {"type": "string", "description": "The text to process"}}, "required":
      ["text"]}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '409'
      content-type:
      - application/json
      cookie:
      - _cfuvid=5wzaJSCvT1p1Eazad55wDvp1JsgxrlghhmmU9tx0fMs-1741025614868-0.0.1.1-604800000;
        __cf_bm=Jydtg8l0yjWRI2vKmejdq.C1W.sasIwEbTrV2rUt6V0-1741025614-1.0.1.1-Af3gmq.j2ecn9QEa3aCVY09QU4VqoW2GTk9AjvzPA.jyAZlwhJd4paniSt3kSusH0tryW03iC8uaX826hb2xzapgcfSm6Jdh_eWh_BMCh_8
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.65.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.65.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFPLbtswELzrK9g9y4WV+AXdEqCF+0KBtmkOVSHQ5EpiQpEMSaVJDP97
        QcmWZMcFqoNA7OzM7s6S24gQEBxSAqyintVGTq6XM7p+9+FivvrG17ebq2LJ+YP++qVSn6oNxIGh
        N3fI/IH1lunaSPRCqw5mFqnHoJosZ8n0Yr5Ili1Qa44y0ErjJ7PJdJFc7hmVFgwdpORXRAgh2/Yf
        elMcnyAl0/gQqdE5WiKkfRIhYLUMEaDOCeep8hAPINPKowrtqkbKEeC1ljmjUg6Fu287Og8GUSnz
        HzN18/Dokrtb+n46/3jNmp/fX14+34/qddLPpm2oaBTrjRnhfTw9KUYIKFq3XEeDrXno8oROCFBb
        NjUqH1qHbaYIycDjk88gJRmsUUodkz/aSv4mg0zt4EhgF507/x55Y7FoHJV70/bxXb8FqUtj9cad
        mAqFUMJVuUXq2uHGHkeHam0daI7WCMbq2vjc63tUQXYx70RhuF0DmKz2oNeeyiG+uozPqOUcPRXt
        lvuLxSirkA/M4YLRhgs9AqLR5K+bOafdTS9U+T/yA8AYGo88Nxa5YMcDD2kWw9v7V1rvcdswOLSP
        gmHuBdqwDY4FbWT3OsA9O491XghVojVW9E8k2kV/AQAA//8DAGu8z/YZBAAA
    headers:
      CF-RAY:
      - 91ab1bda9b68bcda-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Mon, 03 Mar 2025 18:13:37 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '875'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '1000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '999968'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - req_291d5b012b356f9fba6cbff30f52e6aa
    status:
      code: 200
      message: OK
version: 1


@@ -38,6 +38,7 @@ from crewai.utilities.events.llm_events import (
    LLMCallFailedEvent,
    LLMCallStartedEvent,
    LLMCallType,
    LLMStreamChunkEvent,
)
from crewai.utilities.events.task_events import (
    TaskCompletedEvent,
@@ -615,3 +616,233 @@ def test_llm_emits_call_failed_event():
    assert len(received_events) == 1
    assert received_events[0].type == "llm_call_failed"
    assert received_events[0].error == error_message


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_stream_chunk_events():
    """Test that LLM emits stream chunk events when streaming is enabled."""
    received_chunks = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)

        # Create an LLM with streaming enabled
        llm = LLM(model="gpt-3.5-turbo", stream=True)

        # Call the LLM with a simple message
        response = llm.call("Tell me a short joke")

        # Verify that we received chunks
        assert len(received_chunks) > 0

        # Verify that concatenating all chunks equals the final response
        assert "".join(received_chunks) == response


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_no_stream_chunks_when_streaming_disabled():
    """Test that LLM doesn't emit stream chunk events when streaming is disabled."""
    received_chunks = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)

        # Create an LLM with streaming disabled
        llm = LLM(model="gpt-3.5-turbo", stream=False)

        # Call the LLM with a simple message
        response = llm.call("Tell me a short joke")

        # Verify that we didn't receive any chunks
        assert len(received_chunks) == 0

        # Verify we got a response
        assert response and isinstance(response, str)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_tool_calling_with_streaming():
    """Test that tool calling works correctly with streaming enabled."""
    received_chunks = []
    tool_called = False

    def sample_tool(text: str) -> str:
        nonlocal tool_called
        tool_called = True
        return f"Tool processed: {text}"

    available_functions = {"sample_tool": sample_tool}
    tools = [
        {
            "type": "function",
            "function": {
                "name": "sample_tool",
                "description": "A sample tool that processes text",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "text": {"type": "string", "description": "The text to process"}
                    },
                    "required": ["text"],
                },
            },
        }
    ]

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)

        # Create an LLM with streaming enabled
        llm = LLM(model="gpt-4", stream=True)

        # Store original methods
        original_call = llm.call
        original_handle_tool_call = llm._handle_tool_call

        # Create a mock call method that simulates streaming and tool calling
        def mock_call(messages, tools=None, callbacks=None, available_functions=None):
            # Emit some chunks first
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="I'll process "))
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="that text "))
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="for you."))

            # Call the tool
            if available_functions and "sample_tool" in available_functions:
                result = available_functions["sample_tool"]("Hello, world!")
                return result
            return "No tool was called"

        # Replace the methods with our mocks
        llm.call = mock_call

        try:
            # Call the LLM with a message that should trigger tool use
            response = llm.call(
                "Process this text with the sample tool: 'Hello, world!'",
                tools=tools,
                available_functions=available_functions,
            )

            # Verify that we received chunks
            assert len(received_chunks) == 3
            assert "".join(received_chunks) == "I'll process that text for you."

            # Verify that the tool was called
            assert tool_called

            # Verify the response contains the tool's output
            assert response == "Tool processed: Hello, world!"
        finally:
            # Restore the original methods
            llm.call = original_call


@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_fallback_to_non_streaming():
    """Test that streaming falls back to non-streaming when there's an error."""
    received_chunks = []
    fallback_called = False

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)

        # Create an LLM with streaming enabled
        llm = LLM(model="gpt-3.5-turbo", stream=True)

        # Store original methods
        original_call = llm.call

        # Create a mock call method that handles the streaming error
        def mock_call(messages, tools=None, callbacks=None, available_functions=None):
            nonlocal fallback_called

            # Emit a couple of chunks to simulate partial streaming
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 1"))
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk="Test chunk 2"))

            # Mark that fallback would be called
            fallback_called = True

            # Return a response as if fallback succeeded
            return "Fallback response after streaming error"

        # Replace the call method with our mock
        llm.call = mock_call

        try:
            # Call the LLM
            response = llm.call("Tell me a short joke")

            # Verify that we received some chunks
            assert len(received_chunks) == 2
            assert received_chunks[0] == "Test chunk 1"
            assert received_chunks[1] == "Test chunk 2"

            # Verify fallback was triggered
            assert fallback_called

            # Verify we got the fallback response
            assert response == "Fallback response after streaming error"
        finally:
            # Restore the original method
            llm.call = original_call


@pytest.mark.vcr(filter_headers=["authorization"])
def test_streaming_empty_response_handling():
    """Test that streaming handles empty responses correctly."""
    received_chunks = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_stream_chunk(source, event):
            received_chunks.append(event.chunk)

        # Create an LLM with streaming enabled
        llm = LLM(model="gpt-3.5-turbo", stream=True)

        # Store original methods
        original_call = llm.call

        # Create a mock call method that simulates empty chunks
        def mock_call(messages, tools=None, callbacks=None, available_functions=None):
            # Emit a few empty chunks
            for _ in range(3):
                crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=""))

            # Return the default message for empty responses
            return "I apologize, but I couldn't generate a proper response. Please try again or rephrase your request."

        # Replace the call method with our mock
        llm.call = mock_call

        try:
            # Call the LLM - this should handle empty response
            response = llm.call("Tell me a short joke")

            # Verify that we received empty chunks
            assert len(received_chunks) == 3
            assert all(chunk == "" for chunk in received_chunks)

            # Verify the response is the default message for empty responses
            assert "I apologize" in response and "couldn't generate" in response
        finally:
            # Restore the original method
            llm.call = original_call
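The mocks above stand in for the producer side of these events, which this diff does not show. A hypothetical sketch of what that side might look like, assuming a litellm-based streaming loop; the function name, the fallback helper, and the crewai_event_bus import path are assumptions inferred from the tests, not confirmed internals:

import litellm

from crewai.utilities.events.crewai_event_bus import crewai_event_bus  # assumed path
from crewai.utilities.events.llm_events import LLMStreamChunkEvent


def call_streaming(llm, params: dict) -> str:
    """Illustrative loop: emit one LLMStreamChunkEvent per delta, fall back on error."""
    full_response = ""
    try:
        # params holds model/messages/etc.; stream is added here
        for chunk in litellm.completion(**params, stream=True):
            delta = chunk.choices[0].delta.content or ""
            full_response += delta
            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=delta))
    except Exception:
        # Fall back to a single non-streaming request, the behavior
        # test_streaming_fallback_to_non_streaming simulates.
        return llm.call_non_streaming(params)  # hypothetical helper
    if not full_response.strip():
        # Default message asserted in test_streaming_empty_response_handling.
        return (
            "I apologize, but I couldn't generate a proper response. "
            "Please try again or rephrase your request."
        )
    return full_response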