feat: async llm support

feat: introduce async contract to BaseLLM

feat: add async call support for:

- Azure provider
- Anthropic provider
- OpenAI provider
- Gemini provider
- Bedrock provider
- LiteLLM provider

chore: expand scrubbed header fields (conftest, anthropic, bedrock)

chore: update docs to cover async functionality

chore: update and harden tests to support acall; re-add uri for cassette compatibility

chore: generate missing cassette

fix: ensure acall is non-abstract and set supports_tools = True for supported Anthropic models

chore: improve Bedrock async docstring and general test robustness
Author: Greyson LaLonde
Date: 2025-12-01 18:56:56 -05:00
Committed by: GitHub
Parent: 59180e9c9f
Commit: 20704742e2
70 changed files with 8586 additions and 151 deletions
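
For orientation before the diffs: the commit message says acall is non-abstract on BaseLLM, so every provider gains an async entry point even without a native async client. Below is a minimal sketch of what such a contract can look like. It is an illustration only; apart from the call/acall names, the signatures and the run-in-a-thread fallback are assumptions, not taken from this commit.

```python
# Hypothetical sketch, not the crewai source: a non-abstract `acall`
# on the base class so every provider gains async support, with
# providers that have native async SDKs overriding it.
import asyncio
from abc import ABC, abstractmethod


class BaseLLM(ABC):
    @abstractmethod
    def call(self, messages: str | list[dict[str, str]]) -> str:
        """Synchronous completion; each provider implements this."""

    async def acall(self, messages: str | list[dict[str, str]]) -> str:
        """Async completion.

        Deliberately non-abstract: providers without a native async
        client inherit this default, which runs the sync `call` in a
        worker thread instead of blocking the event loop.
        """
        return await asyncio.to_thread(self.call, messages)
```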


@@ -475,10 +475,14 @@ def test_openai_get_client_params_priority_order():
     params3 = llm3._get_client_params()
     assert params3["base_url"] == "https://env.openai.com/v1"
 
-def test_openai_get_client_params_no_base_url():
+def test_openai_get_client_params_no_base_url(monkeypatch):
     """
     Test that _get_client_params works correctly when no base_url is specified
     """
+    # Clear env vars that could set base_url
+    monkeypatch.delenv("OPENAI_BASE_URL", raising=False)
+    monkeypatch.delenv("OPENAI_API_BASE", raising=False)
+
     llm = OpenAICompletion(model="gpt-4o")
     client_params = llm._get_client_params()
     # When no base_url is provided, it should not be in the params (filtered out as None)
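
The monkeypatch fixture scopes the deletions to this one test, so a developer's local OPENAI_BASE_URL or OPENAI_API_BASE can no longer leak into the assertion. A hedged sketch of the complementary check follows, reusing the OpenAICompletion import from this test module; the test name is hypothetical, but the asserted behavior matches the priority-order test above, where an env-provided URL surfaces as base_url.

```python
# Hypothetical counterpart (not in this commit): a base URL set via the
# environment should surface in the client params, as the
# priority-order test above implies.
def test_openai_get_client_params_env_base_url(monkeypatch):
    monkeypatch.setenv("OPENAI_BASE_URL", "https://env.openai.com/v1")
    llm = OpenAICompletion(model="gpt-4o")
    assert llm._get_client_params()["base_url"] == "https://env.openai.com/v1"
```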


@@ -0,0 +1,139 @@
"""Tests for OpenAI async completion functionality."""
import pytest
import tiktoken
from crewai.llm import LLM
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_basic_call():
"""Test basic async call with OpenAI."""
llm = LLM(model="gpt-4o-mini")
result = await llm.acall("Say hello")
assert result is not None
assert isinstance(result, str)
assert len(result) > 0
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_temperature():
"""Test async call with temperature parameter."""
llm = LLM(model="gpt-4o-mini", temperature=0.1)
result = await llm.acall("Say the word 'test' once")
assert result is not None
assert isinstance(result, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_max_tokens():
"""Test async call with max_tokens parameter."""
llm = LLM(model="gpt-4o-mini", max_tokens=10)
result = await llm.acall("Write a very long story about a dragon.")
assert result is not None
assert isinstance(result, str)
encoder = tiktoken.get_encoding("cl100k_base")
token_count = len(encoder.encode(result))
assert token_count <= 10
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_system_message():
"""Test async call with system message."""
llm = LLM(model="gpt-4o-mini")
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is 2+2?"}
]
result = await llm.acall(messages)
assert result is not None
assert isinstance(result, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_conversation():
"""Test async call with conversation history."""
llm = LLM(model="gpt-4o-mini")
messages = [
{"role": "user", "content": "My name is Alice."},
{"role": "assistant", "content": "Hello Alice! Nice to meet you."},
{"role": "user", "content": "What is my name?"}
]
result = await llm.acall(messages)
assert result is not None
assert isinstance(result, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_multiple_calls():
"""Test making multiple async calls in sequence."""
llm = LLM(model="gpt-4o-mini")
result1 = await llm.acall("What is 1+1?")
result2 = await llm.acall("What is 2+2?")
assert result1 is not None
assert result2 is not None
assert isinstance(result1, str)
assert isinstance(result2, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_response_format_none():
"""Test async call with response_format set to None."""
llm = LLM(model="gpt-4o-mini", response_format=None)
result = await llm.acall("Tell me a short fact")
assert result is not None
assert isinstance(result, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_response_format_json():
"""Test async call with JSON response format."""
llm = LLM(model="gpt-4o-mini", response_format={"type": "json_object"})
result = await llm.acall("Return a JSON object with a 'greeting' field")
assert result is not None
assert isinstance(result, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_openai_async_with_parameters():
"""Test async call with multiple parameters."""
llm = LLM(
model="gpt-4o-mini",
temperature=0.7,
max_tokens=100,
top_p=0.9,
frequency_penalty=0.5,
presence_penalty=0.3
)
result = await llm.acall("Tell me a short fact")
assert result is not None
assert isinstance(result, str)
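
The tests above await acall one call at a time; the practical payoff of the async contract is concurrent fan-out. A brief usage sketch, under the assumption that acall behaves as in the tests (the model name and prompts are illustrative):

```python
# Sketch: fanning out several acall requests concurrently with
# asyncio.gather, instead of awaiting them back-to-back.
import asyncio

from crewai.llm import LLM


async def main() -> None:
    llm = LLM(model="gpt-4o-mini")
    prompts = ["What is 1+1?", "What is 2+2?", "What is 3+3?"]
    # All requests are in flight at once; gather preserves input order.
    results = await asyncio.gather(*(llm.acall(p) for p in prompts))
    for prompt, result in zip(prompts, results):
        print(f"{prompt} -> {result}")


if __name__ == "__main__":
    asyncio.run(main())
```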