Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 00:28:31 +00:00
feat: async llm support
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Check Documentation Broken Links / Check broken links (push) Has been cancelled
Notify Downstream / notify-downstream (push) Has been cancelled
Build uv cache / build-cache (3.10) (push) Has been cancelled
Build uv cache / build-cache (3.11) (push) Has been cancelled
Build uv cache / build-cache (3.12) (push) Has been cancelled
Build uv cache / build-cache (3.13) (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
feat: introduce async contract to BaseLLM
feat: add async call support for: Azure provider, Anthropic provider, OpenAI provider, Gemini provider, Bedrock provider, LiteLLM provider
chore: expand scrubbed header fields (conftest, anthropic, bedrock)
chore: update docs to cover async functionality
chore: update and harden tests to support acall; re-add uri for cassette compatibility
chore: generate missing cassette
fix: ensure acall is non-abstract and set supports_tools = true for supported Anthropic models
chore: improve Bedrock async docstring and general test robustness
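
For orientation, a minimal sketch of what a non-abstract async contract on a base class can look like. The names BaseLLM, call, and acall come from this commit's message and tests; the asyncio.to_thread fallback body is an assumed default shown for illustration, not necessarily this commit's actual implementation:

import asyncio
from abc import ABC, abstractmethod


class BaseLLM(ABC):
    """Illustrative sketch only; not the actual crewAI source."""

    @abstractmethod
    def call(self, messages: str | list[dict[str, str]]) -> str:
        """Synchronous completion entry point (provider-specific)."""

    async def acall(self, messages: str | list[dict[str, str]]) -> str:
        # Non-abstract by design: providers without a native async client
        # inherit a working default that runs the sync call in a thread.
        return await asyncio.to_thread(self.call, messages)

Providers with native async clients (as added here for Azure, Anthropic, OpenAI, Gemini, Bedrock, and LiteLLM) would override acall directly rather than rely on such a fallback.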
lib/crewai/tests/llms/google/test_google_async.py (new file, 114 lines)
@@ -0,0 +1,114 @@
"""Tests for Google (Gemini) async completion functionality."""

import pytest
import tiktoken

from crewai.llm import LLM
from crewai.llms.providers.gemini.completion import GeminiCompletion


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_basic_call():
    """Test basic async call with Gemini."""
    llm = LLM(model="gemini/gemini-3-pro-preview")

    result = await llm.acall("Say hello")

    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_with_temperature():
    """Test async call with temperature parameter."""
    llm = LLM(model="gemini/gemini-3-pro-preview", temperature=0.1)

    result = await llm.acall("Say the word 'test' once")

    assert result is not None
    assert isinstance(result, str)


@pytest.mark.asyncio
@pytest.mark.vcr
async def test_gemini_async_with_max_tokens():
    """Test async call with max_tokens parameter."""
    llm = GeminiCompletion(model="gemini-3-pro-preview", max_output_tokens=1000)

    result = await llm.acall("Write a very short story about a dragon.")

    assert result is not None
    assert isinstance(result, str)
    encoder = tiktoken.get_encoding("cl100k_base")
    token_count = len(encoder.encode(result))
    assert token_count <= 1000


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_with_system_message():
    """Test async call with system message."""
    llm = LLM(model="gemini/gemini-3-pro-preview")

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2+2?"}
    ]

    result = await llm.acall(messages)

    assert result is not None
    assert isinstance(result, str)


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_conversation():
    """Test async call with conversation history."""
    llm = LLM(model="gemini/gemini-3-pro-preview")

    messages = [
        {"role": "user", "content": "My name is Alice."},
        {"role": "assistant", "content": "Hello Alice! Nice to meet you."},
        {"role": "user", "content": "What is my name?"}
    ]

    result = await llm.acall(messages)

    assert result is not None
    assert isinstance(result, str)


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_multiple_calls():
    """Test making multiple async calls in sequence."""
    llm = LLM(model="gemini/gemini-3-pro-preview")

    result1 = await llm.acall("What is 1+1?")
    result2 = await llm.acall("What is 2+2?")

    assert result1 is not None
    assert result2 is not None
    assert isinstance(result1, str)
    assert isinstance(result2, str)


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_gemini_async_with_parameters():
    """Test async call with multiple parameters."""
    llm = LLM(
        model="gemini/gemini-3-pro-preview",
        temperature=0.7,
        max_output_tokens=1000,
        top_p=0.9
    )

    result = await llm.acall("Tell me a short fact")

    assert result is not None
    assert isinstance(result, str)
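
The test_gemini_async_multiple_calls test above awaits its calls one at a time; the practical payoff of acall is that independent requests can also run concurrently. A brief sketch under the same assumptions (LLM and acall exactly as used in these tests; asyncio.gather is standard library):

import asyncio

from crewai.llm import LLM


async def ask_both() -> list[str]:
    # Fire two independent completions concurrently instead of awaiting
    # them back to back; gather returns results in argument order.
    llm = LLM(model="gemini/gemini-3-pro-preview")
    return await asyncio.gather(
        llm.acall("What is 1+1?"),
        llm.acall("What is 2+2?"),
    )


if __name__ == "__main__":
    print(asyncio.run(ask_both()))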