Compare commits

2 Commits

Author           SHA1        Message                                            Date
Brandon Hancock  c89f475c4c  Fix type issue                                     2024-12-09 15:27:36 -05:00
Brandon Hancock  0c5789f1e8  copy googles changes. Fix tests. Improve LLM file  2024-12-09 15:18:44 -05:00
2 changed files with 39 additions and 6 deletions

File 1 of 2

@@ -29,7 +29,7 @@ Large Language Models (LLMs) are the core intelligence behind CrewAI agents. The
## Available Models and Their Capabilities
-Here's a detailed breakdown of supported models and their capabilities:
+Here's a detailed breakdown of supported models and their capabilities; you can compare performance at [lmarena.ai](https://lmarena.ai/):
<Tabs>
<Tab title="OpenAI">
@@ -43,6 +43,17 @@ Here's a detailed breakdown of supported models and their capabilities:
1 token ≈ 4 characters in English. For example, 8,192 tokens ≈ 32,768 characters or about 6,000 words.
</Note>
</Tab>
+<Tab title="Gemini">
+| Model | Context Window | Best For |
+|-------|---------------|-----------|
+| Gemini 1.5 Flash | 1M tokens | Balanced multimodal model, good for most tasks |
+| Gemini 1.5 Flash 8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
+| Gemini 1.5 Pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
+<Tip>
+Google's Gemini models are all multimodal, supporting audio, images, video, and text, with support for context caching, JSON schema, function calling, and more.
+</Tip>
+</Tab>
<Tab title="Groq">
| Model | Context Window | Best For |
|-------|---------------|-----------|
@@ -128,10 +139,10 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
# llm: anthropic/claude-2.1
# llm: anthropic/claude-2.0
-# Google Models - Good for general tasks
+# Google Models - Strong reasoning, large cacheable context window, multimodal
-# llm: gemini/gemini-pro
# llm: gemini/gemini-1.5-pro-latest
-# llm: gemini/gemini-1.0-pro-latest
+# llm: gemini/gemini-1.5-flash-latest
+# llm: gemini/gemini-1.5-flash-8b-latest
# AWS Bedrock Models - Enterprise-grade
# llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
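
The commented `llm:` options above drop straight into an agent definition. Here is a minimal sketch of an `agents.yaml` entry using one of the new Gemini strings; every field except `llm` is illustrative, not taken from this diff:

```yaml
# Hypothetical agents.yaml entry; only the llm string comes from this diff.
researcher:
  role: Research Analyst
  goal: Summarize long technical documents
  backstory: An analyst who digests large reports quickly.
  llm: gemini/gemini-1.5-flash-latest
```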
@@ -350,13 +361,18 @@ Learn how to get the most out of your LLM configuration:
<Accordion title="Google">
```python Code
+# Option 1. Gemini accessed with an API key.
+# https://ai.google.dev/gemini-api/docs/api-key
GEMINI_API_KEY=<your-api-key>
+# Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
+# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
```
Example usage:
```python Code
llm = LLM(
-    model="gemini/gemini-pro",
+    model="gemini/gemini-1.5-pro-latest",
    temperature=0.7
)
```
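
To round out the docs example, a hedged sketch of handing that `llm` object to an agent; the `Agent` field values shown are illustrative, not part of this diff:

```python
from crewai import LLM, Agent

# Gemini 1.5 Pro via a LiteLLM-style model string, as in the docs above.
llm = LLM(
    model="gemini/gemini-1.5-pro-latest",
    temperature=0.7,
)

# Agents accept a configured LLM directly (field values are illustrative).
analyst = Agent(
    role="Research Analyst",
    goal="Summarize long technical documents",
    backstory="An analyst who digests large reports quickly.",
    llm=llm,
)
```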

File 2 of 2

@@ -43,6 +43,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
"gpt-4-turbo": 128000, "gpt-4-turbo": 128000,
"o1-preview": 128000, "o1-preview": 128000,
"o1-mini": 128000, "o1-mini": 128000,
# gemini
"gemini-1.5-pro": 2097152,
"gemini-1.5-flash": 1048576,
"gemini-1.5-flash-8b": 1048576,
# deepseek # deepseek
"deepseek-chat": 128000, "deepseek-chat": 128000,
# groq # groq
@@ -61,6 +65,9 @@ LLM_CONTEXT_WINDOW_SIZES = {
"mixtral-8x7b-32768": 32768, "mixtral-8x7b-32768": 32768,
} }
DEFAULT_CONTEXT_WINDOW_SIZE = 8192
CONTEXT_WINDOW_USAGE_RATIO = 0.75
@contextmanager
def suppress_warnings():
@@ -124,6 +131,7 @@ class LLM:
        self.api_version = api_version
        self.api_key = api_key
        self.callbacks = callbacks
+        self.context_window_size = 0
        self.kwargs = kwargs

        litellm.drop_params = True
@@ -191,7 +199,16 @@ class LLM:
    def get_context_window_size(self) -> int:
        # Only using 75% of the context window size to avoid cutting the message in the middle
-        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model, 8192) * 0.75)
+        if self.context_window_size != 0:
+            return self.context_window_size
+        self.context_window_size = int(
+            DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
+        )
+        for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
+            if self.model.startswith(key):
+                self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+        return self.context_window_size
    def set_callbacks(self, callbacks: List[Any]):
        callback_types = [type(callback) for callback in callbacks]
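
The rewritten lookup replaces the exact-match `dict.get` with a prefix match and caches the result on the instance, so versioned names like `gemini-1.5-pro-latest` still resolve to the `gemini-1.5-pro` entry. A standalone sketch of that logic, with the constants copied from this diff (it assumes `model` holds a bare model name without a provider prefix; `usable_window` is a hypothetical helper, not part of the class):

```python
# Abbreviated copy of the table from this diff; the full table lives in llm.py.
LLM_CONTEXT_WINDOW_SIZES = {
    "gpt-4-turbo": 128000,
    "gemini-1.5-pro": 2097152,
    "gemini-1.5-flash": 1048576,
}
DEFAULT_CONTEXT_WINDOW_SIZE = 8192
CONTEXT_WINDOW_USAGE_RATIO = 0.75

def usable_window(model: str) -> int:
    """Mirror get_context_window_size(): prefix match, else the default."""
    size = DEFAULT_CONTEXT_WINDOW_SIZE
    for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
        if model.startswith(key):
            size = value
    return int(size * CONTEXT_WINDOW_USAGE_RATIO)

print(usable_window("gemini-1.5-pro-latest"))  # 1572864 (75% of 2,097,152)
print(usable_window("some-unknown-model"))     # 6144 (75% of 8,192)
```

One subtlety: the loop has no `break`, so when several keys prefix-match the same model string, the last matching table entry wins; here the two Gemini Flash entries share a value, so the ordering happens to be harmless.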