Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-07 07:08:31 +00:00

Compare commits: theCyberTe... → devin/1747... (6 commits)
Commits in this comparison: 2afe25bd71, d692311aca, cb1a98cabf, 369e6d109c, 2c011631f9, d3fc2b4477
@@ -169,19 +169,55 @@ In this section, you'll find detailed examples that help you select, configure,
```
</Accordion>

<Accordion title="Google">
Set the following environment variables in your `.env` file:
<Accordion title="Google (Gemini API)">
Set your API key in your `.env` file. If you need a key, or need to find an
existing key, check [AI Studio](https://aistudio.google.com/apikey).

```toml Code
# Option 1: Gemini accessed with an API key.
```toml .env
# https://ai.google.dev/gemini-api/docs/api-key
GEMINI_API_KEY=<your-api-key>

# Option 2: Vertex AI IAM credentials for Gemini, Anthropic, and Model Garden.
# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
```

Get credentials from your Google Cloud Console and save it to a JSON file with the following code:
Example usage in your CrewAI project:
```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7,
)
```

### Gemini models

Google offers a range of powerful models optimized for different use cases.

| Model | Context Window | Best For |
|--------------------------------|----------------|-------------------------------------------------------------------|
| gemini-2.5-flash-preview-04-17 | 1M tokens | Adaptive thinking, cost efficiency |
| gemini-2.5-pro-preview-05-06 | 1M tokens | Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more |
| gemini-2.0-flash | 1M tokens | Next generation features, speed, thinking, and realtime streaming |
| gemini-2.0-flash-lite | 1M tokens | Cost efficiency and low latency |
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |

The full list of models is available in the [Gemini model docs](https://ai.google.dev/gemini-api/docs/models).

### Gemma

The Gemini API also allows you to use your API key to access [Gemma models](https://ai.google.dev/gemma/docs) hosted on Google infrastructure.

| Model | Context Window |
|----------------|----------------|
| gemma-3-1b-it | 32k tokens |
| gemma-3-4b-it | 32k tokens |
| gemma-3-12b-it | 32k tokens |
| gemma-3-27b-it | 128k tokens |

</Accordion>
<Accordion title="Google (Vertex AI)">
Get credentials from your Google Cloud Console and save it to a JSON file, then load it with the following code:
```python Code
import json

@@ -205,14 +241,18 @@ In this section, you'll find detailed examples that help you select, configure,
    vertex_credentials=vertex_credentials_json
)
```

Google offers a range of powerful models optimized for different use cases:

| Model | Context Window | Best For |
|-----------------------|----------------|------------------------------------------------------------------|
| gemini-2.0-flash-exp | 1M tokens | Higher quality at faster speed, multimodal model, good for most tasks |
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |

| Model | Context Window | Best For |
|--------------------------------|----------------|-------------------------------------------------------------------|
| gemini-2.5-flash-preview-04-17 | 1M tokens | Adaptive thinking, cost efficiency |
| gemini-2.5-pro-preview-05-06 | 1M tokens | Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more |
| gemini-2.0-flash | 1M tokens | Next generation features, speed, thinking, and realtime streaming |
| gemini-2.0-flash-lite | 1M tokens | Cost efficiency and low latency |
| gemini-1.5-flash | 1M tokens | Balanced multimodal model, good for most tasks |
| gemini-1.5-flash-8B | 1M tokens | Fastest, most cost-efficient, good for high-frequency tasks |
| gemini-1.5-pro | 2M tokens | Best performing, wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
</Accordion>

<Accordion title="Azure">

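The Vertex AI accordion above passes service-account credentials to `LLM` as a JSON string via `vertex_credentials`, but the middle of that snippet falls outside the hunk. Purely as a hedged illustration (not the docs' exact code), loading a downloaded key file and wiring it in might look like the sketch below; the `vertex_credentials.json` path and the `gemini-1.5-pro` model choice are assumptions.

```python Code
import json

from crewai import LLM

# Assumed local path to a service-account key exported from the Google Cloud Console.
file_path = "vertex_credentials.json"

# Read the key file and re-serialize it to a JSON string for the LLM client.
with open(file_path, "r") as file:
    vertex_credentials = json.load(file)
vertex_credentials_json = json.dumps(vertex_credentials)

llm = LLM(
    model="gemini-1.5-pro",  # any model from the table above
    temperature=0.7,
    vertex_credentials=vertex_credentials_json,
)
```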
@@ -68,7 +68,13 @@ We'll create a CrewAI application where two agents collaborate to research and w
```python
from crewai import Agent, Crew, Process, Task
from crewai_tools import SerperDevTool
from openinference.instrumentation.crewai import CrewAIInstrumentor
from phoenix.otel import register

# setup monitoring for your crew
tracer_provider = register(
    endpoint="http://localhost:6006/v1/traces")
CrewAIInstrumentor().instrument(skip_dep_check=True, tracer_provider=tracer_provider)
search_tool = SerperDevTool()

# Define your agents with roles and goals

@@ -13,7 +13,7 @@ ENV_VARS = {
    ],
    "gemini": [
        {
            "prompt": "Enter your GEMINI API key (press Enter to skip)",
            "prompt": "Enter your GEMINI API key from https://ai.dev/apikey (press Enter to skip)",
            "key_name": "GEMINI_API_KEY",
        }
    ],

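The `ENV_VARS` change above only rewords the Gemini prompt, but it shows the shape of each entry: a `prompt` to display and a `key_name` to write. Purely as a hypothetical illustration (this is not the actual crewAI CLI code), a loop that consumes such entries with `click` could look like this:

```python
import click

# Hypothetical excerpt mirroring the structure shown in the diff above.
ENV_VARS = {
    "gemini": [
        {
            "prompt": "Enter your GEMINI API key from https://ai.dev/apikey (press Enter to skip)",
            "key_name": "GEMINI_API_KEY",
        }
    ],
}


def collect_provider_keys(provider: str, env_file: str = ".env") -> None:
    """Prompt for each configured key and append non-empty answers to the env file."""
    for entry in ENV_VARS.get(provider, []):
        value = click.prompt(entry["prompt"], default="", show_default=False)
        if value:
            with open(env_file, "a") as f:
                f.write(f'{entry["key_name"]}={value}\n')
```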
@@ -1,4 +1,6 @@
import json
import logging
import os
import time
from collections import defaultdict
from pathlib import Path

@@ -8,6 +10,8 @@ import requests
from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS

logger = logging.getLogger(__name__)


def select_choice(prompt_message, choices):
    """

@@ -157,22 +161,74 @@ def fetch_provider_data(cache_file):
    """
    Fetches provider data from a specified URL and caches it to a file.

    Warning: This function includes a fallback that disables SSL verification.
    This should only be used in development environments or when absolutely necessary.
    Production deployments should resolve SSL certificate issues properly.

    Args:
    - cache_file (Path): The path to the cache file.

    Returns:
    - dict or None: The fetched provider data or None if the operation fails.
    """
    allow_insecure = os.getenv("CREW_ALLOW_INSECURE_SSL", "false").lower() == "true"

    try:
        response = requests.get(JSON_URL, stream=True, timeout=60)
        verify = not allow_insecure
        if not verify:
            logger.warning(
                "SSL verification disabled via CREW_ALLOW_INSECURE_SSL environment variable. "
                "This is less secure and should only be used in development environments."
            )
            click.secho(
                "SSL verification disabled via environment variable. "
                "This is less secure and should only be used in development environments.",
                fg="yellow",
            )

        response = requests.get(JSON_URL, stream=True, timeout=60, verify=verify)
        response.raise_for_status()
        data = download_data(response)
        with open(cache_file, "w") as f:
            json.dump(data, f)
        return data
    except requests.exceptions.SSLError:
        if not allow_insecure:
            logger.warning(
                "SSL certificate verification failed. Retrying with verification disabled. "
                "This is less secure but may be necessary on some systems."
            )
            click.secho(
                "SSL certificate verification failed. Retrying with verification disabled. "
                "This is less secure but may be necessary on some systems.",
                fg="yellow",
            )
        try:
            os.environ["CREW_TEMP_ALLOW_INSECURE"] = "true"
            response = requests.get(
                JSON_URL,
                stream=True,
                timeout=60,
                verify=False,  # nosec B501
            )
            os.environ.pop("CREW_TEMP_ALLOW_INSECURE", None)

            response.raise_for_status()
            data = download_data(response)
            with open(cache_file, "w") as f:
                json.dump(data, f)
            return data
        except requests.RequestException as e:
            logger.error(f"Error fetching provider data: {e}")
            click.secho(f"Error fetching provider data: {e}", fg="red")
            return None
        finally:
            os.environ.pop("CREW_TEMP_ALLOW_INSECURE", None)
    except requests.RequestException as e:
        logger.error(f"Error fetching provider data: {e}")
        click.secho(f"Error fetching provider data: {e}", fg="red")
    except json.JSONDecodeError:
        logger.error("Error parsing provider data. Invalid JSON format.")
        click.secho("Error parsing provider data. Invalid JSON format.", fg="red")
    return None

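The new docstring and the `CREW_ALLOW_INSECURE_SSL` flag above are the user-facing surface of this change. A minimal usage sketch, assuming a crewAI checkout where `crewai.cli.provider` is importable and a hypothetical local cache path:

```python
import os
from pathlib import Path

from crewai.cli.provider import fetch_provider_data

# Development-only escape hatch described in the docstring; leave unset in production.
os.environ["CREW_ALLOW_INSECURE_SSL"] = "true"

cache_file = Path("provider_cache.json")  # assumed cache location for this example
data = fetch_provider_data(cache_file)
if data is None:
    print("Provider data could not be fetched; see the logged warnings for details.")
```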
tests/cli/provider_test.py (new file, 109 lines)
@@ -0,0 +1,109 @@
import json
import os
import tempfile
from pathlib import Path
from unittest import mock

import pytest
import requests
from requests.exceptions import SSLError

from crewai.cli.provider import fetch_provider_data, get_provider_data


class TestProviderFunctions:
    @mock.patch("crewai.cli.provider.requests.get")
    def test_fetch_provider_data_success(self, mock_get):
        mock_response = mock.MagicMock()
        mock_response.headers.get.return_value = "100"
        mock_response.iter_content.return_value = [b'{"test": "data"}']
        mock_get.return_value = mock_response

        with tempfile.NamedTemporaryFile() as temp_file:
            cache_file = Path(temp_file.name)
            result = fetch_provider_data(cache_file)

        assert result == {"test": "data"}
        mock_get.assert_called_once()

    @mock.patch("crewai.cli.provider.requests.get")
    @mock.patch("crewai.cli.provider.click.secho")
    def test_fetch_provider_data_ssl_error_fallback(self, mock_secho, mock_get):
        mock_response = mock.MagicMock()
        mock_response.headers.get.return_value = "100"
        mock_response.iter_content.return_value = [b'{"test": "data"}']

        mock_get.side_effect = [
            SSLError("certificate verify failed: unable to get local issuer certificate"),
            mock_response
        ]

        with tempfile.NamedTemporaryFile() as temp_file:
            cache_file = Path(temp_file.name)
            result = fetch_provider_data(cache_file)

        assert result == {"test": "data"}
        assert mock_get.call_count == 2

        assert mock_get.call_args_list[1][1]["verify"] is False

        mock_secho.assert_any_call(
            "SSL certificate verification failed. Retrying with verification disabled. "
            "This is less secure but may be necessary on some systems.",
            fg="yellow"
        )

    @mock.patch("crewai.cli.provider.requests.get")
    @mock.patch("crewai.cli.provider.click.secho")
    @mock.patch.dict(os.environ, {"CREW_ALLOW_INSECURE_SSL": "true"})
    def test_fetch_provider_data_with_insecure_env_var(self, mock_secho, mock_get):
        mock_response = mock.MagicMock()
        mock_response.headers.get.return_value = "100"
        mock_response.iter_content.return_value = [b'{"test": "data"}']
        mock_get.return_value = mock_response

        with tempfile.NamedTemporaryFile() as temp_file:
            cache_file = Path(temp_file.name)
            result = fetch_provider_data(cache_file)

        assert result == {"test": "data"}
        mock_get.assert_called_once()

        assert mock_get.call_args[1]["verify"] is False

        mock_secho.assert_any_call(
            "SSL verification disabled via environment variable. "
            "This is less secure and should only be used in development environments.",
            fg="yellow"
        )

    @mock.patch("crewai.cli.provider.requests.get")
    def test_fetch_provider_data_with_empty_response(self, mock_get):
        mock_response = mock.MagicMock()
        mock_response.headers.get.return_value = "0"
        mock_response.iter_content.return_value = [b'{}']
        mock_get.return_value = mock_response

        with tempfile.NamedTemporaryFile() as temp_file:
            cache_file = Path(temp_file.name)
            result = fetch_provider_data(cache_file)

        assert result == {}
        mock_get.assert_called_once()

    @mock.patch("crewai.cli.provider.requests.get")
    @mock.patch("crewai.cli.provider.click.secho")
    def test_fetch_provider_data_request_exception(self, mock_secho, mock_get):
        mock_get.side_effect = requests.RequestException("Connection error")

        with tempfile.NamedTemporaryFile() as temp_file:
            cache_file = Path(temp_file.name)
            result = fetch_provider_data(cache_file)

        assert result is None
        mock_get.assert_called_once()

        mock_secho.assert_any_call(
            "Error fetching provider data: Connection error",
            fg="red"
        )
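These tests exercise the new SSL fallback by mocking `requests.get` and `click.secho`, so they run without network access; from the repository root they should be runnable with `pytest tests/cli/provider_test.py` (assuming pytest and the project's development dependencies are installed).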