Compare commits

...

3 Commits

Author SHA1 Message Date
Devin AI
583e6584eb feat: Add model name validation and expand test coverage
- Add validation for Azure and Cerebras model names
- Add validation handling in create_crew.py
- Expand test coverage for model env var cases

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-15 16:12:22 +00:00
Devin AI
e1ed85d7bd docs: Add comments and expand test coverage
- Add documentation comments for MODEL env var
- Expand test coverage for Azure MODEL env var
- Add test for missing MODEL env var case

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-15 16:09:03 +00:00
Devin AI
96b6d91084 fix: Use uppercase MODEL env var for Azure provider
- Update Azure provider config to use uppercase MODEL env var
- Add test case to verify MODEL env var handling
- Fixes #2139

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-15 16:06:09 +00:00
3 changed files with 47 additions and 4 deletions

View File

@@ -66,7 +66,8 @@ ENV_VARS = {
"azure": [
{
"prompt": "Enter your Azure deployment name (must start with 'azure/')",
"key_name": "model",
"key_name": "MODEL", # Uppercase MODEL used for consistency across environment variables
"validator": lambda x: x.startswith("azure/") or "Model name must start with 'azure/'"
},
{
"prompt": "Enter your AZURE API key (press Enter to skip)",
@@ -84,7 +85,8 @@ ENV_VARS = {
"cerebras": [
{
"prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
"key_name": "model",
"key_name": "MODEL", # Uppercase MODEL used for consistency across environment variables
"validator": lambda x: x.startswith("cerebras/") or "Model name must start with 'cerebras/'"
},
{
"prompt": "Enter your Cerebras API version (press Enter to skip)",

View File

@@ -157,10 +157,19 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
# Prompt for non-default key-value pairs
prompt = details["prompt"]
key_name = details["key_name"]
api_key_value = click.prompt(prompt, default="", show_default=False)
while True:
api_key_value = click.prompt(prompt, default="", show_default=False)
if not api_key_value.strip():
break
if "validator" in details:
validation_result = details["validator"](api_key_value)
if isinstance(validation_result, str):
click.secho(f"Invalid input: {validation_result}", fg="red")
continue
if api_key_value.strip():
env_vars[key_name] = api_key_value
break
if env_vars:
write_env_file(folder_path, env_vars)

View File

@@ -21,6 +21,38 @@ from crewai.utilities import RPMController
from crewai.utilities.events import Emitter
def test_agent_model_env_var():
    """Test MODEL environment variable handling with various cases.

    Verifies that Azure- and Cerebras-prefixed model names set via the MODEL
    env var are picked up by Agent's LLM, and that the absence of MODEL falls
    back to the default model.
    """
    # Preserve the caller's MODEL value. Compare to None later, not truthiness:
    # an empty string is a legitimate value that must also be restored.
    original_model = os.environ.get("MODEL")
    test_cases = [
        ("azure/test-model", "azure/test-model"),  # Valid Azure case
        ("azure/minimal", "azure/minimal"),  # Another valid Azure case
        ("cerebras/test-model", "cerebras/test-model"),  # Valid Cerebras case
        ("cerebras/minimal", "cerebras/minimal"),  # Another valid Cerebras case
    ]
    try:
        for input_model, expected_model in test_cases:
            # Set test MODEL value and confirm the agent's LLM uses it.
            os.environ["MODEL"] = input_model
            agent = Agent(role="test role", goal="test goal", backstory="test backstory")
            assert agent.llm.model == expected_model

        # Missing MODEL env var must fall back to the default model.
        os.environ.pop("MODEL", None)
        agent = Agent(role="test role", goal="test goal", backstory="test backstory")
        assert agent.llm.model == "gpt-4o-mini"  # Default model
    finally:
        # Restore the environment even when an assertion fails, so a failure
        # here cannot leak a mutated MODEL into unrelated tests.
        if original_model is not None:
            os.environ["MODEL"] = original_model
        else:
            os.environ.pop("MODEL", None)
def test_agent_llm_creation_with_env_vars():
# Store original environment variables
original_api_key = os.environ.get("OPENAI_API_KEY")