Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-07 15:18:29 +00:00

Compare commits: 4 commits, devin/1740 ... devin/1740
| Author | SHA1 | Date |
|---|---|---|
| | 386c916a52 | |
| | 2dc89e91d8 | |
| | 8a09aec05c | |
| | 7e8649c5e8 | |
@@ -6,7 +6,7 @@ from crewai.project import CrewBase, agent, crew, task

 # https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
 @CrewBase
-class {{crew_name}}():
+class {{crew_name}}:
     """{{crew_name}} crew"""

     # Learn more about YAML configuration files here:
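The only substantive change in this hunk drops the redundant empty parentheses from the template's class statement. In Python the two spellings are equivalent; a minimal sketch (class names are illustrative, not from the template):

# Both statements define an ordinary class that inherits from object.
class WithParens():
    pass

class WithoutParens:
    pass

# The two spellings are indistinguishable at runtime.
assert WithParens.__bases__ == WithoutParens.__bases__ == (object,)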
@@ -1,5 +1,4 @@
 from crewai.tools import BaseTool
-from typing import Type
 from pydantic import BaseModel, Field

@@ -7,12 +6,13 @@ class MyCustomToolInput(BaseModel):
     """Input schema for MyCustomTool."""
     argument: str = Field(..., description="Description of the argument.")


 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
     description: str = (
         "Clear description for what this tool is useful for, your agent will need this information to use it."
     )
-    args_schema: Type[BaseModel] = MyCustomToolInput
+    args_schema: type[BaseModel] = MyCustomToolInput

     def _run(self, argument: str) -> str:
         # Implementation goes here
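The change swaps `typing.Type[BaseModel]` for the builtin generic `type[BaseModel]` (PEP 585, available since Python 3.9), which is what makes the `from typing import Type` removal in the previous hunk safe. A minimal sketch of the equivalence, with illustrative names:

from pydantic import BaseModel, Field

class ExampleInput(BaseModel):
    """Illustrative input schema."""
    value: str = Field(..., description="An example field.")

# Since Python 3.9 (PEP 585), the builtin `type` is subscriptable,
# so `typing.Type` and its import are no longer needed.
schema: type[BaseModel] = ExampleInput
assert issubclass(schema, BaseModel)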
@@ -16,7 +16,7 @@ class PoemCrew:
     agents_config = "config/agents.yaml"
     tasks_config = "config/tasks.yaml"

-    # If you would lik to add tools to your crew, you can learn more about it here:
+    # If you would like to add tools to your crew, you can learn more about it here:
     # https://docs.crewai.com/concepts/agents#agent-tools
     @agent
     def poem_writer(self) -> Agent:
@@ -1,5 +1,3 @@
-from typing import Type
-
 from crewai.tools import BaseTool
 from pydantic import BaseModel, Field
@@ -15,7 +13,7 @@ class MyCustomTool(BaseTool):
     description: str = (
         "Clear description for what this tool is useful for, your agent will need this information to use it."
     )
-    args_schema: Type[BaseModel] = MyCustomToolInput
+    args_schema: type[BaseModel] = MyCustomToolInput

     def _run(self, argument: str) -> str:
         # Implementation goes here
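For context, a completed tool built on this template might be exercised as below. This is a hedged sketch: the tool name and behavior are invented, and it assumes crewai's BaseTool exposes a public run() wrapper around _run(), which this diff does not itself confirm.

from crewai.tools import BaseTool
from pydantic import BaseModel, Field

class EchoToolInput(BaseModel):
    """Input schema for EchoTool."""
    argument: str = Field(..., description="Text to echo back.")

class EchoTool(BaseTool):
    name: str = "Echo tool"
    description: str = "Returns its argument unchanged; handy for smoke tests."
    args_schema: type[BaseModel] = EchoToolInput

    def _run(self, argument: str) -> str:
        return f"echo: {argument}"

# Hypothetical usage; assumes BaseTool provides a public run() wrapper.
tool = EchoTool()
print(tool.run(argument="hello"))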
@@ -92,43 +92,9 @@ def suppress_warnings():


 class LLM:
-    """
-    A wrapper class for language model interactions using litellm.
-
-    This class provides a unified interface for interacting with various language models
-    through litellm. It handles model configuration, context window sizing, and callback
-    management.
-
-    Args:
-        model (str): The identifier for the language model to use. Must be a valid model ID
-            with a provider prefix (e.g., 'openai/gpt-4'). Cannot be a numeric value without
-            a provider prefix.
-        timeout (Optional[Union[float, int]]): The timeout for API calls in seconds.
-        temperature (Optional[float]): Controls randomness in the model's output.
-        top_p (Optional[float]): Controls diversity via nucleus sampling.
-        n (Optional[int]): Number of completions to generate.
-        stop (Optional[Union[str, List[str]]]): Sequences where the model should stop generating.
-        max_completion_tokens (Optional[int]): Maximum number of tokens to generate.
-        max_tokens (Optional[int]): Alias for max_completion_tokens.
-        presence_penalty (Optional[float]): Penalizes repeated tokens.
-        frequency_penalty (Optional[float]): Penalizes frequent tokens.
-        logit_bias (Optional[Dict[int, float]]): Modifies likelihood of specific tokens.
-        response_format (Optional[Dict[str, Any]]): Specifies the format for the model's response.
-        seed (Optional[int]): Seed for deterministic outputs.
-        logprobs (Optional[bool]): Whether to return log probabilities.
-        top_logprobs (Optional[int]): Number of most likely tokens to return probabilities for.
-        base_url (Optional[str]): Base URL for API calls.
-        api_version (Optional[str]): API version to use.
-        api_key (Optional[str]): API key for authentication.
-        callbacks (List[Any]): List of callback functions.
-        **kwargs: Additional keyword arguments to pass to the model.
-
-    Raises:
-        ValueError: If the model ID is empty, whitespace, or a numeric value without a provider prefix.
-    """
     def __init__(
         self,
-        model: Union[str, Any],
+        model: str,
         timeout: Optional[Union[float, int]] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
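This hunk reverts `model` from `Union[str, Any]` back to a plain `str` and removes the docstring added earlier on the branch. A hedged usage sketch, with illustrative parameter values drawn from the signature shown in the diff:

from crewai.llm import LLM

# Provider-prefixed string IDs such as "openai/gpt-4" are the documented form;
# timeout and temperature are optional keyword arguments from the signature above.
llm = LLM(model="openai/gpt-4", temperature=0.7, timeout=30)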
@@ -149,16 +115,6 @@ class LLM:
         callbacks: List[Any] = [],
         **kwargs,
     ):
-        # Only validate model ID if it's not None and is a numeric value without a provider prefix
-        if model is not None and (
-            isinstance(model, (int, float)) or
-            (isinstance(model, str) and model.strip() and model.strip().isdigit())
-        ):
-            raise ValueError(
-                f"Invalid model ID: {model}. Model ID cannot be a numeric value without a provider prefix. "
-                "Please specify a valid model ID with a provider prefix, e.g., 'openai/gpt-4'."
-            )
-
         self.model = model
         self.timeout = timeout
         self.temperature = temperature
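Together with the docstring, this removes the numeric-model-ID guard. For reference, the deleted check is equivalent to the following standalone predicate (the function name is ours):

def is_bare_numeric_model_id(model) -> bool:
    """True when a model ID is a number with no provider prefix, e.g. 3420 or "3420"."""
    if model is None:
        return False
    if isinstance(model, (int, float)):
        return True
    return isinstance(model, str) and bool(model.strip()) and model.strip().isdigit()

assert is_bare_numeric_model_id(3420)
assert is_bare_numeric_model_id("3420")
assert not is_bare_numeric_model_id("openai/gpt-4")
assert not is_bare_numeric_model_id(None)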
@@ -230,10 +186,7 @@ class LLM:

     def supports_function_calling(self) -> bool:
         try:
-            # Handle None model case
-            if self.model is None:
-                return False
-            params = get_supported_openai_params(model=str(self.model))
+            params = get_supported_openai_params(model=self.model)
             return "response_format" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
@@ -241,10 +194,7 @@ class LLM:

     def supports_stop_words(self) -> bool:
         try:
-            # Handle None model case
-            if self.model is None:
-                return False
-            params = get_supported_openai_params(model=str(self.model))
+            params = get_supported_openai_params(model=self.model)
             return "stop" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
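Both capability probes delegate to litellm's get_supported_openai_params, which reports the OpenAI-style parameters a model accepts; the revert drops the None-guard and str() coercion on the assumption that `self.model` is always a string. A minimal standalone sketch of the same probe:

from litellm import get_supported_openai_params

# Returns the OpenAI-style parameter names supported by the model.
params = get_supported_openai_params(model="gpt-4")
print("response_format" in params)  # used above as a proxy for function-calling support
print("stop" in params)             # stop-word support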
@@ -258,10 +208,8 @@ class LLM:
         self.context_window_size = int(
             DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
         )
-        # Ensure model is a string before calling startswith
-        model_str = str(self.model) if not isinstance(self.model, str) else self.model
         for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
-            if model_str.startswith(key):
+            if self.model.startswith(key):
                 self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
         return self.context_window_size
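The same revert applies here: the str() coercion and its comment go away, leaving a prefix match of the model ID against a table of context window sizes, scaled by a usage ratio. A self-contained sketch with illustrative constants (the real values live alongside the LLM class):

# Illustrative values; the real constants live in crewai's LLM module.
DEFAULT_CONTEXT_WINDOW_SIZE = 8192
CONTEXT_WINDOW_USAGE_RATIO = 0.75
LLM_CONTEXT_WINDOW_SIZES = {"gpt-4": 8192, "gpt-4-turbo": 128_000}

def get_context_window_size(model: str) -> int:
    # Fall back to the scaled default, then take the last matching prefix.
    size = int(DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO)
    for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
        if model.startswith(key):
            size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
    return size

assert get_context_window_size("gpt-4-turbo") == 96_000
assert get_context_window_size("unknown-model") == 6144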
tests/test_project_formatting.py (new file, 64 lines)
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.cli.create_crew import create_crew
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir():
|
||||
"""Create a temporary directory for testing."""
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
yield temp_dir
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
|
||||
def test_project_formatting(temp_dir):
|
||||
"""Test that created projects follow PEP8 conventions."""
|
||||
# Change to the temporary directory
|
||||
original_dir = os.getcwd()
|
||||
os.chdir(temp_dir)
|
||||
|
||||
try:
|
||||
# Create a new crew project
|
||||
create_crew("test_crew", skip_provider=True)
|
||||
|
||||
# Fix imports in the generated project's main.py file
|
||||
main_py_path = Path(temp_dir) / "test_crew" / "src" / "test_crew" / "main.py"
|
||||
|
||||
# Use ruff to fix the imports
|
||||
subprocess.run(
|
||||
["ruff", "check", "--select=I", "--fix", str(main_py_path)],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
# Create a ruff configuration file
|
||||
ruff_config = """
|
||||
line-length = 120
|
||||
target-version = "py310"
|
||||
select = ["E", "F", "I", "UP", "A"]
|
||||
ignore = ["D203"]
|
||||
"""
|
||||
with open(Path(temp_dir) / "test_crew" / ".ruff.toml", "w") as f:
|
||||
f.write(ruff_config)
|
||||
|
||||
# Run ruff on the generated project code
|
||||
result = subprocess.run(
|
||||
["ruff", "check", "test_crew"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
# Check that there are no linting errors
|
||||
assert result.returncode == 0, f"Ruff found issues: {result.stdout}"
|
||||
# If ruff reports "All checks passed!" or empty output, that's good
|
||||
assert "All checks passed!" in result.stdout or not result.stdout.strip(), f"Ruff found issues: {result.stdout}"
|
||||
|
||||
finally:
|
||||
# Change back to the original directory
|
||||
os.chdir(original_dir)
|
||||
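Run on its own with `pytest tests/test_project_formatting.py`, the test stands or falls on ruff's exit code; outside pytest, the same gate reduces to the final subprocess call (a sketch assuming ruff is on PATH and a generated test_crew project exists in the current directory):

import subprocess

# Mirrors the test's final assertions: exit code 0 means no lint findings.
result = subprocess.run(["ruff", "check", "test_crew"], capture_output=True, text=True)
assert result.returncode == 0, f"Ruff found issues: {result.stdout}"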
@@ -1,43 +0,0 @@
-import pytest
-
-from crewai.llm import LLM
-
-
-@pytest.mark.parametrize(
-    "invalid_model,error_message",
-    [
-        (3420, "Invalid model ID: 3420. Model ID cannot be a numeric value without a provider prefix."),
-        ("3420", "Invalid model ID: 3420. Model ID cannot be a numeric value without a provider prefix."),
-        (3.14, "Invalid model ID: 3.14. Model ID cannot be a numeric value without a provider prefix."),
-    ],
-)
-def test_invalid_numeric_model_ids(invalid_model, error_message):
-    """Test that numeric model IDs are rejected."""
-    with pytest.raises(ValueError, match=error_message):
-        LLM(model=invalid_model)
-
-
-@pytest.mark.parametrize(
-    "valid_model",
-    [
-        "openai/gpt-4",
-        "gpt-3.5-turbo",
-        "anthropic/claude-2",
-    ],
-)
-def test_valid_model_ids(valid_model):
-    """Test that valid model IDs are accepted."""
-    llm = LLM(model=valid_model)
-    assert llm.model == valid_model
-
-
-def test_empty_model_id():
-    """Test that empty model IDs are rejected."""
-    with pytest.raises(ValueError, match="Invalid model ID: ''. Model ID cannot be empty or whitespace."):
-        LLM(model="")
-
-
-def test_whitespace_model_id():
-    """Test that whitespace model IDs are rejected."""
-    with pytest.raises(ValueError, match="Invalid model ID: ' '. Model ID cannot be empty or whitespace."):
-        LLM(model=" ")
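The deleted file is the test suite for the model-ID validation removed from crewai.llm above; the branch drops the feature and its tests together.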