Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-07 23:28:30 +00:00
Compare commits: devin/1740...devin/1744 (1 commit)

Commit 5afe6914eb
src/crewai/__init__.py:

@@ -4,6 +4,7 @@ from crewai.agent import Agent
 from crewai.crew import Crew
 from crewai.flow.flow import Flow
 from crewai.knowledge.knowledge import Knowledge
+from crewai.lite_agent import LiteAgent
 from crewai.llm import LLM
 from crewai.process import Process
 from crewai.task import Task
@@ -23,4 +24,5 @@ __all__ = [
     "LLM",
     "Flow",
     "Knowledge",
+    "LiteAgent",
 ]
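For context: with this export in place, LiteAgent becomes importable from the package root, which is exactly how the new test file later in this diff imports it:

    from crewai import LiteAgent, Task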
src/crewai/cli/constants.py:

@@ -1,18 +1,10 @@
-from typing import Dict, List
-
-ENV_VARS: Dict[str, List[Dict[str, str]]] = {
+ENV_VARS = {
     "openai": [
         {
             "prompt": "Enter your OPENAI API key (press Enter to skip)",
             "key_name": "OPENAI_API_KEY",
         }
     ],
-    "mistral": [
-        {
-            "prompt": "Enter your MISTRAL API key (press Enter to skip)",
-            "key_name": "MISTRAL_API_KEY",
-        }
-    ],
     "anthropic": [
         {
             "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
@@ -96,7 +88,7 @@ ENV_VARS: Dict[str, List[Dict[str, str]]] = {
 }
 
 
-PROVIDERS: List[str] = [
+PROVIDERS = [
     "openai",
     "anthropic",
     "gemini",
@@ -106,17 +98,10 @@ PROVIDERS: List[str] = [
     "bedrock",
     "azure",
     "cerebras",
-    "mistral", # Added in v0.86.0
 ]
 
-MODELS: Dict[str, List[str]] = {
+MODELS = {
     "openai": ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
-    "mistral": [
-        "mistral-tiny", # 7B model optimized for speed
-        "mistral-small", # 7B model balanced for performance
-        "mistral-medium", # 8x7B model for enhanced capabilities
-        "mistral-large", # Latest model with highest performance
-    ],
     "anthropic": [
         "claude-3-5-sonnet-20240620",
         "claude-3-sonnet-20240229",
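For orientation, a minimal sketch of how ENV_VARS entries of this shape can be consumed; the loop below is an illustration, not code from this diff:

    import click
    from crewai.cli.constants import ENV_VARS

    env_vars = {}
    for entry in ENV_VARS["openai"]:
        # Each entry pairs a human-readable prompt with the env var it fills.
        value = click.prompt(entry["prompt"], default="", show_default=False)
        if value.strip():
            env_vars[entry["key_name"]] = value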
src/crewai/cli/create_crew.py:

@@ -10,12 +10,7 @@ from crewai.cli.provider import (
     select_model,
     select_provider,
 )
-from crewai.cli.utils import (
-    copy_template,
-    load_env_vars,
-    validate_api_keys,
-    write_env_file,
-)
+from crewai.cli.utils import copy_template, load_env_vars, write_env_file
 
 
 def create_folder_structure(name, parent_folder=None):
@@ -167,13 +162,9 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
         if api_key_value.strip():
             env_vars[key_name] = api_key_value
 
-        if validate_api_keys(env_vars):
-            try:
-                write_env_file(folder_path, env_vars)
-                click.secho("API keys and model saved to .env file", fg="green")
-            except IOError as e:
-                click.secho(f"Error writing .env file: {str(e)}", fg="red")
-                raise
+        if env_vars:
+            write_env_file(folder_path, env_vars)
+            click.secho("API keys and model saved to .env file", fg="green")
         else:
             click.secho(
                 "No API keys provided. Skipping .env file creation.", fg="yellow"
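Note the semantic shift in the guard: validate_api_keys required one of the recognized API keys to be present and non-empty, while the new check only requires the dict itself to be non-empty. An illustrative comparison (hypothetical values):

    env_vars = {"SOME_OTHER_VAR": "value"}  # no recognized API key present
    # old guard: validate_api_keys(env_vars) -> False, .env creation skipped
    # new guard: bool(env_vars)              -> True, .env file written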
src/crewai/cli/utils.py:

@@ -2,7 +2,6 @@ import os
 import shutil
 import sys
 from functools import reduce
 from pathlib import Path
-from typing import Any, Dict, List
 
 import click
@@ -236,39 +235,15 @@ def update_env_vars(env_vars, provider, model):
     return env_vars
 
 
-def validate_api_keys(env_vars: Dict[str, str]) -> bool:
-    """
-    Validates that at least one API key is present and non-empty in the environment variables.
-
-    Args:
-        env_vars (Dict[str, str]): Dictionary of environment variables
-
-    Returns:
-        bool: True if at least one API key is present and non-empty
-    """
-    api_keys = ["MISTRAL_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY"]
-    return any(
-        key in env_vars and env_vars[key].strip()
-        for key in api_keys
-    )
-
-
-def write_env_file(folder_path: Path, env_vars: Dict[str, str]) -> None:
+def write_env_file(folder_path, env_vars):
     """
     Writes environment variables to a .env file in the specified folder.
 
     Args:
-        folder_path (Path): The path to the folder where the .env file will be written.
-        env_vars (Dict[str, str]): A dictionary of environment variables to write.
-
-    Raises:
-        IOError: If there is an error writing to the .env file
+    - folder_path (Path): The path to the folder where the .env file will be written.
+    - env_vars (dict): A dictionary of environment variables to write.
     """
     env_file_path = folder_path / ".env"
-    try:
-        with open(env_file_path, "w") as file:
-            for key, value in env_vars.items():
-                file.write(f"{key}={value}\n")
-    except IOError as e:
-        click.secho(f"Error writing .env file: {str(e)}", fg="red")
-        raise
+    with open(env_file_path, "w") as file:
+        for key, value in env_vars.items():
+            file.write(f"{key}={value}\n")
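With the try/except removed, an IOError raised while writing now propagates to the caller instead of being echoed here. A minimal sketch of the resulting call-site contract (hypothetical folder and key values):

    from pathlib import Path
    from crewai.cli.utils import write_env_file

    try:
        write_env_file(Path("my_crew"), {"OPENAI_API_KEY": "sk-example"})
    except IOError as e:
        # After this change, reporting the failure is the caller's job.
        print(f"Could not write .env: {e}")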
src/crewai/lite_agent.py (new file, 258 lines):

@@ -0,0 +1,258 @@
import os
from typing import Any, Dict, List, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import Converter, Prompts
from crewai.utilities.token_counter_callback import TokenCalcHandler


class LiteAgent(BaseAgent):
    """Represents a lightweight agent in a system.

    Each agent has a role, a goal, a backstory, and an optional language model (llm).
    The agent can execute tasks but with fewer features compared to the full Agent class.

    This is a simplified version of the Agent class, with fewer dependencies and less overhead.

    Attributes:
        agent_executor: An instance of the CrewAgentExecutor class.
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        llm: The language model that will run the agent.
        max_iter: Maximum number of iterations for an agent to execute a task.
        verbose: Whether the agent execution should be in verbose mode.
        tools: Tools at the agent's disposal.
    """

    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
        default=None,
        description="Maximum execution time for an agent to execute a task",
    )
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=None, description="An instance of the CacheHandler class."
    )
    llm: Union[str, InstanceOf[LLM], Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    max_iter: int = Field(
        default=20,
        description="Maximum number of iterations for an agent to execute a task before giving its best answer",
    )
    max_retry_limit: int = Field(
        default=2,
        description="Maximum number of retries for an agent to execute a task when an error occurs.",
    )
    tools_results: Optional[List[Any]] = Field(
        default=[], description="Results of the tools used by the agent."
    )

    @model_validator(mode="after")
    def post_init_setup(self):
        if isinstance(self.llm, str):
            self.llm = LLM(model=self.llm)
        elif isinstance(self.llm, LLM):
            pass
        elif self.llm is None:
            model_name = (
                os.environ.get("OPENAI_MODEL_NAME")
                or os.environ.get("MODEL")
                or "gpt-4o-mini"
            )
            llm_params = {"model": model_name}

            api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
                "OPENAI_BASE_URL"
            )
            if api_base:
                llm_params["base_url"] = api_base

            api_key = os.environ.get("OPENAI_API_KEY")
            if api_key:
                llm_params["api_key"] = api_key

            self.llm = LLM(**llm_params)
        else:
            llm_params = {
                "model": getattr(self.llm, "model_name", None)
                or getattr(self.llm, "deployment_name", None)
                or str(self.llm),
                "temperature": getattr(self.llm, "temperature", None),
                "max_tokens": getattr(self.llm, "max_tokens", None),
                "api_key": getattr(self.llm, "api_key", None),
                "base_url": getattr(self.llm, "base_url", None),
                "organization": getattr(self.llm, "organization", None),
            }
            llm_params = {k: v for k, v in llm_params.items() if v is not None}
            self.llm = LLM(**llm_params)

        if not self.agent_executor:
            self._setup_agent_executor()

        return self

    def _setup_agent_executor(self):
        if not self.cache_handler:
            self.cache_handler = CacheHandler()
        self.set_cache_handler(self.cache_handler)

    def execute_task(
        self,
        task: Task,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.

        Returns:
            Output of the agent
        """
        if self.tools_handler:
            self.tools_handler.last_used_tool = {}

        task_prompt = task.prompt()

        if task.output_json or task.output_pydantic:
            if task.output_json:
                schema = Converter.generate_model_description(task.output_json)
            elif task.output_pydantic:
                schema = Converter.generate_model_description(task.output_pydantic)

            task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
                output_format=schema
            )

        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context
            )

        tools = tools or self.tools or []
        self.create_agent_executor(tools=tools, task=task)

        try:
            result = self.agent_executor.invoke(
                {
                    "input": task_prompt,
                    "tool_names": self.agent_executor.tools_names,
                    "tools": self.agent_executor.tools_description,
                    "ask_for_human_input": task.human_input,
                }
            )["output"]
        except Exception as e:
            self._times_executed += 1
            if self._times_executed > self.max_retry_limit:
                raise e
            result = self.execute_task(task, context, tools)

        if self.max_rpm and self._rpm_controller:
            self._rpm_controller.stop_rpm_counter()

        for tool_result in self.tools_results:
            if tool_result.get("result_as_answer", False):
                result = tool_result["result"]

        return result

    def create_agent_executor(
        self, tools: Optional[List[BaseTool]] = None, task=None
    ) -> None:
        """Create an agent executor for the agent.

        Returns:
            An instance of the CrewAgentExecutor class.
        """
        tools = tools or self.tools or []
        parsed_tools = self._parse_tools(tools)

        prompt = Prompts(
            agent=self,
            tools=tools,
            i18n=self.i18n,
        ).task_execution()

        stop_words = [self.i18n.slice("observation")]

        self.agent_executor = CrewAgentExecutor(
            llm=self.llm,
            task=task,
            agent=self,
            crew=self.crew,
            tools=parsed_tools,
            prompt=prompt,
            original_tools=tools,
            stop_words=stop_words,
            max_iter=self.max_iter,
            tools_handler=self.tools_handler,
            tools_names=self.__tools_names(parsed_tools),
            tools_description=self._render_text_description_and_args(parsed_tools),
            respect_context_window=True,
            request_within_rpm_limit=(
                self._rpm_controller.check_or_wait if self._rpm_controller else None
            ),
            callbacks=[TokenCalcHandler(self._token_process)],
        )

    def get_delegation_tools(self, agents: List[BaseAgent]):
        """Stub implementation - LiteAgent doesn't support delegation."""
        return []

    def get_multimodal_tools(self) -> List[Tool]:
        """Stub implementation - LiteAgent doesn't support multimodal tools."""
        return []

    def get_code_execution_tools(self):
        """Stub implementation - LiteAgent doesn't support code execution."""
        return []

    def get_output_converter(self, llm, text, model, instructions):
        """Get the output converter for the agent."""
        return Converter(llm=llm, text=text, model=model, instructions=instructions)

    def _parse_tools(self, tools: List[Any]) -> List[Any]:
        """Parse tools to be used for the task."""
        tools_list = []
        try:
            from crewai.tools import BaseTool as CrewAITool

            for tool in tools:
                if isinstance(tool, CrewAITool):
                    tools_list.append(tool.to_structured_tool())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            tools_list = []
            for tool in tools:
                tools_list.append(tool)

        return tools_list

    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
        """Render the tool name, description, and args in plain text."""
        tool_strings = []
        for tool in tools:
            tool_strings.append(tool.description)

        return "\n".join(tool_strings)

    @staticmethod
    def __tools_names(tools) -> str:
        """Get the names of the tools as a comma-separated string."""
        return ", ".join([t.name for t in tools])

    def __repr__(self):
        return f"LiteAgent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
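Pulling the pieces together, a minimal usage sketch consistent with the tests below (assumes an OPENAI_API_KEY in the environment, since post_init_setup falls back to an OpenAI-backed LLM with model "gpt-4o-mini" when llm is not set):

    from crewai import LiteAgent, Task

    agent = LiteAgent(
        role="Math Tutor",
        goal="Solve math problems accurately",
        backstory="An experienced math tutor.",
    )
    task = Task(
        description="Calculate the area of a circle with radius 5 cm.",
        expected_output="The area in square centimeters.",
        agent=agent,
    )
    result = agent.execute_task(task)  # returns the executor's "output" string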
tests/cli/cli_test.py:

@@ -2,7 +2,6 @@ from pathlib import Path
 from unittest import mock
 
 import pytest
-import click
 from click.testing import CliRunner
 
 from crewai.cli.cli import (
@@ -21,14 +20,6 @@ from crewai.cli.cli import (
 )
 
 
-from crewai.cli.cli import create
-TEST_CONSTANTS = {
-    "CREW_NAME": "test_crew",
-    "MISTRAL_API_KEY": "mistral_api_key_123",
-    "MISTRAL_MODEL": "mistral-tiny",
-    "EMPTY_KEY": "",
-}
-
 @pytest.fixture
 def runner():
     return CliRunner()
@@ -318,114 +309,6 @@ def test_flow_add_crew(mock_path_exists, mock_create_embedded_crew, runner):
     assert isinstance(call_kwargs["parent_folder"], Path)
 
 
-@pytest.mark.parametrize(
-    "provider,model,api_key,has_valid_keys,expected_outputs",
-    [
-        (
-            "mistral",
-            TEST_CONSTANTS["MISTRAL_MODEL"],
-            TEST_CONSTANTS["MISTRAL_API_KEY"],
-            True,
-            ["API keys and model saved", f"Selected model: {TEST_CONSTANTS['MISTRAL_MODEL']}"]
-        ),
-        (
-            "mistral",
-            TEST_CONSTANTS["MISTRAL_MODEL"],
-            TEST_CONSTANTS["EMPTY_KEY"],
-            False,
-            ["No API keys provided", f"Selected model: {TEST_CONSTANTS['MISTRAL_MODEL']}"]
-        ),
-        (
-            "mistral",
-            None,
-            TEST_CONSTANTS["EMPTY_KEY"],
-            False,
-            ["No model selected"]
-        ),
-    ]
-)
-@mock.patch("crewai.cli.create_crew.validate_api_keys")
-@mock.patch("crewai.cli.create_crew.write_env_file")
-@mock.patch("crewai.cli.create_crew.load_env_vars")
-@mock.patch("crewai.cli.create_crew.get_provider_data")
-@mock.patch("crewai.cli.create_crew.select_model")
-@mock.patch("crewai.cli.create_crew.select_provider")
-@mock.patch("crewai.cli.create_crew.click.confirm")
-@mock.patch("crewai.cli.create_crew.click.prompt")
-def test_create_crew_scenarios(
-    mock_prompt, mock_confirm, mock_select_provider, mock_select_model,
-    mock_get_provider_data, mock_load_env_vars, mock_write_env_file, mock_validate_api_keys,
-    runner, provider, model, api_key, has_valid_keys, expected_outputs
-):
-    """Test different scenarios for crew creation with provider configuration.
-
-    Args:
-        mock_*: Mock objects for various dependencies
-        runner: Click test runner
-        provider: Provider to test (e.g. "mistral")
-        model: Model to select (e.g. "mistral-tiny")
-        api_key: API key to provide
-        has_valid_keys: Whether the API key validation should pass
-        expected_outputs: Expected messages in the output
-    """
-    mock_confirm.return_value = True
-    mock_get_provider_data.return_value = {"mistral": [TEST_CONSTANTS["MISTRAL_MODEL"]]}
-    mock_load_env_vars.return_value = {}
-    mock_select_provider.return_value = provider
-    mock_select_model.return_value = model
-    mock_prompt.return_value = api_key
-    mock_validate_api_keys.return_value = has_valid_keys
-
-    # When model is None, simulate model selection being cancelled
-    if model is None:
-        mock_select_model.side_effect = click.UsageError("No model selected")
-
-    result = runner.invoke(create, ["crew", TEST_CONSTANTS["CREW_NAME"]], input="y\n")
-
-    # For model=None case, we expect error message
-    if model is None:
-        assert result.exit_code == 2  # UsageError exit code
-        assert "No model selected" in result.output
-    else:
-        assert result.exit_code == 0
-        for expected_output in expected_outputs:
-            assert expected_output in result.output
-
-
-@mock.patch("crewai.cli.create_crew.validate_api_keys")
-@mock.patch("crewai.cli.create_crew.write_env_file")
-@mock.patch("crewai.cli.create_crew.load_env_vars")
-@mock.patch("crewai.cli.create_crew.get_provider_data")
-@mock.patch("crewai.cli.create_crew.select_model")
-@mock.patch("crewai.cli.create_crew.select_provider")
-@mock.patch("crewai.cli.create_crew.click.confirm")
-@mock.patch("crewai.cli.create_crew.click.prompt")
-def test_create_crew_with_file_error(
-    mock_prompt, mock_confirm, mock_select_provider, mock_select_model,
-    mock_get_provider_data, mock_load_env_vars, mock_write_env_file, mock_validate_api_keys,
-    runner
-):
-    # Mock folder override confirmation
-    mock_confirm.return_value = True
-    # Mock provider data
-    mock_get_provider_data.return_value = {"mistral": [TEST_CONSTANTS["MISTRAL_MODEL"]]}
-    # Mock empty env vars
-    mock_load_env_vars.return_value = {}
-    # Mock provider and model selection
-    mock_select_provider.return_value = "mistral"
-    mock_select_model.return_value = TEST_CONSTANTS["MISTRAL_MODEL"]
-    # Mock API key input
-    mock_prompt.return_value = TEST_CONSTANTS["MISTRAL_API_KEY"]
-    # Mock API key validation
-    mock_validate_api_keys.return_value = True
-    # Mock file write error
-    mock_write_env_file.side_effect = IOError("Permission denied")
-
-    result = runner.invoke(create, ["crew", TEST_CONSTANTS["CREW_NAME"]], input="y\n")
-
-    assert result.exit_code == 1
-    assert "Error writing .env file: Permission denied" in result.output
-    assert mock_write_env_file.called
-
-
 def test_add_crew_to_flow_not_in_root(runner):
     # Simulate not being in the root of a flow project
     with mock.patch("pathlib.Path.exists", autospec=True) as mock_exists:
tests/lite_agent_test.py (new file, 125 lines):

@@ -0,0 +1,125 @@
"""Test LiteAgent creation and execution basic functionality."""

import os
from unittest.mock import patch, MagicMock

import pytest

from crewai import LiteAgent, Task
from crewai.llm import LLM
from crewai.tools import tool


def test_lite_agent_creation():
    """Test creating a LiteAgent with basic properties."""
    agent = LiteAgent(role="test role", goal="test goal", backstory="test backstory")

    assert agent.role == "test role"
    assert agent.goal == "test goal"
    assert agent.backstory == "test backstory"
    assert agent.tools == []


def test_lite_agent_default_values():
    """Test default values for LiteAgent."""
    agent = LiteAgent(role="test role", goal="test goal", backstory="test backstory")
    assert agent.llm.model == "gpt-4o-mini"
    assert agent.max_iter == 20
    assert agent.max_retry_limit == 2


def test_custom_llm():
    """Test creating a LiteAgent with a custom LLM string."""
    agent = LiteAgent(
        role="test role", goal="test goal", backstory="test backstory", llm="gpt-4"
    )
    assert agent.llm.model == "gpt-4"


def test_custom_llm_with_langchain():
    """Test creating a LiteAgent with a langchain LLM."""
    mock_langchain_llm = MagicMock()
    mock_langchain_llm.model_name = "gpt-4"

    agent = LiteAgent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        llm=mock_langchain_llm,
    )

    assert agent.llm.model == "gpt-4"


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execute_task(mock_invoke):
    """Test executing a task with a LiteAgent."""
    mock_invoke.return_value = {"output": "The area of a circle with radius 5 cm is 78.54 square centimeters."}

    agent = LiteAgent(
        role="Math Tutor",
        goal="Solve math problems accurately",
        backstory="You are an experienced math tutor with a knack for explaining complex concepts simply.",
    )

    task = Task(
        description="Calculate the area of a circle with radius 5 cm.",
        expected_output="The calculated area of the circle in square centimeters.",
        agent=agent,
    )

    result = agent.execute_task(task)

    assert result is not None
    assert "square centimeters" in result.lower()
    mock_invoke.assert_called_once()


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execution(mock_invoke):
    """Test executing a simple task."""
    mock_invoke.return_value = {"output": "1 + 1 = 2"}

    agent = LiteAgent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )

    task = Task(
        description="How much is 1 + 1?",
        agent=agent,
        expected_output="the result of the math operation.",
    )

    output = agent.execute_task(task)
    assert "2" in output
    mock_invoke.assert_called_once()


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execution_with_tools(mock_invoke):
    """Test executing a task with tools."""
    mock_invoke.return_value = {"output": "3 times 4 is 12"}

    @tool
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    agent = LiteAgent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        tools=[multiplier],
    )

    task = Task(
        description="What is 3 times 4?",
        agent=agent,
        expected_output="The result of the multiplication.",
    )

    output = agent.execute_task(task)
    assert "12" in output
    mock_invoke.assert_called_once()
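These tests patch CrewAgentExecutor.invoke, so no real model is called. Assuming a standard pytest setup, they can be run in isolation with something like:

    import pytest

    pytest.main(["tests/lite_agent_test.py", "-q"])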