Compare commits

...

1 Commit

Author: Devin AI
SHA1: 5afe6914eb
Message: Fix #2558: Add LiteAgent as a lightweight agent implementation
Co-Authored-By: Joe Moura <joao@crewai.com>
Date: 2025-04-09 18:44:45 +00:00
3 changed files with 385 additions and 0 deletions

src/crewai/__init__.py

@@ -4,6 +4,7 @@ from crewai.agent import Agent
from crewai.crew import Crew
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.lite_agent import LiteAgent
from crewai.llm import LLM
from crewai.process import Process
from crewai.task import Task
@@ -23,4 +24,5 @@ __all__ = [
"LLM",
"Flow",
"Knowledge",
"LiteAgent",
]
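
With this export in place, LiteAgent becomes importable from the package root, which is how the tests in this commit import it. A two-line illustration (not part of the diff):

from crewai import LiteAgent  # re-exported from crewai.lite_agent by this change

agent = LiteAgent(role="Researcher", goal="Summarize findings", backstory="A careful analyst.")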

src/crewai/lite_agent.py (new file, 258 lines)

@@ -0,0 +1,258 @@
import os
from typing import Any, Dict, List, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import Converter, Prompts
from crewai.utilities.token_counter_callback import TokenCalcHandler


class LiteAgent(BaseAgent):
"""Represents a lightweight agent in a system.
Each agent has a role, a goal, a backstory, and an optional language model (llm).
The agent can execute tasks but with fewer features compared to the full Agent class.
This is a simplified version of the Agent class with less dependencies and overhead.
Attributes:
agent_executor: An instance of the CrewAgentExecutor class.
role: The role of the agent.
goal: The objective of the agent.
backstory: The backstory of the agent.
llm: The language model that will run the agent.
max_iter: Maximum number of iterations for an agent to execute a task.
verbose: Whether the agent execution should be in verbose mode.
tools: Tools at agent's disposal
"""
_times_executed: int = PrivateAttr(default=0)
max_execution_time: Optional[int] = Field(
default=None,
description="Maximum execution time for an agent to execute a task",
)
cache_handler: InstanceOf[CacheHandler] = Field(
default=None, description="An instance of the CacheHandler class."
)
llm: Union[str, InstanceOf[LLM], Any] = Field(
description="Language model that will run the agent.", default=None
)
max_iter: int = Field(
default=20,
description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
)
max_retry_limit: int = Field(
default=2,
description="Maximum number of retries for an agent to execute a task when an error occurs.",
)
tools_results: Optional[List[Any]] = Field(
default=[], description="Results of the tools used by the agent."
)

    @model_validator(mode="after")
def post_init_setup(self):
if isinstance(self.llm, str):
self.llm = LLM(model=self.llm)
elif isinstance(self.llm, LLM):
pass
elif self.llm is None:
model_name = (
os.environ.get("OPENAI_MODEL_NAME")
or os.environ.get("MODEL")
or "gpt-4o-mini"
)
llm_params = {"model": model_name}
api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
"OPENAI_BASE_URL"
)
if api_base:
llm_params["base_url"] = api_base
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
llm_params["api_key"] = api_key
self.llm = LLM(**llm_params)
else:
llm_params = {
"model": getattr(self.llm, "model_name", None)
or getattr(self.llm, "deployment_name", None)
or str(self.llm),
"temperature": getattr(self.llm, "temperature", None),
"max_tokens": getattr(self.llm, "max_tokens", None),
"api_key": getattr(self.llm, "api_key", None),
"base_url": getattr(self.llm, "base_url", None),
"organization": getattr(self.llm, "organization", None),
}
llm_params = {k: v for k, v in llm_params.items() if v is not None}
self.llm = LLM(**llm_params)
if not self.agent_executor:
self._setup_agent_executor()
return self

    def _setup_agent_executor(self):
if not self.cache_handler:
self.cache_handler = CacheHandler()
self.set_cache_handler(self.cache_handler)

    def execute_task(
self,
task: Task,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
) -> str:
"""Execute a task with the agent.
Args:
task: Task to execute.
context: Context to execute the task in.
tools: Tools to use for the task.
Returns:
Output of the agent
"""
if self.tools_handler:
self.tools_handler.last_used_tool = {}
task_prompt = task.prompt()
if task.output_json or task.output_pydantic:
if task.output_json:
schema = Converter.generate_model_description(task.output_json)
elif task.output_pydantic:
schema = Converter.generate_model_description(task.output_pydantic)
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
output_format=schema
)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
)
tools = tools or self.tools or []
self.create_agent_executor(tools=tools, task=task)
try:
result = self.agent_executor.invoke(
{
"input": task_prompt,
"tool_names": self.agent_executor.tools_names,
"tools": self.agent_executor.tools_description,
"ask_for_human_input": task.human_input,
}
)["output"]
except Exception as e:
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
raise e
result = self.execute_task(task, context, tools)
if self.max_rpm and self._rpm_controller:
self._rpm_controller.stop_rpm_counter()
for tool_result in self.tools_results:
if tool_result.get("result_as_answer", False):
result = tool_result["result"]
return result

    def create_agent_executor(
        self, tools: Optional[List[BaseTool]] = None, task=None
    ) -> None:
        """Create an agent executor for the agent.

        Sets `self.agent_executor` to a configured CrewAgentExecutor instance
        rather than returning one.
        """
tools = tools or self.tools or []
parsed_tools = self._parse_tools(tools)
prompt = Prompts(
agent=self,
tools=tools,
i18n=self.i18n,
).task_execution()
stop_words = [self.i18n.slice("observation")]
self.agent_executor = CrewAgentExecutor(
llm=self.llm,
task=task,
agent=self,
crew=self.crew,
tools=parsed_tools,
prompt=prompt,
original_tools=tools,
stop_words=stop_words,
max_iter=self.max_iter,
tools_handler=self.tools_handler,
tools_names=self.__tools_names(parsed_tools),
tools_description=self._render_text_description_and_args(parsed_tools),
respect_context_window=True,
request_within_rpm_limit=(
self._rpm_controller.check_or_wait if self._rpm_controller else None
),
callbacks=[TokenCalcHandler(self._token_process)],
)

    def get_delegation_tools(self, agents: List[BaseAgent]):
        """Stub implementation - LiteAgent doesn't support delegation."""
        return []

    def get_multimodal_tools(self) -> List[Tool]:
        """Stub implementation - LiteAgent doesn't support multimodal tools."""
        return []

    def get_code_execution_tools(self):
        """Stub implementation - LiteAgent doesn't support code execution."""
        return []

    def get_output_converter(self, llm, text, model, instructions):
        """Get the output converter for the agent."""
        return Converter(llm=llm, text=text, model=model, instructions=instructions)

    def _parse_tools(self, tools: List[Any]) -> List[Any]:
        """Parse tools to be used for the task."""
        tools_list = []
        try:
            from crewai.tools import BaseTool as CrewAITool

            for tool in tools:
                if isinstance(tool, CrewAITool):
                    tools_list.append(tool.to_structured_tool())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            # crewai.tools is unavailable; pass the tools through unchanged.
            tools_list = []
            for tool in tools:
                tools_list.append(tool)
        return tools_list

    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
        """Render the tool name, description, and args in plain text."""
        # Each tool's description is expected to already embed its name and
        # argument schema, so joining the descriptions is sufficient here.
        tool_strings = []
        for tool in tools:
            tool_strings.append(tool.description)
        return "\n".join(tool_strings)

    @staticmethod
    def __tools_names(tools) -> str:
        """Get the names of the tools as a comma-separated string."""
        return ", ".join([t.name for t in tools])

    def __repr__(self):
        return f"LiteAgent(role={self.role}, goal={self.goal}, backstory={self.backstory})"

tests/lite_agent_test.py (new file, 125 lines)

@@ -0,0 +1,125 @@
"""Test LiteAgent creation and execution basic functionality."""
import os
from unittest.mock import patch, MagicMock
import pytest
from crewai import LiteAgent, Task
from crewai.llm import LLM
from crewai.tools import tool


def test_lite_agent_creation():
"""Test creating a LiteAgent with basic properties."""
agent = LiteAgent(role="test role", goal="test goal", backstory="test backstory")
assert agent.role == "test role"
assert agent.goal == "test goal"
assert agent.backstory == "test backstory"
assert agent.tools == []


def test_lite_agent_default_values():
"""Test default values for LiteAgent."""
agent = LiteAgent(role="test role", goal="test goal", backstory="test backstory")
assert agent.llm.model == "gpt-4o-mini"
assert agent.max_iter == 20
assert agent.max_retry_limit == 2


def test_custom_llm():
"""Test creating a LiteAgent with a custom LLM string."""
agent = LiteAgent(
role="test role", goal="test goal", backstory="test backstory", llm="gpt-4"
)
assert agent.llm.model == "gpt-4"


def test_custom_llm_with_langchain():
"""Test creating a LiteAgent with a langchain LLM."""
mock_langchain_llm = MagicMock()
mock_langchain_llm.model_name = "gpt-4"
agent = LiteAgent(
role="test role",
goal="test goal",
backstory="test backstory",
llm=mock_langchain_llm,
)
assert agent.llm.model == "gpt-4"


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execute_task(mock_invoke):
"""Test executing a task with a LiteAgent."""
mock_invoke.return_value = {"output": "The area of a circle with radius 5 cm is 78.54 square centimeters."}
agent = LiteAgent(
role="Math Tutor",
goal="Solve math problems accurately",
backstory="You are an experienced math tutor with a knack for explaining complex concepts simply.",
)
task = Task(
description="Calculate the area of a circle with radius 5 cm.",
expected_output="The calculated area of the circle in square centimeters.",
agent=agent,
)
result = agent.execute_task(task)
assert result is not None
assert "square centimeters" in result.lower()
mock_invoke.assert_called_once()


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execution(mock_invoke):
"""Test executing a simple task."""
mock_invoke.return_value = {"output": "1 + 1 = 2"}
agent = LiteAgent(
role="test role",
goal="test goal",
backstory="test backstory",
)
task = Task(
description="How much is 1 + 1?",
agent=agent,
expected_output="the result of the math operation.",
)
output = agent.execute_task(task)
assert "2" in output
mock_invoke.assert_called_once()


@patch("crewai.agents.crew_agent_executor.CrewAgentExecutor.invoke")
def test_lite_agent_execution_with_tools(mock_invoke):
"""Test executing a task with tools."""
mock_invoke.return_value = {"output": "3 times 4 is 12"}
@tool
def multiplier(first_number: int, second_number: int) -> float:
"""Useful for when you need to multiply two numbers together."""
return first_number * second_number
agent = LiteAgent(
role="test role",
goal="test goal",
backstory="test backstory",
tools=[multiplier],
)
task = Task(
description="What is 3 times 4?",
agent=agent,
expected_output="The result of the multiplication.",
)
output = agent.execute_task(task)
assert "12" in output
mock_invoke.assert_called_once()
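
The environment-variable fallback in post_init_setup can be exercised the same way. A minimal sketch (not part of the diff) that stays independent of local environment settings by patching OPENAI_MODEL_NAME, which has the highest precedence:

import os
from unittest.mock import patch

from crewai import LiteAgent

# Precedence in post_init_setup: OPENAI_MODEL_NAME, then MODEL,
# then the hard-coded "gpt-4o-mini" default.
with patch.dict(os.environ, {"OPENAI_MODEL_NAME": "gpt-4-turbo"}):
    agent = LiteAgent(role="test role", goal="test goal", backstory="test backstory")
    assert agent.llm.model == "gpt-4-turbo"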