Add AISuite LLM support and update dependencies

- Integrate AISuite as a new third-party LLM option
- Update pyproject.toml and uv.lock to include aisuite package
- Modify BaseLLM to support more flexible initialization
- Remove unnecessary LLM imports across multiple files
- Implement AISuiteLLM with basic chat completion functionality
This commit is contained in:
Lorenze Jay
2025-03-11 15:48:49 -07:00
parent 0cece5fd59
commit 25c64ae86d
8 changed files with 95 additions and 9 deletions

View File

@@ -11,7 +11,7 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.llm import LLM, BaseLLM
from crewai.llm import BaseLLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.task import Task
from crewai.tools import BaseTool
@@ -117,7 +117,9 @@ class Agent(BaseAgent):
self.agent_ops_agent_name = self.role
self.llm = create_llm(self.llm)
if self.function_calling_llm and not isinstance(self.function_calling_llm, BaseLLM):
if self.function_calling_llm and not isinstance(
self.function_calling_llm, BaseLLM
):
self.function_calling_llm = create_llm(self.function_calling_llm)
if not self.agent_executor:

View File

@@ -13,7 +13,7 @@ from crewai.agents.parser import (
OutputParserException,
)
from crewai.agents.tools_handler import ToolsHandler
from crewai.llm import LLM
from crewai.llm import BaseLLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N, Printer
@@ -61,7 +61,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
callbacks: List[Any] = [],
):
self._i18n: I18N = I18N()
self.llm: LLM = llm
self.llm: BaseLLM = llm
self.task = task
self.agent = agent
self.crew = crew

View File

@@ -14,7 +14,7 @@ from packaging import version
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.crew import Crew
from crewai.llm import LLM, BaseLLM
from crewai.llm import BaseLLM
from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm

View File

@@ -19,7 +19,10 @@ class BaseLLM(ABC):
This is used by the CrewAgentExecutor and other components.
"""
def __init__(self):
model: str
temperature: Optional[float] = None
def __init__(self, model: str, temperature: Optional[float] = None):
"""Initialize the BaseLLM with default attributes.
This constructor sets default values for attributes that are expected
@@ -29,6 +32,8 @@ class BaseLLM(ABC):
that these default attributes are properly initialized.
"""
self.stop = []
self.model = model
self.temperature = temperature
@abstractmethod
def call(
@@ -102,3 +107,13 @@ class BaseLLM(ABC):
The context window size as an integer.
"""
pass
@abstractmethod
def set_callbacks(self, callbacks: List[Any]) -> None:
"""Set callback functions for the LLM.
Args:
callbacks: List of callback functions to be executed during
and after LLM calls.
"""
pass

50
src/crewai/llms/third_party/ai_suite.py vendored Normal file
View File

@@ -0,0 +1,50 @@
from typing import Any, Dict, List, Optional
import aisuite as ai
from crewai.llms.base_llm import BaseLLM
class AISuiteLLM(BaseLLM):
    """LLM adapter backed by the ``aisuite`` multi-provider client.

    Implements the ``BaseLLM`` interface on top of aisuite's OpenAI-style
    chat-completions API. Function calling, stop words, and callbacks are
    not supported by this adapter.
    """

    # Conservative fallback: aisuite does not expose per-model context
    # limits, and BaseLLM.get_context_window_size() documents an int return.
    DEFAULT_CONTEXT_WINDOW_SIZE = 4096

    def __init__(self, model: str, temperature: Optional[float] = None, **kwargs):
        """Initialize the adapter and create the aisuite client.

        Args:
            model: Provider-qualified model id (e.g. ``"openai:gpt-4o"``).
            temperature: Optional sampling temperature forwarded to the API.
            **kwargs: Accepted for call-site compatibility but not forwarded:
                BaseLLM.__init__ only takes (model, temperature), so passing
                extra kwargs through raised TypeError.
        """
        super().__init__(model, temperature)
        self.client = ai.Client()

    def call(
        self,
        messages: List[Dict[str, str]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> str:
        """Send ``messages`` to the model and return the reply text.

        Args:
            messages: Chat messages as ``{"role": ..., "content": ...}`` dicts.
            tools: Ignored — this adapter does not support function calling.
            callbacks: Ignored — callbacks are not implemented here.
            available_functions: Ignored (no function calling).

        Returns:
            The assistant message content of the first completion choice.
        """
        completion_params = self._prepare_completion_params(messages)
        response = self.client.chat.completions.create(**completion_params)
        return response.choices[0].message.content

    def _prepare_completion_params(
        self, messages: List[Dict[str, str]]
    ) -> Dict[str, Any]:
        """Build keyword arguments for ``chat.completions.create``.

        ``temperature`` is omitted when unset so the provider's own default
        applies instead of an explicit ``None``, which some providers reject.
        """
        params: Dict[str, Any] = {"model": self.model, "messages": messages}
        if self.temperature is not None:
            params["temperature"] = self.temperature
        return params

    def supports_function_calling(self) -> bool:
        """Tool-call responses are not parsed or executed by this adapter."""
        return False

    def supports_stop_words(self) -> bool:
        """Stop sequences are not forwarded to the provider."""
        return False

    def get_context_window_size(self) -> int:
        """Return a conservative context window size.

        aisuite exposes no per-model metadata, so a fixed fallback is
        returned rather than ``None`` (the base contract expects an int).
        """
        return self.DEFAULT_CONTEXT_WINDOW_SIZE

    def set_callbacks(self, callbacks: List[Any]) -> None:
        """Callbacks are not supported for this adapter; intentional no-op."""
        pass

View File

@@ -6,7 +6,7 @@ from rich.console import Console
from rich.table import Table
from crewai.agent import Agent
from crewai.llm import LLM
from crewai.llm import BaseLLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
@@ -24,7 +24,7 @@ class CrewEvaluator:
Attributes:
crew (Crew): The crew of agents to evaluate.
eval_llm (LLM): Language model instance to use for evaluations
eval_llm (BaseLLM): Language model instance to use for evaluations
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -33,7 +33,7 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, eval_llm: InstanceOf[LLM]):
def __init__(self, crew, eval_llm: InstanceOf[BaseLLM]):
self.crew = crew
self.llm = eval_llm
self._telemetry = Telemetry()