Checking supports_function_calling instead of gpt model name checks

This commit is contained in:
João Moura
2024-09-23 16:23:38 -03:00
parent 3b6d1838b4
commit 493f046c03
8 changed files with 34 additions and 61 deletions

View File

@@ -39,9 +39,3 @@ class OutputConverter(BaseModel, ABC):
     def to_json(self, current_attempt=1):
         """Convert text to json."""
         pass
-
-    @property
-    @abstractmethod
-    def is_gpt(self) -> bool:
-        """Return if llm provided is of gpt from openai."""
-        pass

View File

@@ -1,6 +1,7 @@
 from typing import Any, Dict, List, Optional, Union
 import logging
 import litellm
+from litellm import get_supported_openai_params


 class LLM:
@@ -85,3 +86,11 @@ class LLM:
         except Exception as e:
             logging.error(f"LiteLLM call failed: {str(e)}")
             raise  # Re-raise the exception after logging
+
+    def supports_function_calling(self) -> bool:
+        try:
+            params = get_supported_openai_params(model=self.model)
+            return "response_format" in params
+        except Exception as e:
+            logging.error(f"Failed to get supported params: {str(e)}")
+            return False
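As a rough standalone sketch of what the new check does (the wrapper function and the "gpt-4o" model name below are illustrative, not part of the diff): litellm's get_supported_openai_params is asked for the model's supported parameters, and the commit treats support for response_format as the proxy for function-calling support.

```python
import logging

from litellm import get_supported_openai_params


def supports_function_calling(model: str) -> bool:
    # Mirrors the LLM.supports_function_calling method added in this commit:
    # a model is considered capable if litellm reports "response_format"
    # among its supported OpenAI params.
    try:
        params = get_supported_openai_params(model=model)
        return "response_format" in params
    except Exception as e:
        logging.error(f"Failed to get supported params: {str(e)}")
        return False


if __name__ == "__main__":
    # "gpt-4o" is just an illustrative model name.
    print(supports_function_calling("gpt-4o"))
```

Falling back to False on any lookup error keeps unknown or custom model names on the plain-prompt path instead of raising.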

View File

@@ -73,7 +73,6 @@ class ToolUsage:
         # Set the maximum parsing attempts for bigger models
         if (
             self.function_calling_llm
-            and self._is_gpt(self.function_calling_llm)
             and self.function_calling_llm in OPENAI_BIGGER_MODELS
         ):
             self._max_parsing_attempts = 2
@@ -299,13 +298,6 @@ class ToolUsage:
             )
         return "\n--\n".join(descriptions)

-    def _is_gpt(self, llm) -> bool:
-        return (
-            "gpt" in str(llm.model).lower()
-            or "o1-preview" in str(llm.model).lower()
-            or "o1-mini" in str(llm.model).lower()
-        )
-
     def _tool_calling(
         self, tool_string: str
     ) -> Union[ToolCalling, InstructorToolCalling]:
@@ -314,13 +306,9 @@ class ToolUsage:
             print("self.function_calling_llm")
             model = (
                 InstructorToolCalling
-                if self._is_gpt(self.function_calling_llm)
+                if self.function_calling_llm.supports_function_calling()
                 else ToolCalling
             )
-            print("model", model)
-            print(
-                "self.function_calling_llm.model", self.function_calling_llm.model
-            )
             converter = Converter(
                 text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n### TEXT \n{tool_string}",
                 llm=self.function_calling_llm,
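In ToolUsage the same capability check now drives which tool-calling schema class is used. A minimal sketch of that selection logic, with stand-ins for the crewai classes (pick_tool_calling_model and StubLLM are hypothetical names, not part of the codebase):

```python
from typing import Type, Union


class ToolCalling: ...            # stand-in for crewai's plain ToolCalling
class InstructorToolCalling: ...  # stand-in for the instructor-backed variant


class StubLLM:
    # Hypothetical LLM stand-in; the real object queries litellm.
    def supports_function_calling(self) -> bool:
        return True


def pick_tool_calling_model(
    function_calling_llm,
) -> Union[Type[ToolCalling], Type[InstructorToolCalling]]:
    # Mirrors the updated branch: ask the LLM object about its capability
    # instead of matching "gpt"/"o1" in the model name.
    return (
        InstructorToolCalling
        if function_calling_llm.supports_function_calling()
        else ToolCalling
    )


print(pick_tool_calling_model(StubLLM()))  # -> InstructorToolCalling
```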

View File

@@ -2,7 +2,6 @@ import json
 import re
 from typing import Any, Optional, Type, Union

-from crewai.llm import LLM
 from pydantic import BaseModel, ValidationError

 from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
@@ -24,7 +23,7 @@ class Converter(OutputConverter):
     def to_pydantic(self, current_attempt=1):
         """Convert text to pydantic."""
         try:
-            if self.is_gpt:
+            if self.llm.supports_function_calling():
                 return self._create_instructor().to_pydantic()
             else:
                 return self.llm.call(
@@ -43,7 +42,7 @@ class Converter(OutputConverter):
     def to_json(self, current_attempt=1):
         """Convert text to json."""
         try:
-            if self.is_gpt:
+            if self.llm.supports_function_calling():
                 return self._create_instructor().to_json()
             else:
                 return json.dumps(
@@ -86,15 +85,6 @@ class Converter(OutputConverter):
             )
             return parser.parse_result(result)

-    @property
-    def is_gpt(self) -> bool:
-        """Return if llm provided is of gpt from openai."""
-        return (
-            "gpt" in str(self.llm.model).lower()
-            or "o1-preview" in str(self.llm.model).lower()
-            or "o1-mini" in str(self.llm.model).lower()
-        )
-

 def convert_to_model(
     result: str,
@@ -202,21 +192,12 @@ def convert_with_instructions(
 def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
     instructions = "I'm gonna convert this raw text into valid JSON."
-    if not is_gpt(llm):
+    if llm.supports_function_calling():
         model_schema = PydanticSchemaParser(model=model).get_schema()
         instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
     return instructions


-def is_gpt(llm: LLM) -> bool:
-    """Return if llm provided is of gpt from openai."""
-    return (
-        "gpt" in str(llm.model).lower()
-        or "o1-preview" in str(llm.model).lower()
-        or "o1-mini" in str(llm.model).lower()
-    )
-

 def create_converter(
     agent: Optional[Any] = None,
     converter_cls: Optional[Type[Converter]] = None,
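Worth noting: the condition in get_conversion_instructions flips from `if not is_gpt(llm)` to `if llm.supports_function_calling()`, so the schema text is now appended for capable models. A self-contained sketch of the resulting behaviour, with a hypothetical StubLLM and the schema rendering simplified to pydantic's model_json_schema() instead of PydanticSchemaParser:

```python
from pydantic import BaseModel


class SimpleModel(BaseModel):
    name: str
    age: int


class StubLLM:
    # Hypothetical stand-in: the real crewai LLM asks litellm about the model.
    def supports_function_calling(self) -> bool:
        return True


def get_conversion_instructions(model, llm) -> str:
    # Same shape as the function in the diff, with the schema rendering
    # simplified for the sketch.
    instructions = "I'm gonna convert this raw text into valid JSON."
    if llm.supports_function_calling():
        model_schema = model.model_json_schema()
        instructions = (
            f"{instructions}\n\nThe json should have the following structure, "
            f"with the following keys:\n{model_schema}"
        )
    return instructions


print(get_conversion_instructions(SimpleModel, StubLLM()))
```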

View File

@@ -78,7 +78,7 @@ class TaskEvaluator:
         instructions = "Convert all responses into valid JSON output."

-        if not self._is_gpt(self.llm):
+        if not self.llm.supports_function_calling():
             model_schema = PydanticSchemaParser(model=TaskEvaluation).get_schema()
             instructions = f"{instructions}\n\nReturn only valid JSON with the following schema:\n```json\n{model_schema}\n```"
@@ -91,13 +91,6 @@ class TaskEvaluator:
         return converter.to_pydantic()

-    def _is_gpt(self, llm) -> bool:
-        return (
-            "gpt" in str(self.llm.model).lower()
-            or "o1-preview" in str(self.llm.model).lower()
-            or "o1-mini" in str(self.llm.model).lower()
-        )
-
     def evaluate_training_data(
         self, training_data: dict, agent_id: str
     ) -> TrainingTaskEvaluation:
@@ -128,7 +121,7 @@ class TaskEvaluator:
         )
         instructions = "I'm gonna convert this raw text into valid JSON."

-        if not self._is_gpt(self.llm):
+        if not self.llm.supports_function_calling():
             model_schema = PydanticSchemaParser(
                 model=TrainingTaskEvaluation
             ).get_schema()
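In TaskEvaluator the branch keeps its original direction: the JSON schema is inlined into the prompt only when the LLM does not support function calling. A small sketch of that guard (build_evaluation_instructions and StubLLM are hypothetical illustration names):

```python
class StubLLM:
    # Hypothetical stand-in for an LLM without function-calling support.
    def supports_function_calling(self) -> bool:
        return False


def build_evaluation_instructions(llm, schema: str) -> str:
    # Mirrors the TaskEvaluator branch: fall back to an inlined JSON schema
    # only for models that cannot use native function calling.
    instructions = "Convert all responses into valid JSON output."
    if not llm.supports_function_calling():
        instructions += (
            f"\n\nReturn only valid JSON with the following schema:\n```json\n{schema}\n```"
        )
    return instructions


print(build_evaluation_instructions(StubLLM(), '{"quality": "float"}'))
```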

View File

@@ -816,7 +816,7 @@ def test_agent_step_callback():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_agent_function_calling_llm():
-    llm = "gpt-4"
+    llm = "gpt-4o"

     @tool
     def learn_about_AI() -> str:

View File

@@ -25,6 +25,7 @@ def test_evaluate_training_data(converter_mock):
     }
     agent_id = "agent_id"
     original_agent = MagicMock()
+    original_agent.llm.supports_function_calling.return_value = False
     function_return_value = TrainingTaskEvaluation(
         suggestions=[
             "The initial output was already good, having a detailed explanation. However, the improved output "

View File

@@ -11,11 +11,12 @@ from crewai.utilities.converter import (
     create_converter,
     get_conversion_instructions,
     handle_partial_json,
-    is_gpt,
     validate_model,
 )
 from pydantic import BaseModel

+from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
+

 # Sample Pydantic models for testing
 class EmailResponse(BaseModel):
@@ -198,14 +199,20 @@ def test_convert_with_instructions_failure(
 def test_get_conversion_instructions_gpt():
     mock_llm = Mock()
     mock_llm.openai_api_base = None
-    with patch("crewai.utilities.converter.is_gpt", return_value=True):
+    with patch.object(LLM, "supports_function_calling") as supports_function_calling:
+        supports_function_calling.return_value = True
         instructions = get_conversion_instructions(SimpleModel, mock_llm)
-        assert instructions == "I'm gonna convert this raw text into valid JSON."
+        model_schema = PydanticSchemaParser(model=SimpleModel).get_schema()
+        assert (
+            instructions
+            == f"I'm gonna convert this raw text into valid JSON.\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
+        )


 def test_get_conversion_instructions_non_gpt():
     mock_llm = Mock()
-    with patch("crewai.utilities.converter.is_gpt", return_value=False):
+    with patch.object(LLM, "supports_function_calling") as supports_function_calling:
+        supports_function_calling.return_value = False
         with patch("crewai.utilities.converter.PydanticSchemaParser") as mock_parser:
             mock_parser.return_value.get_schema.return_value = "Sample schema"
             instructions = get_conversion_instructions(SimpleModel, mock_llm)
@@ -213,14 +220,14 @@ def test_get_conversion_instructions_non_gpt():
 # Tests for is_gpt
-def test_is_gpt_true():
-    llm = LLM(model="gpt-4")
-    assert is_gpt(llm) is True
+def test_supports_function_calling_true():
+    llm = LLM(model="gpt-4o")
+    assert llm.supports_function_calling() is True


-def test_is_gpt_false():
-    llm = LLM(model="lol-4")
-    assert is_gpt(llm) is False
+def test_supports_function_calling_false():
+    llm = LLM(model="non-existent-model")
+    assert llm.supports_function_calling() is False


 class CustomConverter(Converter):
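For the rewritten converter tests, the pattern is patch.object on the LLM class so the capability probe can be forced either way without hitting litellm. A self-contained sketch of that pattern, using a hypothetical minimal LLM stand-in and helper (needs_inline_schema), not the real crewai code:

```python
from unittest.mock import patch


class LLM:
    # Hypothetical, minimal stand-in for crewai's LLM, used only to show
    # the patch.object pattern from the updated tests.
    def __init__(self, model: str):
        self.model = model

    def supports_function_calling(self) -> bool:
        # The real implementation asks litellm for the supported params.
        raise NotImplementedError


def needs_inline_schema(llm: LLM) -> bool:
    # Caller-side logic mirroring the converter: inline a schema only when
    # the model cannot use native function calling.
    return not llm.supports_function_calling()


def test_needs_inline_schema_when_unsupported():
    llm = LLM(model="some-model")
    with patch.object(LLM, "supports_function_calling") as supports_function_calling:
        supports_function_calling.return_value = False
        assert needs_inline_schema(llm) is True
```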