Fix crew_test issues with function calling

Brandon Hancock
2025-01-09 11:11:21 -05:00
parent 0dc2582ce1
commit 6ad314af8c
5 changed files with 107 additions and 3905 deletions

View File

@@ -86,7 +86,7 @@ class Agent(BaseAgent):
     llm: Union[str, InstanceOf[LLM], Any] = Field(
         description="Language model that will run the agent.", default=None
     )
-    function_calling_llm: Optional[Any] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     system_template: Optional[str] = Field(

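With the widened annotation, `function_calling_llm` accepts the same shapes as `llm`: a model-name string, an `LLM` instance, or another provider object. A minimal sketch of the two common call sites, assuming only the constructor arguments already visible in the test diff further down (`role`, `goal`, `backstory`, `llm`, `function_calling_llm`):

    from crewai import Agent, LLM

    # Model name as a plain string; it is coerced to an LLM downstream.
    string_agent = Agent(
        role="Greeter",
        goal="Say hello.",
        backstory="You are a friendly greeter.",
        llm="gpt-4o-mini",
        function_calling_llm="gpt-4o-mini",
    )

    # A pre-configured LLM instance, as the updated test uses.
    instance_agent = Agent(
        role="Greeter",
        goal="Say hello.",
        backstory="You are a friendly greeter.",
        llm="gpt-4o-mini",
        function_calling_llm=LLM(model="gpt-4o-mini"),
    )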
View File

@@ -47,6 +47,7 @@ from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
     aggregate_raw_outputs_from_tasks,
 )
+from crewai.utilities.llm_utils import create_llm  # Ensure this import is present
 from crewai.utilities.planning_handler import CrewPlanner
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -149,7 +150,7 @@ class Crew(BaseModel):
     manager_agent: Optional[BaseAgent] = Field(
         description="Custom agent that will be used as manager.", default=None
     )
-    function_calling_llm: Optional[Any] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
@@ -246,14 +247,8 @@ class Crew(BaseModel):
         self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         if self.function_calling_llm:
-            if isinstance(self.function_calling_llm, str):
-                self.function_calling_llm = LLM(model=self.function_calling_llm)
-            elif not isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm = LLM(
-                    model=getattr(self.function_calling_llm, "model_name", None)
-                    or getattr(self.function_calling_llm, "deployment_name", None)
-                    or str(self.function_calling_llm)
-                )
+            self.function_calling_llm = create_llm(self.function_calling_llm)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
         return self

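The inline coercion above is collapsed into a single call to `create_llm`. That helper's body is not part of this diff; the following is a sketch of the behaviour the deleted lines encoded (string to `LLM(model=...)`, `LLM` passed through, other objects reduced to a model name), offered as an assumption about what `create_llm` does rather than its actual source:

    from typing import Any, Optional

    from crewai import LLM

    def create_llm(value: Any) -> Optional[LLM]:
        # Hypothetical reconstruction of crewai.utilities.llm_utils.create_llm,
        # mirroring the branch logic removed in the hunk above.
        if value is None:
            return None
        if isinstance(value, LLM):
            return value
        if isinstance(value, str):
            return LLM(model=value)
        # Fall back to whatever model identifier the object exposes.
        model = (
            getattr(value, "model_name", None)
            or getattr(value, "deployment_name", None)
            or str(value)
        )
        return LLM(model=model)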
View File

@@ -348,8 +348,7 @@ class ToolUsage:
         tool = self._select_tool(tool_name)
         try:
             arguments = self._validate_tool_input(self.action.tool_input)
-            print("Arguments:", arguments)
-            print("Arguments type:", type(arguments))
         except Exception:
             if raise_error:
                 raise

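The two deleted print statements were inspecting the value and type that `_validate_tool_input` hands back for the raw `tool_input`. For context only, a hypothetical stand-in showing the kind of string-to-dict parsing being debugged; this is not crewAI's `_validate_tool_input`:

    import json
    from typing import Any, Dict

    def parse_tool_arguments(tool_input: str) -> Dict[str, Any]:
        # Hypothetical helper: decode the model's tool_input payload into the
        # keyword arguments the selected tool will be called with.
        parsed = json.loads(tool_input)
        if not isinstance(parsed, dict):
            raise ValueError("tool_input must decode to a dict of arguments")
        return parsed

    print(parse_tool_arguments('{"query": "greeting"}'))  # {'query': 'greeting'}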
File diff suppressed because it is too large

View File

@@ -1466,37 +1466,34 @@ def test_dont_set_agents_step_callback_if_already_set():
 def test_crew_function_calling_llm():
-    from unittest.mock import patch
     from crewai import LLM
     from crewai.tools import tool
-    llm = "gpt-4o"
+    llm = LLM(model="gpt-4o-mini")
     @tool
-    def learn_about_AI() -> str:
-        """Useful for when you need to learn about AI to write an paragraph about it."""
-        return "AI is a very broad field."
+    def look_up_greeting() -> str:
+        """Tool used to retrieve a greeting."""
+        return "Howdy!"
     agent1 = Agent(
-        role="test role",
-        goal="test goal",
-        backstory="test backstory",
-        tools=[learn_about_AI],
+        role="Greeter",
+        goal="Say hello.",
+        backstory="You are a friendly greeter.",
+        tools=[look_up_greeting],
         llm="gpt-4o-mini",
         function_calling_llm=llm,
     )
     essay = Task(
-        description="Write and then review an small paragraph on AI until it's AMAZING",
-        expected_output="The final paragraph.",
+        description="Look up the greeting and say it.",
+        expected_output="A greeting.",
         agent=agent1,
     )
-    tasks = [essay]
-    crew = Crew(agents=[agent1], tasks=tasks)
-    with patch.object(
-        instructor, "from_litellm", wraps=instructor.from_litellm
-    ) as mock_from_litellm:
-        crew.kickoff()
-        mock_from_litellm.assert_called()
+    crew = Crew(agents=[agent1], tasks=[essay])
+    result = crew.kickoff()
+    assert result.raw == "Howdy!"
 @pytest.mark.vcr(filter_headers=["authorization"])