fix: resolve mypy type errors across agent adapters and core modules

Greyson LaLonde
2025-09-04 22:47:18 -04:00
parent 843801f554
commit b44776c367
6 changed files with 34 additions and 20 deletions

View File

@@ -22,8 +22,10 @@ from crewai.utilities import Logger
 from crewai.utilities.converter import Converter

 try:
-    from langgraph.checkpoint.memory import MemorySaver
-    from langgraph.prebuilt import create_react_agent
+    from langgraph.checkpoint.memory import (
+        MemorySaver,  # type: ignore[import-not-found]
+    )
+    from langgraph.prebuilt import create_react_agent  # type: ignore[import-not-found]

     LANGGRAPH_AVAILABLE = True
 except ImportError:
@@ -55,7 +57,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         llm: Any = None,
         max_iterations: int = 10,
         agent_config: Optional[dict[str, Any]] = None,
-        **kwargs,
+        **kwargs: Any,
     ):
         """Initialize the LangGraph agent adapter."""
         if not LANGGRAPH_AVAILABLE:
@@ -198,7 +200,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             raise

     def create_agent_executor(
-        self, task=None, tools: Optional[list[BaseTool]] = None
+        self, task: Any = None, tools: Optional[list[BaseTool]] = None
     ) -> None:
         """Configure the LangGraph agent for execution."""
         self.configure_tools(tools)
@@ -222,6 +224,6 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
"""Convert output format if needed."""
return Converter(llm=llm, text=text, model=model, instructions=instructions)
def configure_structured_output(self, task) -> None:
def configure_structured_output(self, task: Any) -> None:
"""Configure the structured output for LangGraph."""
self._converter_adapter.configure_structured_output(task)

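A minimal standalone sketch of the optional-import pattern these hunks apply: the untyped optional dependency is imported under a narrow `# type: ignore[import-not-found]`, and `**kwargs` gets an explicit `Any` so strict mypy settings accept the signature. `optional_pkg`, `OptionalThing`, and `SketchAdapter` are placeholder names, not crewai or langgraph identifiers.

from typing import Any

try:
    # mypy has no stubs for the optional package, so only the
    # import-not-found error is silenced, not everything on the line.
    from optional_pkg import OptionalThing  # type: ignore[import-not-found]

    OPTIONAL_PKG_AVAILABLE = True
except ImportError:
    OPTIONAL_PKG_AVAILABLE = False


class SketchAdapter:
    def __init__(self, max_iterations: int = 10, **kwargs: Any) -> None:
        # Typed **kwargs keeps disallow_untyped_defs happy under strict mypy.
        if not OPTIONAL_PKG_AVAILABLE:
            raise ImportError("The optional dependency is required for this adapter")
        self.max_iterations = max_iterations
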
View File

@@ -1,4 +1,5 @@
 import json
+from typing import Any

 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description
@@ -7,14 +8,14 @@ from crewai.utilities.converter import generate_model_description
 class LangGraphConverterAdapter(BaseConverterAdapter):
     """Adapter for handling structured output conversion in LangGraph agents"""

-    def __init__(self, agent_adapter):
+    def __init__(self, agent_adapter: Any) -> None:
         """Initialize the converter adapter with a reference to the agent adapter"""
         self.agent_adapter = agent_adapter
-        self._output_format = None
+        self._output_format: str | None = None
         self._schema = None
         self._system_prompt_appendix = None

-    def configure_structured_output(self, task) -> None:
+    def configure_structured_output(self, task: Any) -> None:
         """Configure the structured output for LangGraph."""
         if not (task.output_json or task.output_pydantic):
             self._output_format = None
@@ -41,7 +42,7 @@ Important: Your final answer MUST be provided in the following structured format
 {self._schema}

-DO NOT include any markdown code blocks, backticks, or other formatting around your response.
+DO NOT include any markdown code blocks, backticks, or other formatting around your response.
 The output should be raw JSON that exactly matches the specified schema.
 """

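The `str | None` annotation in that hunk matters because a bare `self._output_format = None` makes mypy infer the attribute's type as `None`, so any later string assignment is rejected. A small illustrative sketch of the same idea (names are placeholders, not the adapter's real API):

class OutputState:
    def __init__(self) -> None:
        # Without the annotation, mypy infers type None for this attribute
        # and flags the string assignment in configure() below.
        self._output_format: str | None = None

    def configure(self, use_json: bool) -> None:
        self._output_format = "json" if use_json else None
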
View File

@@ -18,8 +18,8 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Logger

 try:
-    from agents import Agent as OpenAIAgent  # type: ignore
-    from agents import Runner, enable_verbose_stdout_logging  # type: ignore
+    from agents import Agent as OpenAIAgent  # type: ignore[import-not-found]
+    from agents import Runner, enable_verbose_stdout_logging

     from .openai_agent_tool_adapter import OpenAIAgentToolAdapter
@@ -40,13 +40,14 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
     step_callback: Any = Field(default=None)
     _tool_adapter: "OpenAIAgentToolAdapter" = PrivateAttr()
     _converter_adapter: OpenAIConverterAdapter = PrivateAttr()
+    agent_executor: Any = Field(default=None)

     def __init__(
         self,
         model: str = "gpt-4o-mini",
         tools: Optional[list[BaseTool]] = None,
-        agent_config: Optional[dict] = None,
-        **kwargs,
+        agent_config: Optional[dict[str, Any]] = None,
+        **kwargs: Any,
     ):
         if not OPENAI_AVAILABLE:
             raise ImportError(
@@ -109,6 +110,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
                     task=task,
                 ),
             )
+            assert hasattr(self, "agent_executor"), "agent_executor not initialized"
             result = self.agent_executor.run_sync(self._openai_agent, task_prompt)
             final_answer = self.handle_execution_result(result)
             crewai_event_bus.emit(
@@ -132,7 +134,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             raise

     def create_agent_executor(
-        self, task=None, tools: Optional[list[BaseTool]] = None
+        self, task: Any = None, tools: Optional[list[BaseTool]] = None
     ) -> None:
         """
         Configure the OpenAI agent for execution.
@@ -171,7 +173,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
         tools = agent_tools.tools()
         return tools

-    def configure_structured_output(self, task) -> None:
+    def configure_structured_output(self, task: Any) -> None:
         """Configure the structured output for the specific agent implementation.

         Args:

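Declaring `agent_executor` as a model field (instead of only assigning it inside `create_agent_executor`) is what makes the attribute visible to mypy, and the added assert is a runtime guard before `run_sync`. A hedged sketch of that shape, with a dummy executor standing in for the real Runner:

from typing import Any

from pydantic import BaseModel, Field


class DummyExecutor:
    def run_sync(self, prompt: str) -> str:
        return f"ran: {prompt}"


class SketchAdapter(BaseModel):
    # Declared up front so the attribute is known to mypy and pydantic,
    # even though it is only populated once the executor is configured.
    agent_executor: Any = Field(default=None)

    def create_agent_executor(self) -> None:
        self.agent_executor = DummyExecutor()

    def execute(self, prompt: str) -> str:
        assert hasattr(self, "agent_executor"), "agent_executor not initialized"
        return self.agent_executor.run_sync(prompt)
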
View File

@@ -1,5 +1,6 @@
 import json
 import re
+from typing import Any

 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description
@@ -19,14 +20,14 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
         _output_model: The Pydantic model for the output
     """

-    def __init__(self, agent_adapter):
+    def __init__(self, agent_adapter: Any) -> None:
         """Initialize the converter adapter with a reference to the agent adapter"""
         self.agent_adapter = agent_adapter
         self._output_format = None
         self._schema = None
         self._output_model = None

-    def configure_structured_output(self, task) -> None:
+    def configure_structured_output(self, task: Any) -> None:
         """
         Configure the structured output for OpenAI agent based on task requirements.

View File

@@ -791,7 +791,12 @@ class Crew(FlowTrackable, BaseModel):
                 manager.tools = []
                 raise Exception("Manager agent should not have tools")
         else:
-            self.manager_llm = create_llm(self.manager_llm)
+            if self.manager_llm is None:
+                from crewai.utilities.llm_utils import create_default_llm
+
+                self.manager_llm = create_default_llm()
+            else:
+                self.manager_llm = create_llm(self.manager_llm)
             manager = Agent(
                 role=i18n.retrieve("hierarchical_manager_agent", "role"),
                 goal=i18n.retrieve("hierarchical_manager_agent", "goal"),

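The split exists because passing a possibly-None `manager_llm` straight into `create_llm` is what mypy objects to; handling None separately narrows the argument type. A sketch of the control flow with assumed, simplified signatures (the real `create_llm` / `create_default_llm` in `crewai.utilities.llm_utils` may differ):

from typing import Union


class LLM:
    def __init__(self, model: str) -> None:
        self.model = model


def create_default_llm() -> LLM:
    # Stand-in for the helper: build an LLM from environment defaults.
    return LLM(model="gpt-4o-mini")


def create_llm(llm: Union[str, LLM]) -> LLM:
    # Stand-in signature that does not accept None, mirroring the mypy complaint.
    return llm if isinstance(llm, LLM) else LLM(model=llm)


def resolve_manager_llm(manager_llm: Union[str, LLM, None]) -> LLM:
    if manager_llm is None:
        return create_default_llm()
    # manager_llm is now narrowed to Union[str, LLM], satisfying create_llm.
    return create_llm(manager_llm)
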
View File

@@ -62,7 +62,7 @@ from crewai.utilities.agent_utils import (
 )
 from crewai.utilities.converter import generate_model_description
 from crewai.utilities.guardrail import process_guardrail
-from crewai.utilities.llm_utils import create_llm
+from crewai.utilities.llm_utils import create_default_llm, create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.tool_utils import execute_tool_and_check_finality
@@ -195,7 +195,10 @@ class LiteAgent(FlowTrackable, BaseModel):
     @model_validator(mode="after")
     def setup_llm(self) -> Self:
         """Set up the LLM and other components after initialization."""
-        self.llm = create_llm(self.llm)
+        if self.llm is None:
+            self.llm = create_default_llm()
+        else:
+            self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
             raise ValueError(
                 f"Expected LLM instance of type BaseLLM, got {type(self.llm).__name__}"