chore: improve typing and docs in agents leaf files (#3461)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled

- Add typing and Google-style docstrings to agents leaf files
- Add TODO notes
This commit is contained in:
Greyson LaLonde
2025-09-08 11:57:34 -04:00
committed by GitHub
parent fa06aea8d5
commit d5126d159b
3 changed files with 109 additions and 35 deletions

View File

"""Base converter adapter for structured output conversion."""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only for type annotations to avoid a runtime import cycle.
    from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
    from crewai.task import Task


class BaseConverterAdapter(ABC):
    """Abstract base class for converter adapters in CrewAI.

    Defines the common interface for converting agent outputs to structured formats.
    All converter adapters must implement the methods defined here.
    """

    def __init__(self, agent_adapter: BaseAgentAdapter) -> None:
        """Initialize the converter adapter.

        Args:
            agent_adapter: The agent adapter to configure for structured output.
        """
        self.agent_adapter = agent_adapter

    @abstractmethod
    def configure_structured_output(self, task: Task) -> None:
        """Configure agents to return structured output.

        Must support both JSON and Pydantic output formats.

        Args:
            task: The task requiring structured output.
        """

    @abstractmethod
    def enhance_system_prompt(self, base_prompt: str) -> str:
        """Enhance the system prompt with structured output instructions.

        Args:
            base_prompt: The original system prompt.

        Returns:
            Enhanced prompt with structured output guidance.
        """

    @abstractmethod
    def post_process_result(self, result: str) -> str:
        """Post-process the result to ensure proper string format.

        Args:
            result: The raw result from agent execution.

        Returns:
            Processed result as a string.
        """

View File

"""Base output converter for transforming text into structured formats."""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any

from pydantic import BaseModel, Field


class OutputConverter(BaseModel, ABC):
    """Abstract base class for converting text to structured formats.

    Uses language models to transform unstructured text into either Pydantic models
    or JSON objects based on provided instructions and target schemas.

    Attributes:
        text: The input text to be converted.
        llm: The language model used for conversion.
        model: The target Pydantic model class for structuring output.
        instructions: Specific instructions for the conversion process.
        max_attempts: Maximum number of conversion attempts (default: 3).
    """

    text: str = Field(description="Text to be converted.")
    llm: Any = Field(description="The language model to be used to convert the text.")
    model: type[BaseModel] = Field(
        description="The model to be used to convert the text."
    )
    instructions: str = Field(description="Conversion instructions to the LLM.")
    max_attempts: int = Field(
        description="Max number of attempts to try to get the output formatted.",
        default=3,
    )

    @abstractmethod
    def to_pydantic(self, current_attempt: int = 1) -> BaseModel:
        """Convert text to a Pydantic model instance.

        Args:
            current_attempt: Current attempt number for retry logic.

        Returns:
            Pydantic model instance with structured data.
        """

    @abstractmethod
    def to_json(self, current_attempt: int = 1) -> dict[str, Any]:
        """Convert text to a JSON dictionary.

        Args:
            current_attempt: Current attempt number for retry logic.

        Returns:
            Dictionary containing structured JSON data.
        """

View File

"""Cache handler for tool usage results."""

from typing import Any

from pydantic import BaseModel, PrivateAttr


class CacheHandler(BaseModel):
    """Handles caching of tool execution results.

    Provides in-memory caching for tool outputs based on tool name and input.

    Notes:
        - TODO: Make thread-safe.
    """

    # Keyed by "{tool}-{input}"; plain dict, no eviction or locking.
    _cache: dict[str, Any] = PrivateAttr(default_factory=dict)

    def add(self, tool: str, input: str, output: Any) -> None:
        """Add a tool result to the cache.

        Args:
            tool: Name of the tool.
            input: Input string used for the tool.
            output: Output result from tool execution.

        Notes:
            - TODO: Rename 'input' parameter to avoid shadowing builtin.
        """
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool: str, input: str) -> Any | None:
        """Retrieve a cached tool result.

        Args:
            tool: Name of the tool.
            input: Input string used for the tool.

        Returns:
            Cached result if found, None otherwise.

        Notes:
            - TODO: Rename 'input' parameter to avoid shadowing builtin.
        """
        return self._cache.get(f"{tool}-{input}")