chore: deprecate PydanticSchemaParser in favor of OpenAPI spec schema

Greyson Lalonde
2025-11-03 11:02:56 -05:00
parent 329567153b
commit aed7788d8f
7 changed files with 91 additions and 326 deletions
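At a high level, this change drops the hand-rolled PydanticSchemaParser string rendering and instead embeds the model's standard JSON schema, wrapped in an OpenAPI-style response-format envelope, into prompt instructions. A minimal sketch of the two representations, assuming Pydantic v2 and an illustrative SimpleModel:

    from pydantic import BaseModel

    class SimpleModel(BaseModel):
        name: str
        age: int

    # Old (removed): a Python-flavored schema string, e.g.
    #   "{\n    name: str,\n    age: int\n}"
    # New: the JSON schema Pydantic already emits, which
    # generate_model_description() wraps in a response-format dict.
    print(SimpleModel.model_json_schema()["properties"])
    # {'name': {'title': 'Name', 'type': 'string'}, 'age': {'title': 'Age', 'type': 'integer'}}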

View File

@@ -1,8 +1,7 @@
from __future__ import annotations
import asyncio
from collections.abc import Sequence
import json
from collections.abc import Awaitable, Callable, Sequence
import shutil
import subprocess
import time
@@ -15,7 +14,14 @@ from typing import (
)
from urllib.parse import urlparse
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
from pydantic import (
BaseModel,
Field,
InstanceOf,
PrivateAttr,
create_model,
model_validator,
)
from typing_extensions import Self
from crewai.a2a.config import A2AConfig
@@ -51,7 +57,10 @@ from crewai.utilities.agent_utils import (
render_text_description_and_args,
)
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import Converter, generate_model_description
from crewai.utilities.converter import (
Converter,
generate_instructions_with_openapi_schema,
)
from crewai.utilities.guardrail_types import GuardrailType
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.prompts import Prompts
@@ -306,25 +315,13 @@ class Agent(BaseAgent):
task_prompt = task.prompt()
# If the task requires output in JSON or Pydantic format,
# append specific instructions to the task prompt to ensure
# that the final answer does not include any code block markers
# Skip this if task.response_model is set, as native structured outputs handle schema automatically
if (task.output_json or task.output_pydantic) and not task.response_model:
# Generate the schema based on the output format
if task.output_json:
schema_dict = generate_model_description(task.output_json)
schema = json.dumps(schema_dict["json_schema"]["schema"], indent=2)
task_prompt += "\n" + self.i18n.slice(
"formatted_task_instructions"
).format(output_format=schema)
elif task.output_pydantic:
schema_dict = generate_model_description(task.output_pydantic)
schema = json.dumps(schema_dict["json_schema"]["schema"], indent=2)
task_prompt += "\n" + self.i18n.slice(
"formatted_task_instructions"
).format(output_format=schema)
output_format: type[BaseModel] = cast(
type[BaseModel], task.output_json or task.output_pydantic
)
task_prompt += "\n" + generate_instructions_with_openapi_schema(
output_format
)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
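The two near-duplicate branches for output_json and output_pydantic collapse into one, since either attribute is a Pydantic model class. A rough standalone equivalent of the new logic, with the schema helper approximated via model_json_schema (names here are illustrative, not the exact crewai helpers):

    from pydantic import BaseModel

    def append_schema_instructions(
        task_prompt: str,
        output_json: type[BaseModel] | None,
        output_pydantic: type[BaseModel] | None,
    ) -> str:
        output_format = output_json or output_pydantic
        if output_format is not None:
            # Stand-in for generate_instructions_with_openapi_schema(output_format)
            schema = output_format.model_json_schema()
            task_prompt += f"\nEnsure your final answer strictly adheres to: {schema}"
        return task_prompt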
@@ -612,7 +609,7 @@ class Agent(BaseAgent):
)
self.agent_executor = CrewAgentExecutor(
llm=self.llm,
llm=self.llm, # type: ignore[arg-type]
task=task, # type: ignore[arg-type]
agent=self,
crew=self.crew,
@@ -762,7 +759,7 @@ class Agent(BaseAgent):
path = parsed.path.replace("/", "_").strip("_")
return f"{domain}_{path}" if path else domain
def _get_mcp_tool_schemas(self, server_params: dict) -> dict[str, dict]:
def _get_mcp_tool_schemas(self, server_params: dict[str, Any]) -> Any:
"""Get tool schemas from MCP server for wrapper creation with caching."""
server_url = server_params["url"]
@@ -794,7 +791,7 @@ class Agent(BaseAgent):
async def _get_mcp_tool_schemas_async(
self, server_params: dict[str, Any]
) -> dict[str, dict]:
) -> dict[str, dict[str, Any]]:
"""Async implementation of MCP tool schema retrieval with timeouts and retries."""
server_url = server_params["url"]
return await self._retry_mcp_discovery(
@@ -802,7 +799,9 @@ class Agent(BaseAgent):
)
async def _retry_mcp_discovery(
self, operation_func, server_url: str
self,
operation_func: Callable[[str], Awaitable[dict[str, dict[str, Any]]]],
server_url: str,
) -> dict[str, dict[str, Any]]:
"""Retry MCP discovery operation with exponential backoff, avoiding try-except in loop."""
last_error = None
@@ -833,7 +832,8 @@ class Agent(BaseAgent):
@staticmethod
async def _attempt_mcp_discovery(
operation_func, server_url: str
operation_func: Callable[[str], Awaitable[dict[str, dict[str, Any]]]],
server_url: str,
) -> tuple[dict[str, dict[str, Any]] | None, str, bool]:
"""Attempt single MCP discovery operation and return (result, error_message, should_retry)."""
try:
@@ -918,8 +918,6 @@ class Agent(BaseAgent):
Returns:
Pydantic BaseModel class
"""
from pydantic import Field, create_model
properties = json_schema.get("properties", {})
required_fields = json_schema.get("required", [])
@@ -937,13 +935,13 @@ class Agent(BaseAgent):
Field(..., description=field_description),
)
else:
field_definitions[field_name] = (
field_definitions[field_name] = ( # type: ignore[assignment]
field_type | None,
Field(default=None, description=field_description),
)
model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema"
return create_model(model_name, **field_definitions)
return create_model(model_name, **field_definitions) # type: ignore[no-any-return,call-overload]
def _json_type_to_python(self, field_schema: dict[str, Any]) -> type:
"""Convert JSON Schema type to Python type.
@@ -963,12 +961,12 @@ class Agent(BaseAgent):
if "const" in option:
types.append(str)
else:
types.append(self._json_type_to_python(option))
types.append(self._json_type_to_python(option)) # type: ignore[arg-type]
unique_types = list(set(types))
if len(unique_types) > 1:
result = unique_types[0]
for t in unique_types[1:]:
result = result | t
result = result | t # type: ignore[assignment]
return result
return unique_types[0]
@@ -981,10 +979,10 @@ class Agent(BaseAgent):
"object": dict,
}
return type_mapping.get(json_type, Any)
return type_mapping.get(json_type, Any) # type: ignore[arg-type]
@staticmethod
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict]:
def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]:
"""Fetch MCP server configurations from CrewAI AMP API."""
# TODO: Implement AMP API call to "integrations/mcps" endpoint
# Should return list of server configs with URLs
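The create_model call above assembles a throwaway Pydantic class from a tool's JSON Schema at runtime; the type: ignore comments exist because field_definitions mixes required and optional tuple shapes, which mypy cannot reconcile. A self-contained sketch of the same pattern, simplified to primitive types only (the real _json_type_to_python also handles anyOf unions):

    from typing import Any
    from pydantic import BaseModel, Field, create_model

    def model_from_json_schema(tool_name: str, json_schema: dict[str, Any]) -> type[BaseModel]:
        type_mapping = {"string": str, "integer": int, "number": float,
                        "boolean": bool, "array": list, "object": dict}
        required = set(json_schema.get("required", []))
        fields: dict[str, Any] = {}
        for name, spec in json_schema.get("properties", {}).items():
            py_type: Any = type_mapping.get(spec.get("type", ""), Any)
            desc = spec.get("description", "")
            if name in required:
                fields[name] = (py_type, Field(..., description=desc))
            else:
                # Optional fields become nullable with a None default.
                fields[name] = (py_type | None, Field(default=None, description=desc))
        return create_model(f"{tool_name.replace('-', '_').replace(' ', '_')}Schema", **fields)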
@@ -1211,11 +1209,11 @@ class Agent(BaseAgent):
if self.apps:
platform_tools = self.get_platform_tools(self.apps)
if platform_tools:
self.tools.extend(platform_tools)
self.tools.extend(platform_tools) # type: ignore[union-attr]
if self.mcps:
mcps = self.get_mcp_tools(self.mcps)
if mcps:
self.tools.extend(mcps)
self.tools.extend(mcps) # type: ignore[union-attr]
lite_agent = LiteAgent(
id=self.id,

View File

@@ -22,7 +22,7 @@
"summarize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"formatted_task_instructions": "Ensure your final answer strictly adheres to the following OpenAPI schema: {output_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",

View File

@@ -10,9 +10,9 @@ from pydantic import BaseModel, ValidationError
from typing_extensions import Unpack
from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
from crewai.utilities.i18n import get_i18n
from crewai.utilities.internal_instructor import InternalInstructor
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
if TYPE_CHECKING:
@@ -22,6 +22,7 @@ if TYPE_CHECKING:
from crewai.llms.base_llm import BaseLLM
_JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
_I18N = get_i18n()
class ConverterError(Exception):
@@ -300,7 +301,7 @@ def convert_with_instructions(
if llm is None:
raise ValueError("Agent must have a valid LLM instance for conversion")
instructions = get_conversion_instructions(model=model, llm=llm)
instructions = generate_instructions_with_openapi_schema(model=model)
converter = create_converter(
agent=agent,
converter_cls=converter_cls,
@@ -323,40 +324,6 @@ def convert_with_instructions(
return exported_result
def get_conversion_instructions(
model: type[BaseModel], llm: BaseLLM | LLM | str | Any
) -> str:
"""Generate conversion instructions based on the model and LLM capabilities.
Args:
model: A Pydantic model class.
llm: The language model instance.
Returns:
"""
instructions = "Please convert the following text into valid JSON."
if (
llm
and not isinstance(llm, str)
and hasattr(llm, "supports_function_calling")
and llm.supports_function_calling()
):
model_schema = PydanticSchemaParser(model=model).get_schema()
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"Use this format exactly:\n```json\n{model_schema}\n```"
)
else:
model_description = generate_model_description(model)
schema_json = json.dumps(model_description["json_schema"]["schema"], indent=2)
instructions += (
f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
f"Use this format exactly:\n```json\n{schema_json}\n```"
)
return instructions
class CreateConverterKwargs(TypedDict, total=False):
"""Keyword arguments for creating a converter.
@@ -628,3 +595,19 @@ def generate_model_description(model: type[BaseModel]) -> dict[str, Any]:
"schema": json_schema,
},
}
def generate_instructions_with_openapi_schema(
model: type[BaseModel],
) -> str:
"""Generate conversion instructions using OpenAPI schema format.
Args:
model: A Pydantic model class.
Returns:
A string containing the conversion instructions.
"""
schema_dict = generate_model_description(model)
schema = json.dumps(schema_dict, indent=2)
return _I18N.slice("formatted_task_instructions").format(output_format=schema)
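The new helper is deliberately LLM-agnostic: model in, instruction string out, with no capability branching. A hedged usage sketch that approximates generate_model_description()'s envelope (the exact nested keys may differ):

    import json
    from pydantic import BaseModel

    class SimpleModel(BaseModel):
        name: str
        age: int

    # Approximation of the dict generate_model_description() returns
    # (see the "type": "json_schema" envelope in the hunk above).
    schema_dict = {
        "type": "json_schema",
        "json_schema": {"schema": SimpleModel.model_json_schema()},
    }
    output_format = json.dumps(schema_dict, indent=2)
    # generate_instructions_with_openapi_schema() formats this into the
    # "formatted_task_instructions" i18n slice.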

View File

@@ -1,14 +1,16 @@
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from typing import TYPE_CHECKING, Any, cast
from pydantic import BaseModel, Field
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.task_events import TaskEvaluationEvent
from crewai.llm import LLM
from crewai.utilities.converter import Converter
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.converter import (
Converter,
generate_instructions_with_openapi_schema,
)
from crewai.utilities.training_converter import TrainingConverter
@@ -79,7 +81,8 @@ class TaskEvaluator:
- Investigate the Converter.to_pydantic signature, returns BaseModel strictly?
"""
crewai_event_bus.emit(
self, TaskEvaluationEvent(evaluation_type="task_evaluation", task=task)
self,
TaskEvaluationEvent(evaluation_type="task_evaluation", task=task), # type: ignore[no-untyped-call]
)
evaluation_query = (
f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
@@ -95,8 +98,7 @@ class TaskEvaluator:
instructions = "Convert all responses into valid JSON output."
if not self.llm.supports_function_calling():
model_schema = PydanticSchemaParser(model=TaskEvaluation).get_schema()
instructions = f"{instructions}\n\nReturn only valid JSON with the following schema:\n```json\n{model_schema}\n```"
instructions = generate_instructions_with_openapi_schema(TaskEvaluation)
converter = Converter(
llm=self.llm,
@@ -108,7 +110,7 @@ class TaskEvaluator:
return cast(TaskEvaluation, converter.to_pydantic())
def evaluate_training_data(
self, training_data: dict, agent_id: str
self, training_data: dict[str, Any], agent_id: str
) -> TrainingTaskEvaluation:
"""
Evaluate the training data based on the llm output, human feedback, and improved output.
@@ -121,7 +123,8 @@ class TaskEvaluator:
- Investigate the Converter.to_pydantic signature, returns BaseModel strictly?
"""
crewai_event_bus.emit(
self, TaskEvaluationEvent(evaluation_type="training_data_evaluation")
self,
TaskEvaluationEvent(evaluation_type="training_data_evaluation"), # type: ignore[no-untyped-call]
)
output_training_data = training_data[agent_id]
@@ -162,13 +165,11 @@ class TaskEvaluator:
"- Provide a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific action items for future tasks. Ensure all key and specificpoints from the human feedback are incorporated into these instructions.\n"
"- A score from 0 to 10 evaluating on completion, quality, and overall performance from the improved output to the initial output based on the human feedback\n"
)
instructions = "I'm gonna convert this raw text into valid JSON."
instructions = "Convert all responses into valid JSON output."
if not self.llm.supports_function_calling():
model_schema = PydanticSchemaParser(
model=TrainingTaskEvaluation
).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
instructions = generate_instructions_with_openapi_schema(TrainingTaskEvaluation)
converter = TrainingConverter(
llm=self.llm,

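Both evaluator paths now share one fallback: models without native function calling get the schema spelled out in-prompt via the new helper, instead of a PydanticSchemaParser dump. A self-contained sketch of the pattern, with a mock LLM and an illustrative stand-in for the evaluation model:

    from unittest.mock import Mock
    from pydantic import BaseModel

    class EvalResult(BaseModel):  # illustrative stand-in for TrainingTaskEvaluation
        suggestions: list[str]
        quality: float

    llm = Mock()
    llm.supports_function_calling.return_value = False

    instructions = "Convert all responses into valid JSON output."
    if not llm.supports_function_calling():
        # In the real code: generate_instructions_with_openapi_schema(EvalResult)
        instructions += f"\nSchema: {EvalResult.model_json_schema()}"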
View File

@@ -1,103 +0,0 @@
from typing import Any, Union, get_args, get_origin
from pydantic import BaseModel, Field
class PydanticSchemaParser(BaseModel):
model: type[BaseModel] = Field(..., description="The Pydantic model to parse.")
def get_schema(self) -> str:
"""Public method to get the schema of a Pydantic model.
Returns:
String representation of the model schema.
"""
return "{\n" + self._get_model_schema(self.model) + "\n}"
def _get_model_schema(self, model: type[BaseModel], depth: int = 0) -> str:
"""Recursively get the schema of a Pydantic model, handling nested models and lists.
Args:
model: The Pydantic model to process.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the model schema.
"""
indent: str = " " * 4 * depth
lines: list[str] = [
f"{indent} {field_name}: {self._get_field_type_for_annotation(field.annotation, depth + 1)}"
for field_name, field in model.model_fields.items()
]
return ",\n".join(lines)
def _format_list_type(self, list_item_type: Any, depth: int) -> str:
"""Format a List type, handling nested models if necessary.
Args:
list_item_type: The type of items in the list.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the List type.
"""
if isinstance(list_item_type, type) and issubclass(list_item_type, BaseModel):
nested_schema = self._get_model_schema(list_item_type, depth + 1)
nested_indent = " " * 4 * depth
return f"List[\n{nested_indent}{{\n{nested_schema}\n{nested_indent}}}\n{nested_indent}]"
return f"List[{list_item_type.__name__}]"
def _format_union_type(self, field_type: Any, depth: int) -> str:
"""Format a Union type, handling Optional and nested types.
Args:
field_type: The Union type to format.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the Union type.
"""
args = get_args(field_type)
if type(None) in args:
# It's an Optional type
non_none_args = [arg for arg in args if arg is not type(None)]
if len(non_none_args) == 1:
inner_type = self._get_field_type_for_annotation(
non_none_args[0], depth
)
return f"Optional[{inner_type}]"
# Union with None and multiple other types
inner_types = ", ".join(
self._get_field_type_for_annotation(arg, depth) for arg in non_none_args
)
return f"Optional[Union[{inner_types}]]"
# General Union type
inner_types = ", ".join(
self._get_field_type_for_annotation(arg, depth) for arg in args
)
return f"Union[{inner_types}]"
def _get_field_type_for_annotation(self, annotation: Any, depth: int) -> str:
"""Recursively get the string representation of a field's type annotation.
Args:
annotation: The type annotation to process.
depth: The current depth of recursion for indentation purposes.
Returns:
A string representation of the type annotation.
"""
origin: Any = get_origin(annotation)
if origin is list:
list_item_type = get_args(annotation)[0]
return self._format_list_type(list_item_type, depth)
if origin is dict:
key_type, value_type = get_args(annotation)
return f"Dict[{key_type.__name__}, {value_type.__name__}]"
if origin is Union:
return self._format_union_type(annotation, depth)
if isinstance(annotation, type) and issubclass(annotation, BaseModel):
nested_schema = self._get_model_schema(annotation, depth)
nested_indent = " " * 4 * depth
return f"{annotation.__name__}\n{nested_indent}{{\n{nested_schema}\n{nested_indent}}}"
return annotation.__name__
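For contrast, the deleted parser rendered types as Python annotations (Optional[str], List[int]) rather than standard JSON Schema, so its output could drift from what JSON validators actually enforce. Side by side for an illustrative optional field:

    from pydantic import BaseModel

    class OptionalModel(BaseModel):
        optional_field: str | None

    # Old PydanticSchemaParser rendering (per the deleted tests below):
    #   {
    #       optional_field: Optional[str]
    #   }
    # New, straight from Pydantic:
    print(OptionalModel.model_json_schema()["properties"]["optional_field"])
    # {'anyOf': [{'type': 'string'}, {'type': 'null'}], 'title': 'Optional Field'}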

View File

@@ -11,12 +11,11 @@ from crewai.utilities.converter import (
convert_to_model,
convert_with_instructions,
create_converter,
generate_instructions_with_openapi_schema,
generate_model_description,
get_conversion_instructions,
handle_partial_json,
validate_model,
)
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from pydantic import BaseModel
import pytest
@@ -187,7 +186,7 @@ def test_handle_partial_json_with_invalid_partial(mock_agent: Mock) -> None:
# Tests for convert_with_instructions
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
@patch("crewai.utilities.converter.generate_instructions_with_openapi_schema")
def test_convert_with_instructions_success(
mock_get_instructions: Mock, mock_create_converter: Mock, mock_agent: Mock
) -> None:
@@ -205,7 +204,7 @@ def test_convert_with_instructions_success(
@patch("crewai.utilities.converter.create_converter")
@patch("crewai.utilities.converter.get_conversion_instructions")
@patch("crewai.utilities.converter.generate_instructions_with_openapi_schema")
def test_convert_with_instructions_failure(
mock_get_instructions: Mock, mock_create_converter: Mock, mock_agent: Mock
) -> None:
@@ -221,34 +220,15 @@ def test_convert_with_instructions_failure(
mock_printer.return_value.print.assert_called_once()
# Tests for get_conversion_instructions
def test_get_conversion_instructions_gpt() -> None:
llm = LLM(model="gpt-4o-mini")
with patch.object(LLM, "supports_function_calling") as supports_function_calling:
supports_function_calling.return_value = True
instructions = get_conversion_instructions(SimpleModel, llm)
model_schema = PydanticSchemaParser(model=SimpleModel).get_schema()
expected_instructions = (
"Please convert the following text into valid JSON.\n\n"
"Output ONLY the valid JSON and nothing else.\n\n"
"Use this format exactly:\n```json\n"
f"{model_schema}\n```"
)
assert instructions == expected_instructions
def test_get_conversion_instructions_non_gpt() -> None:
llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")
with patch.object(LLM, "supports_function_calling", return_value=False):
instructions = get_conversion_instructions(SimpleModel, llm)
# Check that the JSON schema is properly formatted
assert "Please convert the following text into valid JSON" in instructions
assert "Output ONLY the valid JSON and nothing else" in instructions
assert "Use this format exactly" in instructions
assert "```json" in instructions
assert '"type": "object"' in instructions
assert '"properties"' in instructions
assert "'type': 'json_schema'" not in instructions
# Tests for generate_instructions_with_openapi_schema
def test_generate_instructions_with_openapi_schema() -> None:
instructions = generate_instructions_with_openapi_schema(SimpleModel)
# Check that the JSON schema is properly formatted
assert "type" in instructions
assert "json_schema" in instructions
assert "schema" in instructions
assert '"type": "object"' in instructions
assert '"properties"' in instructions
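The replacement test asserts on envelope keys rather than exact text, since the serialized schema body varies by model. Approximate shape of the dict whose JSON ends up in the instructions (nested contents are illustrative):

    expected_envelope = {
        "type": "json_schema",
        "json_schema": {
            "schema": {
                "type": "object",
                "properties": {},  # model fields go here
            },
        },
    }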
# Tests for is_gpt
@@ -363,7 +343,7 @@ def test_convert_with_instructions() -> None:
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -384,7 +364,7 @@ def test_convert_with_instructions() -> None:
def test_converter_with_llama3_2_model() -> None:
llm = LLM(model="openrouter/meta-llama/llama-3.2-3b-instruct")
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -403,7 +383,7 @@ def test_converter_with_llama3_1_model() -> None:
llm.call.return_value = '{"name": "Alice Llama", "age": 30}'
sample_text = "Name: Alice Llama, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -421,7 +401,7 @@ def test_converter_with_nested_model() -> None:
llm = LLM(model="gpt-4o-mini")
sample_text = "Name: John Doe\nAge: 30\nAddress: 123 Main St, Anytown, 12345"
instructions = get_conversion_instructions(Person, llm)
instructions = generate_instructions_with_openapi_schema(Person)
converter = Converter(
llm=llm,
text=sample_text,
@@ -447,7 +427,7 @@ def test_converter_error_handling() -> None:
llm.call.return_value = "Invalid JSON"
sample_text = "Name: Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -472,7 +452,7 @@ def test_converter_retry_logic() -> None:
]
sample_text = "Name: Retry Alice, Age: 30"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -501,7 +481,7 @@ def test_converter_with_optional_fields() -> None:
llm.call.return_value = '{"name": "Bob", "age": null}'
sample_text = "Name: Bob, age: None"
instructions = get_conversion_instructions(OptionalModel, llm)
instructions = generate_instructions_with_openapi_schema(OptionalModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -526,7 +506,7 @@ def test_converter_with_list_field() -> None:
llm.call.return_value = '{"items": [1, 2, 3]}'
sample_text = "Items: 1, 2, 3"
instructions = get_conversion_instructions(ListModel, llm)
instructions = generate_instructions_with_openapi_schema(ListModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -555,7 +535,7 @@ def test_converter_with_enum() -> None:
llm.call.return_value = '{"name": "Alice", "color": "red"}'
sample_text = "Name: Alice, Color: Red"
instructions = get_conversion_instructions(EnumModel, llm)
instructions = generate_instructions_with_openapi_schema(EnumModel)
converter = Converter(
llm=llm,
text=sample_text,
@@ -577,7 +557,7 @@ def test_converter_with_ambiguous_input() -> None:
llm.call.return_value = '{"name": "Charlie", "age": "Not an age"}'
sample_text = "Charlie is thirty years old"
instructions = get_conversion_instructions(SimpleModel, llm)
instructions = generate_instructions_with_openapi_schema(SimpleModel)
converter = Converter(
llm=llm,
text=sample_text,

View File

@@ -1,94 +0,0 @@
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import pytest
from pydantic import BaseModel, Field
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
def test_simple_model():
class SimpleModel(BaseModel):
field1: int
field2: str
parser = PydanticSchemaParser(model=SimpleModel)
schema = parser.get_schema()
expected_schema = """{
field1: int,
field2: str
}"""
assert schema.strip() == expected_schema.strip()
def test_nested_model():
class NestedModel(BaseModel):
nested_field: int
class ParentModel(BaseModel):
parent_field: str
nested: NestedModel
parser = PydanticSchemaParser(model=ParentModel)
schema = parser.get_schema()
expected_schema = """{
parent_field: str,
nested: NestedModel
{
nested_field: int
}
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_list():
class ListModel(BaseModel):
list_field: List[int]
parser = PydanticSchemaParser(model=ListModel)
schema = parser.get_schema()
expected_schema = """{
list_field: List[int]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_optional_field():
class OptionalModel(BaseModel):
optional_field: Optional[str]
parser = PydanticSchemaParser(model=OptionalModel)
schema = parser.get_schema()
expected_schema = """{
optional_field: Optional[str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_union():
class UnionModel(BaseModel):
union_field: Union[int, str]
parser = PydanticSchemaParser(model=UnionModel)
schema = parser.get_schema()
expected_schema = """{
union_field: Union[int, str]
}"""
assert schema.strip() == expected_schema.strip()
def test_model_with_dict():
class DictModel(BaseModel):
dict_field: Dict[str, int]
parser = PydanticSchemaParser(model=DictModel)
schema = parser.get_schema()
expected_schema = """{
dict_field: Dict[str, int]
}"""
assert schema.strip() == expected_schema.strip()