feat: use json schema for tool argument serialization

- Replace the Python type representation with JSON Schema for tool arguments
  - Remove the deprecated PydanticSchemaParser in favor of direct schema generation
  - Add handling for VAR_POSITIONAL and VAR_KEYWORD parameters (see the sketch below)
  - Improve collection of tool argument schemas
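
As a rough illustration of the direct-schema-generation approach (a minimal sketch built around a hypothetical helper, not the code from this commit), a tool's argument schema can be derived straight from the function signature, with *args (VAR_POSITIONAL) mapped to an array and **kwargs (VAR_KEYWORD) to a free-form object:

import inspect
from typing import Any

def args_json_schema(fn) -> dict[str, Any]:
    # Minimal Python-type -> JSON-Schema-type mapping, for illustration only.
    type_map = {str: "string", int: "integer", float: "number", bool: "boolean"}
    properties: dict[str, Any] = {}
    required: list[str] = []
    for name, param in inspect.signature(fn).parameters.items():
        if param.kind is inspect.Parameter.VAR_POSITIONAL:
            # *args: an array whose items use the annotated type.
            properties[name] = {
                "type": "array",
                "items": {"type": type_map.get(param.annotation, "string")},
            }
        elif param.kind is inspect.Parameter.VAR_KEYWORD:
            # **kwargs: a free-form object.
            properties[name] = {"type": "object", "additionalProperties": True}
        else:
            properties[name] = {"type": type_map.get(param.annotation, "string")}
            if param.default is inspect.Parameter.empty:
                required.append(name)
    return {"type": "object", "properties": properties, "required": required}

def search(query: str, *filters: str, **options) -> str:
    """Example tool function."""
    return query

print(args_json_schema(search))
# {'type': 'object',
#  'properties': {'query': {'type': 'string'},
#                 'filters': {'type': 'array', 'items': {'type': 'string'}},
#                 'options': {'type': 'object', 'additionalProperties': True}},
#  'required': ['query']}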
Authored by Greyson LaLonde on 2025-12-11 15:50:19 -05:00 (committed by GitHub)
parent 9bd8ad51f7
commit 38b0b125d3
15 changed files with 442 additions and 602 deletions

@@ -1,4 +1,3 @@
-from unittest import mock
 from unittest.mock import MagicMock, patch
 from crewai.utilities.converter import ConverterError
@@ -44,26 +43,26 @@ def test_evaluate_training_data(converter_mock):
     )
     assert result == function_return_value
-    converter_mock.assert_has_calls(
-        [
-            mock.call(
-                llm=original_agent.llm,
-                text="Assess the quality of the training data based on the llm output, human feedback , and llm "
-                "output improved result.\n\nIteration: data1\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
-                "1\n\nImproved Output:\nImproved output 1\n\n------------------------------------------------\n\nIteration: data2\nInitial Output:\nInitial output 2\n\nHuman "
-                "Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\n------------------------------------------------\n\nPlease provide:\n- Provide "
-                "a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's "
-                "performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific "
-                "action items for future tasks. Ensure all key and specificpoints from the human feedback are "
-                "incorporated into these instructions.\n- A score from 0 to 10 evaluating on completion, quality, and "
-                "overall performance from the improved output to the initial output based on the human feedback\n",
-                model=TrainingTaskEvaluation,
-                instructions="I'm gonna convert this raw text into valid JSON.\n\nThe json should have the "
-                "following structure, with the following keys:\n{\n suggestions: List[str],\n quality: float,\n final_summary: str\n}",
-            ),
-            mock.call().to_pydantic(),
-        ]
-    )
+    # Verify the converter was called with correct arguments
+    converter_mock.assert_called_once()
+    call_kwargs = converter_mock.call_args.kwargs
+    assert call_kwargs["llm"] == original_agent.llm
+    assert call_kwargs["model"] == TrainingTaskEvaluation
+    assert "Iteration: data1" in call_kwargs["text"]
+    assert "Iteration: data2" in call_kwargs["text"]
+    instructions = call_kwargs["instructions"]
+    assert "I'm gonna convert this raw text into valid JSON." in instructions
+    assert "OpenAPI schema" in instructions
+    assert '"type": "json_schema"' in instructions
+    assert '"name": "TrainingTaskEvaluation"' in instructions
+    assert '"suggestions"' in instructions
+    assert '"quality"' in instructions
+    assert '"final_summary"' in instructions
+    converter_mock.return_value.to_pydantic.assert_called_once()
 
 
 @patch("crewai.utilities.converter.Converter.to_pydantic")
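
For orientation, the updated assertions imply that Converter now receives instructions embedding a generated JSON schema rather than a hand-written key listing. Below is a hypothetical reconstruction of that instructions string, assuming Pydantic v2; the exact wording crewai emits may differ, and the test only requires the asserted substrings to appear:

import json
from pydantic import BaseModel

class TrainingTaskEvaluation(BaseModel):
    suggestions: list[str]
    quality: float
    final_summary: str

# Assumed wrapper shape around the model's generated schema.
schema = {
    "type": "json_schema",
    "name": TrainingTaskEvaluation.__name__,
    "schema": TrainingTaskEvaluation.model_json_schema(),
}
instructions = (
    "I'm gonna convert this raw text into valid JSON.\n\n"
    "The json should follow this OpenAPI schema:\n" + json.dumps(schema, indent=2)
)

# Every substring the updated test checks is present in this reconstruction.
for needle in (
    "OpenAPI schema",
    '"type": "json_schema"',
    '"name": "TrainingTaskEvaluation"',
    '"suggestions"',
    '"quality"',
    '"final_summary"',
):
    assert needle in instructions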