feat: improve data training for models up to 7B parameters (#3085)

* feat: improve data training for models up to 7B parameters.

* docs: add training considerations for small models to the documentation
This commit is contained in:
Lucas Gomide
2025-07-01 12:47:47 -03:00
committed by GitHub
parent 2ab002a5bf
commit 49c0144154
5 changed files with 296 additions and 7 deletions

View File

@@ -5,6 +5,7 @@ from pydantic import BaseModel, Field
from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.training_converter import TrainingConverter
class Entity(BaseModel):
@@ -133,7 +134,7 @@ class TaskEvaluator:
).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
converter = Converter(
converter = TrainingConverter(
llm=self.llm,
text=evaluation_query,
model=TrainingTaskEvaluation,