Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-09 16:18:30 +00:00)
Improve typed task outputs (#1651)
* V1 working
* clean up imports and prints
* more clean up and add tests
* fixing tests
* fix test
* fix linting
* Fix tests
* Fix linting
* add doc string as requested by eduardo
Commit: 4069b621d5
Parent: a7147c99c6
Committed by: GitHub
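For orientation before the diff itself, here is a minimal sketch of the workflow this change targets: declaring a typed output on a task so the agent is prompted with the model's schema. The ResearchSummary model, the agent/task wiring, and the use of result.pydantic are illustrative assumptions, not part of this diff.

from typing import List

from pydantic import BaseModel

from crewai import Agent, Crew, Task


# Hypothetical output model, used only for illustration.
class ResearchSummary(BaseModel):
    title: str
    key_points: List[str]


researcher = Agent(
    role="Researcher",
    goal="Summarize findings on a topic",
    backstory="An analyst who writes concise, structured summaries.",
)

# Declaring output_pydantic is what triggers the new prompt-augmentation
# path added to Agent.execute_task() in this commit.
task = Task(
    description="Summarize the latest findings on topic X.",
    expected_output="A structured research summary.",
    output_pydantic=ResearchSummary,
    agent=researcher,
)

result = Crew(agents=[researcher], tasks=[task]).kickoff()
print(result.pydantic)  # parsed ResearchSummary instance, assuming the run succeeds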
@@ -11,10 +11,12 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.cli.constants import ENV_VARS
 from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
+from crewai.task import Task
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Converter, Prompts
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
+from crewai.utilities.converter import generate_model_description
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -237,7 +239,7 @@ class Agent(BaseAgent):
     def execute_task(
         self,
-        task: Any,
+        task: Task,
         context: Optional[str] = None,
         tools: Optional[List[BaseTool]] = None,
     ) -> str:
@@ -256,6 +258,22 @@ class Agent(BaseAgent):
         task_prompt = task.prompt()

+        # If the task requires output in JSON or Pydantic format,
+        # append specific instructions to the task prompt to ensure
+        # that the final answer does not include any code block markers
+        if task.output_json or task.output_pydantic:
+            # Generate the schema based on the output format
+            if task.output_json:
+                # schema = json.dumps(task.output_json, indent=2)
+                schema = generate_model_description(task.output_json)
+
+            elif task.output_pydantic:
+                schema = generate_model_description(task.output_pydantic)
+
+            task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
+                output_format=schema
+            )
+
         if context:
             task_prompt = self.i18n.slice("task_with_context").format(
                 task=task_prompt, context=context
             )
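For context on what the new branch above actually appends, here is a rough sketch using a made-up ReportModel; it assumes I18N is importable from crewai.utilities, matching the i18n helper used in execute_task:

from pydantic import BaseModel

from crewai.utilities import I18N
from crewai.utilities.converter import generate_model_description


class ReportModel(BaseModel):  # hypothetical stand-in for task.output_pydantic
    title: str
    score: int


schema = generate_model_description(ReportModel)
# schema is a brace-delimited field listing, roughly '{\n "title": str,\n "score": int\n}'

addendum = I18N().slice("formatted_task_instructions").format(output_format=schema)
# addendum tells the LLM to answer only in that format and to omit ```json fences;
# execute_task() appends it to the task prompt.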
@@ -279,9 +279,7 @@ class Task(BaseModel):
         content = (
             json_output
             if json_output
-            else pydantic_output.model_dump_json()
-            if pydantic_output
-            else result
+            else pydantic_output.model_dump_json() if pydantic_output else result
         )
         self._save_file(content)
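The single-line conditional expression above is equivalent to this spelled-out precedence, shown only as a readability aid:

# Prefer the raw JSON string, then the Pydantic model dumped to JSON,
# and finally fall back to the plain result text.
if json_output:
    content = json_output
elif pydantic_output:
    content = pydantic_output.model_dump_json()
else:
    content = result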
@@ -21,7 +21,8 @@
     "summarizer_system_message": "You are a helpful assistant that summarizes text.",
     "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
     "summary": "This is a summary of our conversation so far:\n{merged_summary}",
-    "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared."
+    "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
+    "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
   },
   "errors": {
     "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Any, Optional, Type, Union
+from typing import Any, Optional, Type, Union, get_args, get_origin

 from pydantic import BaseModel, ValidationError
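The two helpers added to the typing import are what the new generate_model_description function below relies on to unwrap generic annotations; a quick self-contained illustration:

from typing import Dict, List, Optional, Union, get_args, get_origin

# get_origin / get_args deconstruct typing generics at runtime.
assert get_origin(List[int]) is list and get_args(List[int]) == (int,)
assert get_origin(Dict[str, int]) is dict
assert get_origin(Optional[str]) is Union  # Optional[X] is just Union[X, None]
assert type(None) in get_args(Optional[str])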
@@ -214,3 +214,38 @@ def create_converter(
         raise Exception("No output converter found or set.")

     return converter
+
+
+def generate_model_description(model: Type[BaseModel]) -> str:
+    """
+    Generate a string description of a Pydantic model's fields and their types.
+
+    This function takes a Pydantic model class and returns a string that describes
+    the model's fields and their respective types. The description includes handling
+    of complex types such as `Optional`, `List`, and `Dict`, as well as nested Pydantic
+    models.
+    """
+
+    def describe_field(field_type):
+        origin = get_origin(field_type)
+        args = get_args(field_type)
+
+        if origin is Union and type(None) in args:
+            non_none_args = [arg for arg in args if arg is not type(None)]
+            return f"Optional[{describe_field(non_none_args[0])}]"
+        elif origin is list:
+            return f"List[{describe_field(args[0])}]"
+        elif origin is dict:
+            key_type = describe_field(args[0])
+            value_type = describe_field(args[1])
+            return f"Dict[{key_type}, {value_type}]"
+        elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
+            return generate_model_description(field_type)
+        else:
+            return field_type.__name__
+
+    fields = model.__annotations__
+    field_descriptions = [
+        f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
+    ]
+    return "{\n " + ",\n ".join(field_descriptions) + "\n}"
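As a quick illustration of how the new function recurses into nested models (the Address and Person models here are hypothetical, not part of this diff):

from typing import Dict, List, Optional

from pydantic import BaseModel

from crewai.utilities.converter import generate_model_description


class Address(BaseModel):
    city: str
    zip_code: Optional[str]


class Person(BaseModel):
    name: str
    tags: List[str]
    scores: Dict[str, int]
    address: Address


# The nested Address model is expanded in place; the output is roughly:
# {
#  "name": str,
#  "tags": List[str],
#  "scores": Dict[str, int],
#  "address": {
#  "city": str,
#  "zip_code": Optional[str]
# }
# }
print(generate_model_description(Person))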
@@ -1,7 +1,10 @@
 import json
+from typing import Dict, List, Optional
 from unittest.mock import MagicMock, Mock, patch

 import pytest
+from pydantic import BaseModel

+from crewai.llm import LLM
 from crewai.utilities.converter import (
     Converter,
@@ -9,12 +12,11 @@ from crewai.utilities.converter import (
     convert_to_model,
     convert_with_instructions,
     create_converter,
+    generate_model_description,
     get_conversion_instructions,
     handle_partial_json,
     validate_model,
 )
-from pydantic import BaseModel

 from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -269,3 +271,45 @@ def test_create_converter_fails_without_agent_or_converter_cls():
         create_converter(
             llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
         )
+
+
+def test_generate_model_description_simple_model():
+    description = generate_model_description(SimpleModel)
+    expected_description = '{\n "name": str,\n "age": int\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_nested_model():
+    description = generate_model_description(NestedModel)
+    expected_description = (
+        '{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
+    )
+    assert description == expected_description
+
+
+def test_generate_model_description_optional_field():
+    class ModelWithOptionalField(BaseModel):
+        name: Optional[str]
+        age: int
+
+    description = generate_model_description(ModelWithOptionalField)
+    expected_description = '{\n "name": Optional[str],\n "age": int\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_list_field():
+    class ModelWithListField(BaseModel):
+        items: List[int]
+
+    description = generate_model_description(ModelWithListField)
+    expected_description = '{\n "items": List[int]\n}'
+    assert description == expected_description
+
+
+def test_generate_model_description_dict_field():
+    class ModelWithDictField(BaseModel):
+        attributes: Dict[str, int]
+
+    description = generate_model_description(ModelWithDictField)
+    expected_description = '{\n "attributes": Dict[str, int]\n}'
+    assert description == expected_description