LiteAgent: structured output via response_format now works

This commit is contained in:
Brandon Hancock
2025-03-25 14:42:50 -04:00
parent 06854fff86
commit 0785d596f0
3 changed files with 57 additions and 38 deletions

View File

@@ -74,43 +74,43 @@ async def main():
response_format=ResearchResult, # Optional: Use a structured output format
)
# Example 1: Simple query with raw text response
print("\n=== Example 1: Simple Query ===")
result = await agent.kickoff_async("What is the population of Tokyo in 2023?")
print(f"Raw response: {result.raw}")
# # Example 1: Simple query with raw text response
# print("\n=== Example 1: Simple Query ===")
# result = await agent.kickoff_async("What is the population of Tokyo in 2023?")
# print(f"Raw response: {result.raw}")
# # Example 2: Query with structured output
# print("\n=== Example 2: Structured Output ===")
# structured_query = """
# Research the impact of climate change on coral reefs.
# Example 2: Query with structured output
print("\n=== Example 2: Structured Output ===")
structured_query = """
Research the impact of climate change on coral reefs.
# YOU MUST format your response as a valid JSON object with the following structure:
# {
# "main_findings": "A summary of the main findings",
# "key_points": ["Point 1", "Point 2", "Point 3"],
# "sources": ["Source 1", "Source 2"]
# }
YOU MUST format your response as a valid JSON object with the following structure:
{
"main_findings": "A summary of the main findings",
"key_points": ["Point 1", "Point 2", "Point 3"],
"sources": ["Source 1", "Source 2"]
}
# Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
# """
Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
"""
# result = await agent.kickoff_async(structured_query)
result = await agent.kickoff_async(structured_query)
# if result.pydantic:
# # Cast to the specific type for better IDE support
# research_result = cast(ResearchResult, result.pydantic)
# print(f"Main findings: {research_result.main_findings}")
# print("\nKey points:")
# for i, point in enumerate(research_result.key_points, 1):
# print(f"{i}. {point}")
# print("\nSources:")
# for i, source in enumerate(research_result.sources, 1):
# print(f"{i}. {source}")
# else:
# print(f"Raw response: {result.raw}")
# print(
# "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
# )
if result.pydantic:
# Cast to the specific type for better IDE support
research_result = cast(ResearchResult, result.pydantic)
print(f"Main findings: {research_result.main_findings}")
print("\nKey points:")
for i, point in enumerate(research_result.key_points, 1):
print(f"{i}. {point}")
print("\nSources:")
for i, source in enumerate(research_result.sources, 1):
print(f"{i}. {source}")
else:
print(f"Raw response: {result.raw}")
print(
"\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
)
# # Example 3: Multi-turn conversation
# print("\n=== Example 3: Multi-turn Conversation ===")

View File

@@ -32,6 +32,7 @@ from crewai.utilities.agent_utils import (
process_llm_response,
render_text_description_and_args,
)
from crewai.utilities.converter import convert_to_model, generate_model_description
from crewai.utilities.events.agent_events import (
LiteAgentExecutionStartedEvent,
)
@@ -202,9 +203,10 @@ class LiteAgent(BaseModel):
def _get_default_system_prompt(self) -> str:
"""Get the default system prompt for the agent."""
base_prompt = ""
if self._parsed_tools:
# Use the prompt template for agents with tools
return self.i18n.slice("lite_agent_system_prompt_with_tools").format(
base_prompt = self.i18n.slice("lite_agent_system_prompt_with_tools").format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
@@ -213,12 +215,25 @@ class LiteAgent(BaseModel):
)
else:
# Use the prompt template for agents without tools
return self.i18n.slice("lite_agent_system_prompt_without_tools").format(
base_prompt = self.i18n.slice(
"lite_agent_system_prompt_without_tools"
).format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
)
# Add response format instructions if specified
if self.response_format:
schema = generate_model_description(self.response_format)
base_prompt += self.i18n.slice("lite_agent_response_format").format(
response_format=schema
)
print("BASE PROMPT:", base_prompt)
return base_prompt
def _format_messages(
self, messages: Union[str, List[Dict[str, str]]]
) -> List[Dict[str, str]]:
@@ -307,10 +322,13 @@ class LiteAgent(BaseModel):
else:
raise e
# TODO: CREATE AND RETURN LiteAgentOutput
formatted_result: Optional[BaseModel] = None
if self.response_format:
formatted_result = self.response_format.model_validate_json(result.output)
return LiteAgentOutput(
raw=result.text,
pydantic=None, # TODO: Add pydantic output
raw=result.output,
pydantic=formatted_result,
agent_role=self.role,
usage_metrics=None, # TODO: Add usage metrics
)

View File

@@ -26,7 +26,8 @@
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
"lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python."
},
"errors": {
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",