more testing

Brandon Hancock
2025-03-26 10:54:00 -04:00
parent fa62df7d18
commit e6b90699a8
3 changed files with 50 additions and 52 deletions


@@ -80,62 +80,62 @@ async def main():
     # print(f"Raw response: {result.raw}")
     # Example 2: Query with structured output
-    print("\n=== Example 2: Structured Output ===")
-    structured_query = """
-    Research the impact of climate change on coral reefs.
+    # print("\n=== Example 2: Structured Output ===")
+    # structured_query = """
+    # Research the impact of climate change on coral reefs.
-    YOU MUST format your response as a valid JSON object with the following structure:
-    {
-        "main_findings": "A summary of the main findings",
-        "key_points": ["Point 1", "Point 2", "Point 3"],
-        "sources": ["Source 1", "Source 2"]
-    }
+    # YOU MUST format your response as a valid JSON object with the following structure:
+    # {
+    #     "main_findings": "A summary of the main findings",
+    #     "key_points": ["Point 1", "Point 2", "Point 3"],
+    #     "sources": ["Source 1", "Source 2"]
+    # }
-    Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
-    """
+    # Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
+    # """
-    result = await agent.kickoff_async(structured_query)
+    # result = await agent.kickoff_async(structured_query)
-    if result.pydantic:
-        # Cast to the specific type for better IDE support
-        research_result = cast(ResearchResult, result.pydantic)
-        print(f"Main findings: {research_result.main_findings}")
-        print("\nKey points:")
-        for i, point in enumerate(research_result.key_points, 1):
-            print(f"{i}. {point}")
-        print("\nSources:")
-        for i, source in enumerate(research_result.sources, 1):
-            print(f"{i}. {source}")
-    else:
-        print(f"Raw response: {result.raw}")
-        print(
-            "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
-        )
-    print("Usage metrics:")
-    print(result.usage_metrics)
+    # if result.pydantic:
+    #     # Cast to the specific type for better IDE support
+    #     research_result = cast(ResearchResult, result.pydantic)
+    #     print(f"Main findings: {research_result.main_findings}")
+    #     print("\nKey points:")
+    #     for i, point in enumerate(research_result.key_points, 1):
+    #         print(f"{i}. {point}")
+    #     print("\nSources:")
+    #     for i, source in enumerate(research_result.sources, 1):
+    #         print(f"{i}. {source}")
+    # else:
+    #     print(f"Raw response: {result.raw}")
+    #     print(
+    #         "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
+    #     )
+    # print("Usage metrics:")
+    # print(result.usage_metrics)
-    # # Example 3: Multi-turn conversation
-    # print("\n=== Example 3: Multi-turn Conversation ===")
-    # messages = [
-    #     {"role": "user", "content": "I'm planning a trip to Japan."},
-    #     {
-    #         "role": "assistant",
-    #         "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
-    #     },
-    #     {
-    #         "role": "user",
-    #         "content": "What are the best times to visit Tokyo and Kyoto?",
-    #     },
-    # ]
+    # Example 3: Multi-turn conversation
+    print("\n=== Example 3: Multi-turn Conversation ===")
+    messages = [
+        {"role": "user", "content": "I'm planning a trip to Japan."},
+        {
+            "role": "assistant",
+            "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
+        },
+        {
+            "role": "user",
+            "content": "What are the best times to visit Tokyo and Kyoto?",
+        },
+    ]
-    # result = await agent.kickoff_async(messages)
-    # print(f"Response: {result.raw}")
+    result = await agent.kickoff_async(messages)
+    print(f"Response: {result.raw}")
-    # # Print usage metrics if available
-    # if result.usage_metrics:
-    #     print("\nUsage metrics:")
-    #     for key, value in result.usage_metrics.items():
-    #         print(f"{key}: {value}")
+    # Print usage metrics if available
+    if result.usage_metrics:
+        print("\nUsage metrics:")
+        for key, value in result.usage_metrics.items():
+            print(f"{key}: {value}")
 if __name__ == "__main__":
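
Note: the structured-output example above casts result.pydantic to ResearchResult, whose definition sits outside this hunk. A minimal sketch of what that model presumably looks like, inferred purely from the fields the example reads (the field names come from the diff; the types are assumptions):

# Hypothetical reconstruction of the ResearchResult model used via
# cast(ResearchResult, result.pydantic) above; not shown in this diff.
from pydantic import BaseModel


class ResearchResult(BaseModel):
    main_findings: str     # summary of the main findings
    key_points: list[str]  # the prompt asks for at least 3
    sources: list[str]     # the prompt asks for at least 2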


@@ -748,6 +748,7 @@ class LLM:
             if not LLMContextLengthExceededException(
                 str(e)
             )._is_context_limit_error(str(e)):
+                print("HERE IS WHERE THE ERROR IS")
                 logging.error(f"LiteLLM call failed: {str(e)}")
                 raise
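
Note: the added print sits inside the exception handler around the LiteLLM call: errors that are not context-limit errors get logged and re-raised, while context-limit errors fall through to dedicated handling. A minimal sketch of that control flow under assumed names (the wrapper function, response handling, and import path are illustrative, not the actual crewai code):

import logging

import litellm

# Assumed import path for the exception class used in the hunk above.
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)


def call_with_error_handling(params: dict) -> str:
    # Illustrative wrapper: the real logic lives on crewai's LLM class.
    try:
        response = litellm.completion(**params)
        return response.choices[0].message.content
    except Exception as e:
        if not LLMContextLengthExceededException(str(e))._is_context_limit_error(str(e)):
            # The commit's temporary debug print lands here, just before
            # the error is logged and re-raised.
            logging.error(f"LiteLLM call failed: {str(e)}")
            raise
        # Context-limit errors are surfaced as the dedicated exception.
        raise LLMContextLengthExceededException(str(e))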


@@ -105,7 +105,6 @@ class ToolUsage:
     def use(
         self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str
     ) -> str:
-        print("USING A TOOL", calling, tool_string)
         if isinstance(calling, ToolUsageErrorException):
             error = calling.message
             if self.agent.verbose:
@@ -145,8 +144,6 @@ class ToolUsage:
         tool: CrewStructuredTool,
         calling: Union[ToolCalling, InstructorToolCalling],
     ) -> str:
-        print("USING A TOOL: ", tool)
-        print("Type of tool: ", type(tool))
         if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
             try:
                 result = self._i18n.errors("task_repeated_usage").format(