diff --git a/examples/lite_agent_example.py b/examples/lite_agent_example.py
index 4d07189b1..1d0642f90 100644
--- a/examples/lite_agent_example.py
+++ b/examples/lite_agent_example.py
@@ -80,62 +80,62 @@ async def main():
     #     print(f"Raw response: {result.raw}")

     # Example 2: Query with structured output
-    print("\n=== Example 2: Structured Output ===")
-    structured_query = """
-    Research the impact of climate change on coral reefs.
+    # print("\n=== Example 2: Structured Output ===")
+    # structured_query = """
+    # Research the impact of climate change on coral reefs.

-    YOU MUST format your response as a valid JSON object with the following structure:
-    {
-        "main_findings": "A summary of the main findings",
-        "key_points": ["Point 1", "Point 2", "Point 3"],
-        "sources": ["Source 1", "Source 2"]
-    }
+    # YOU MUST format your response as a valid JSON object with the following structure:
+    # {
+    #     "main_findings": "A summary of the main findings",
+    #     "key_points": ["Point 1", "Point 2", "Point 3"],
+    #     "sources": ["Source 1", "Source 2"]
+    # }

-    Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
-    """
+    # Include at least 3 key points and 2 sources. Wrap your JSON in ```json and ``` tags.
+    # """

-    result = await agent.kickoff_async(structured_query)
+    # result = await agent.kickoff_async(structured_query)

-    if result.pydantic:
-        # Cast to the specific type for better IDE support
-        research_result = cast(ResearchResult, result.pydantic)
-        print(f"Main findings: {research_result.main_findings}")
-        print("\nKey points:")
-        for i, point in enumerate(research_result.key_points, 1):
-            print(f"{i}. {point}")
-        print("\nSources:")
-        for i, source in enumerate(research_result.sources, 1):
-            print(f"{i}. {source}")
-    else:
-        print(f"Raw response: {result.raw}")
-        print(
-            "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
-        )
-    print("Usage metrics:")
-    print(result.usage_metrics)
+    # if result.pydantic:
+    #     # Cast to the specific type for better IDE support
+    #     research_result = cast(ResearchResult, result.pydantic)
+    #     print(f"Main findings: {research_result.main_findings}")
+    #     print("\nKey points:")
+    #     for i, point in enumerate(research_result.key_points, 1):
+    #         print(f"{i}. {point}")
+    #     print("\nSources:")
+    #     for i, source in enumerate(research_result.sources, 1):
+    #         print(f"{i}. {source}")
+    # else:
+    #     print(f"Raw response: {result.raw}")
+    #     print(
+    #         "\nNote: Structured output was not generated. The LLM may need more explicit instructions to format the response as JSON."
+    #     )
+    # print("Usage metrics:")
+    # print(result.usage_metrics)

-    # # Example 3: Multi-turn conversation
-    # print("\n=== Example 3: Multi-turn Conversation ===")
-    # messages = [
-    #     {"role": "user", "content": "I'm planning a trip to Japan."},
-    #     {
-    #         "role": "assistant",
-    #         "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
-    #     },
-    #     {
-    #         "role": "user",
-    #         "content": "What are the best times to visit Tokyo and Kyoto?",
-    #     },
-    # ]
+    # Example 3: Multi-turn conversation
+    print("\n=== Example 3: Multi-turn Conversation ===")
+    messages = [
+        {"role": "user", "content": "I'm planning a trip to Japan."},
+        {
+            "role": "assistant",
+            "content": "That sounds exciting! Japan is a beautiful country with rich culture, delicious food, and stunning landscapes. What would you like to know about Japan to help with your trip planning?",
+        },
+        {
+            "role": "user",
+            "content": "What are the best times to visit Tokyo and Kyoto?",
+        },
+    ]

-    # result = await agent.kickoff_async(messages)
-    # print(f"Response: {result.raw}")
+    result = await agent.kickoff_async(messages)
+    print(f"Response: {result.raw}")

-    # # Print usage metrics if available
-    # if result.usage_metrics:
-    #     print("\nUsage metrics:")
-    #     for key, value in result.usage_metrics.items():
-    #         print(f"{key}: {value}")
+    # Print usage metrics if available
+    if result.usage_metrics:
+        print("\nUsage metrics:")
+        for key, value in result.usage_metrics.items():
+            print(f"{key}: {value}")


 if __name__ == "__main__":
diff --git a/src/crewai/tools/tool_usage.py b/src/crewai/tools/tool_usage.py
index c4b606eda..29d502876 100644
--- a/src/crewai/tools/tool_usage.py
+++ b/src/crewai/tools/tool_usage.py
@@ -105,7 +105,6 @@ class ToolUsage:
     def use(
         self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str
     ) -> str:
-        print("USING A TOOL", calling, tool_string)
         if isinstance(calling, ToolUsageErrorException):
             error = calling.message
             if self.agent.verbose:
@@ -145,8 +144,6 @@ class ToolUsage:
         tool: CrewStructuredTool,
         calling: Union[ToolCalling, InstructorToolCalling],
     ) -> str:
-        print("USING A TOOL: ", tool)
-        print("Type of tool: ", type(tool))
         if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
             try:
                 result = self._i18n.errors("task_repeated_usage").format(
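
For reference, a minimal standalone driver for the multi-turn path enabled above (a sketch, not part of the patch): it assumes crewai's Agent(role=..., goal=..., backstory=...) constructor, and the role/goal/backstory strings here are illustrative placeholders, since the example file builds its agent before the hunk shown. The kickoff_async, result.raw, and result.usage_metrics usages mirror the calls visible in the diff.

import asyncio

from crewai import Agent


async def main() -> None:
    # Placeholder agent: the real example constructs its agent earlier in
    # examples/lite_agent_example.py, outside the hunk shown above.
    agent = Agent(
        role="Travel Researcher",
        goal="Answer questions about travel destinations",
        backstory="You give concise, well-sourced travel advice.",
    )

    # Multi-turn history, same shape as the messages list in the diff.
    messages = [
        {"role": "user", "content": "I'm planning a trip to Japan."},
        {"role": "assistant", "content": "Happy to help. What would you like to know?"},
        {"role": "user", "content": "What are the best times to visit Tokyo and Kyoto?"},
    ]

    result = await agent.kickoff_async(messages)
    print(result.raw)

    # usage_metrics behaves like a dict when the backend reports token usage.
    if result.usage_metrics:
        for key, value in result.usage_metrics.items():
            print(f"{key}: {value}")


if __name__ == "__main__":
    asyncio.run(main())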