commit fa15c5eb1d
parent defb0c55e6
Author: Brandon Hancock
Date:   2025-03-25 10:36:59 -04:00

9 changed files with 171 additions and 345 deletions

View File

@@ -91,7 +91,7 @@ class Agent(BaseAgent):
     response_template: Optional[str] = Field(
         default=None, description="Response format for the agent."
     )
-    tools_results: Optional[List[Any]] = Field(
+    tools_results: Optional[List[Dict[str, Any]]] = Field(
         default=[], description="Results of the tools used by the agent."
     )
     allow_code_execution: Optional[bool] = Field(
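The narrowed annotation commits each tool result to a dict. A minimal sketch of the shape these entries take, using the keys appended in the LiteAgent hunk later in this commit (the values here are hypothetical):

    from typing import Any, Dict, List

    # Illustration of the entries now required by List[Dict[str, Any]];
    # key names mirror the payload built in the lite_agent diff below.
    tools_results: List[Dict[str, Any]] = []
    tools_results.append(
        {
            "result": "42",                        # raw string returned by the tool
            "tool_name": "calculator",             # hypothetical tool name
            "tool_args": {"expression": "6 * 7"},  # arguments the agent passed
            "result_as_answer": False,             # True short-circuits the loop
        }
    )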
@@ -305,12 +305,12 @@ class Agent(BaseAgent):
         Returns:
             An instance of the CrewAgentExecutor class.
         """
-        tools = tools or self.tools or []
-        parsed_tools = parse_tools(tools)
+        raw_tools: List[BaseTool] = tools or self.tools or []
+        parsed_tools = parse_tools(raw_tools)

         prompt = Prompts(
             agent=self,
-            tools=tools,
+            has_tools=len(raw_tools) > 0,
             i18n=self.i18n,
             use_system_prompt=self.use_system_prompt,
             system_template=self.system_template,
@@ -332,7 +332,7 @@ class Agent(BaseAgent):
             crew=self.crew,
             tools=parsed_tools,
             prompt=prompt,
-            original_tools=tools,
+            original_tools=raw_tools,
             stop_words=stop_words,
             max_iter=self.max_iter,
             tools_handler=self.tools_handler,
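The rename keeps two views of the same list: raw_tools holds the BaseTool instances the caller supplied (still handed to the executor as original_tools), while parse_tools produces the executable form. A hedged sketch of the split; the helper import is defined later in this diff, and the wrapper function is illustrative:

    from typing import List

    from crewai.tools.base_tool import BaseTool
    from crewai.utilities.agent_utils import parse_tools

    def split_tool_views(tools: List[BaseTool], fallback: List[BaseTool]):
        # raw_tools keeps the caller's BaseTool instances;
        # parsed_tools is whatever parse_tools returns for execution.
        raw_tools: List[BaseTool] = tools or fallback or []
        parsed_tools = parse_tools(raw_tools)
        return raw_tools, parsed_tools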

View File

@@ -248,10 +248,6 @@ class BaseAgent(ABC, BaseModel):
     def create_agent_executor(self, tools=None) -> None:
         pass

     @abstractmethod
-    def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
-        pass
-
-    @abstractmethod
     def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
         """Set the task tools that init BaseAgenTools class."""

View File

@@ -15,9 +15,9 @@ if TYPE_CHECKING:
 class CrewAgentExecutorMixin:
-    crew: Optional["Crew"]
-    agent: Optional["BaseAgent"]
-    task: Optional["Task"]
+    crew: "Crew"
+    agent: "BaseAgent"
+    task: "Task"
     iterations: int
     max_iter: int
     _i18n: I18N
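Dropping Optional turns these annotations into a promise that crew, agent, and task are always set, so downstream code can shed its None guards (the executor hunk below drops a type: ignore on task for exactly this reason). A small sketch of the effect, with a hypothetical method:

    class CrewAgentExecutorMixin:
        crew: "Crew"
        agent: "BaseAgent"
        task: "Task"

        def _task_summary(self) -> str:  # hypothetical method
            # Previously self.task was Optional["Task"], so this access
            # needed an assert or a type: ignore; now it checks cleanly.
            return self.task.description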

View File

@@ -13,8 +13,10 @@ from crewai.agents.parser import (
     OutputParserException,
 )
 from crewai.agents.tools_handler import ToolsHandler
+from crewai.lite_agent import LiteAgent
 from crewai.llm import LLM
 from crewai.tools.base_tool import BaseTool
+from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N, Printer
 from crewai.utilities.agent_utils import (
@@ -57,7 +59,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         agent: BaseAgent,
         prompt: dict[str, str],
         max_iter: int,
-        tools: List[BaseTool],
+        tools: List[Union[CrewStructuredTool, BaseTool]],
         tools_names: str,
         stop_words: List[str],
         tools_description: str,
@@ -93,7 +95,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
-        self.tool_name_to_tool_map: Dict[str, BaseTool] = {
+        self.tool_name_to_tool_map: Dict[str, Union[CrewStructuredTool, BaseTool]] = {
             tool.name: tool for tool in self.tools
         }
         self.stop = stop_words
@@ -344,11 +346,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         tool_usage = ToolUsage(
             tools_handler=self.tools_handler,
             tools=self.tools,
-            original_tools=self.original_tools,
-            tools_description=self.tools_description,
-            tools_names=self.tools_names,
-            function_calling_llm=self.function_calling_llm,
-            task=self.task,  # type: ignore[arg-type]
+            task=self.task,
             agent=self.agent,
             action=agent_action,
         )
@@ -377,7 +376,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             return ToolResult(result=tool_result, result_as_answer=False)
         except Exception as e:
             # TODO: drop
-            if self.agent:
             crewai_event_bus.emit(
                 self,
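The widened map type also matters for lookup: elsewhere in this commit tool names are matched casefolded, with underscores treated as spaces. A hedged sketch of that matching rule as a standalone helper (the function itself is hypothetical; the comparisons mirror the LiteAgent hunk below):

    from typing import Dict, Optional

    def find_tool(requested: str, tool_map: Dict[str, object]) -> Optional[object]:
        # Mirror of the commit's matching: casefold both sides, and also
        # accept "tool_name" for a tool registered as "tool name".
        wanted = requested.casefold().strip()
        relaxed = requested.casefold().replace("_", " ").strip()
        for name, tool in tool_map.items():
            key = name.casefold().strip()
            if key in (wanted, relaxed):
                return tool
        return None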

View File

@@ -18,6 +18,7 @@ from crewai.agents.parser import (
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.llm import LLM
 from crewai.tools.base_tool import BaseTool
+from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N
 from crewai.utilities.agent_utils import (
@@ -123,14 +124,15 @@ class LiteAgent(BaseModel):
         default=None,
         description="Callback to be executed after each step of the agent execution.",
     )
+    tools_results: Optional[List[Dict[str, Any]]] = Field(
+        default=[], description="Results of the tools used by the agent."
+    )
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
     _times_executed: int = PrivateAttr(default=0)
     _max_retry_limit: int = PrivateAttr(default=2)
     _key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
-    # Store tool results for tracking
-    _tools_results: List[Dict[str, Any]] = PrivateAttr(default_factory=list)
     # Store messages for conversation
     _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
     # Iteration counter
@@ -140,8 +142,8 @@ class LiteAgent(BaseModel):
     _tools_errors: int = PrivateAttr(default=0)
     _delegations: Dict[str, int] = PrivateAttr(default_factory=dict)
     # Internationalization
-    _i18n: I18N = PrivateAttr(default_factory=I18N)
     _printer: Printer = PrivateAttr(default_factory=Printer)
+    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
     request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
         default=None,
         description="Callback to check if the request is within the RPM limit",
@@ -177,16 +179,16 @@ class LiteAgent(BaseModel):
         """Get the default system prompt for the agent."""
         if self.tools:
             # Use the prompt template for agents with tools
-            return self._i18n.slice("lite_agent_system_prompt_with_tools").format(
+            return self.i18n.slice("lite_agent_system_prompt_with_tools").format(
                 role=self.role,
                 backstory=self.backstory,
                 goal=self.goal,
-                tools=format_tools_description(),
-                tool_names=self._get_tools_names(),
+                tools=render_text_description_and_args(self.tools),
+                tool_names=get_tool_names(self.tools),
             )
         else:
             # Use the prompt template for agents without tools
-            return self._i18n.slice("lite_agent_system_prompt_without_tools").format(
+            return self.i18n.slice("lite_agent_system_prompt_without_tools").format(
                 role=self.role,
                 backstory=self.backstory,
                 goal=self.goal,
@@ -335,7 +337,7 @@ class LiteAgent(BaseModel):
         """
         # Reset state for this run
         self._iterations = 0
-        self._tools_results = []
+        self.tools_results = []

         # Format messages for the LLM
         self._messages = self._format_messages(messages)
@@ -424,27 +426,28 @@ class LiteAgent(BaseModel):
         Returns:
             str: The final result of the agent execution.
         """
-        # Set up tools handler for tool execution
-        tools_handler = ToolsHandler(cache=self._cache_handler)
+        # # Set up tools handler for tool execution
+        # tools_handler = ToolsHandler(cache=self._cache_handler)
+        # TODO: MOVE TO INIT
         # Set up callbacks for token tracking
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
         callbacks = [token_callback]

-        # Prepare tool configurations
-        parsed_tools = parse_tools(self.tools)
-        tools_description = render_text_description_and_args(parsed_tools)
-        tools_names = get_tool_names(parsed_tools)
+        # # Prepare tool configurations
+        # parsed_tools = parse_tools(self.tools)
+        # tools_description = render_text_description_and_args(parsed_tools)
+        # tools_names = get_tool_names(parsed_tools)

         # Execute the agent loop
         formatted_answer = None
         while not isinstance(formatted_answer, AgentFinish):
-            try :
+            try:
                 if has_reached_max_iterations(self._iterations, self.max_iterations):
                     formatted_answer = handle_max_iterations_exceeded(
                         formatted_answer,
                         printer=self._printer,
-                        i18n=self._i18n,
+                        i18n=self.i18n,
                         messages=self._messages,
                         llm=self.llm,
                         callbacks=callbacks,
@@ -460,254 +463,75 @@ class LiteAgent(BaseModel):
                 )
                 formatted_answer = process_llm_response(answer, self.use_stop_words)

-        while self._iterations < self.max_iterations:
-            try:
-                # Execute the LLM
-                llm_instance = self.llm
-                if not isinstance(llm_instance, LLM):
-                    llm_instance = create_llm(llm_instance)
-                if llm_instance is None:
-                    raise ValueError(
-                        "LLM instance is None. Please provide a valid LLM."
-                    )
-
-                # Set response_format if supported
-                try:
-                    if (
-                        self.response_format
-                        and hasattr(llm_instance, "response_format")
-                        and not llm_instance.response_format
-                    ):
-                        provider = getattr(
-                            llm_instance, "_get_custom_llm_provider", lambda: None
-                        )()
-                        from litellm.utils import supports_response_schema
-
-                        if hasattr(llm_instance, "model") and supports_response_schema(
-                            model=llm_instance.model, custom_llm_provider=provider
-                        ):
-                            llm_instance.response_format = self.response_format
-                except Exception as e:
-                    if self.verbose:
-                        print(f"Warning: Could not set response_format: {e}")
-
-                # Get the LLM's response
-                answer = llm_instance.call(
-                    messages=self._messages,
-                    tools=parsed_tools,
-                    callbacks=callbacks,
-                )
-
-                # Keep a copy of the original answer in case we need to fall back to it
-                original_answer = answer
-
-                # Pre-process the answer to correct formatting issues
-                answer = self._preprocess_model_output(answer)
-
-                # Parse the response into an action or final answer
-                parser = CrewAgentParser(agent=cast(BaseAgent, self))
-                try:
-                    formatted_answer = parser.parse(answer)
-                except OutputParserException as e:
-                    if self.verbose:
-                        print(f"Parser error: {str(e)}")
-                    # If we have a Final Answer format error and the original answer is substantive,
-                    # return it directly if it looks like a final answer
-                    if (
-                        "Final Answer" in str(e)
-                        and len(original_answer.strip()) > 20
-                        and "Action:" not in original_answer
-                    ):
-                        if self.verbose:
-                            print(
-                                "Returning original answer directly as final response"
-                            )
-                        return original_answer
-                    # Try to reformat and parse again
-                    reformatted = self._preprocess_model_output(
-                        "Thought: I need to provide an answer.\n\nFinal Answer: "
-                        + original_answer
-                    )
-                    # Try parsing again
-                    try:
-                        formatted_answer = parser.parse(reformatted)
-                    except Exception:
-                        # If we still can't parse, just use the original answer
-                        return original_answer
-
-                # If the agent wants to use a tool
                 if isinstance(formatted_answer, AgentAction):
-                    # Find the appropriate tool
-                    tool_name = formatted_answer.tool
-                    tool_input = formatted_answer.tool_input
-
-                    # Emit tool usage event
-                    crewai_event_bus.emit(
-                        self,
-                        event=ToolUsageStartedEvent(
-                            agent_key=self.key,
-                            agent_role=self.role,
-                            tool_name=tool_name,
-                            tool_args=tool_input,
-                            tool_class=tool_name,
-                        ),
-                    )
-
-                    # Use the tool
-                    if tool_name in tool_map:
-                        tool = tool_map[tool_name]
-                        try:
-                            if hasattr(tool, "_run"):
-                                # BaseTool interface
-                                # Ensure tool_input is a proper dict with string keys
-                                if isinstance(tool_input, dict):
-                                    result = tool._run(
-                                        **{str(k): v for k, v in tool_input.items()}
-                                    )
-                                else:
-                                    result = tool._run(tool_input)
-                            elif hasattr(tool, "run"):
-                                # Another common interface
-                                if isinstance(tool_input, dict):
-                                    result = tool.run(
-                                        **{str(k): v for k, v in tool_input.items()}
-                                    )
-                                else:
-                                    result = tool.run(tool_input)
-                            else:
-                                result = f"Error: Tool '{tool_name}' does not have a supported execution method."
-
-                            # Check if tool result should be the final answer
-                            result_as_answer = getattr(tool, "result_as_answer", False)
-
-                            # Add to tools_results for tracking
-                            self._tools_results.append(
-                                {
-                                    "result": result,
-                                    "tool_name": tool_name,
-                                    "tool_args": tool_input,
-                                    "result_as_answer": result_as_answer,
-                                }
-                            )
-
-                            # Create tool result
-                            tool_result = ToolResult(
-                                result=result, result_as_answer=result_as_answer
-                            )
-
-                            # If the tool result should be the final answer, return it
-                            if tool_result.result_as_answer:
-                                return tool_result.result
-
-                            # Add the result to the formatted answer and messaging
-                            formatted_answer.result = tool_result.result
-                            formatted_answer.text += (
-                                f"\nObservation: {tool_result.result}"
-                            )
-
-                            # Execute the step callback if provided
-                            if self.step_callback:
-                                self.step_callback(formatted_answer)
-
-                            # Add the assistant message to the conversation
-                            self._messages.append(
-                                {"role": "assistant", "content": formatted_answer.text}
-                            )
-                        except Exception as e:
-                            error_message = f"Error using tool '{tool_name}': {str(e)}"
-                            if self.verbose:
-                                print(error_message)
-                            # Add error message to conversation
-                            self._messages.append(
-                                {"role": "user", "content": error_message}
-                            )
-                    else:
-                        # Tool not found
-                        error_message = f"Tool '{tool_name}' not found. Available tools: {tools_names}"
-                        if self.verbose:
-                            print(error_message)
-                        # Add error message to conversation
-                        self._messages.append(
-                            {"role": "user", "content": error_message}
-                        )
-
-                # If the agent provided a final answer
-                elif isinstance(formatted_answer, AgentFinish):
-                    # Execute the step callback if provided
-                    if self.step_callback:
-                        self.step_callback(formatted_answer)
-
-                    # Return the output
-                    return formatted_answer.output
-                else:
-                    # If formatted_answer is None, return the original answer
-                    if not formatted_answer and original_answer:
-                        return original_answer
+                    tool_result = self._execute_tool_and_check_finality(
+                        formatted_answer
+                    )
+                    formatted_answer = self._handle_agent_action(
+                        formatted_answer, tool_result
+                    )

-                # Increment the iteration counter
-                self._iterations += 1
+                self._invoke_step_callback(formatted_answer)
+                self._append_message(formatted_answer.text, role="assistant")
             except Exception as e:
-                if self.verbose:
-                    print(f"Error during agent execution: {e}")
-                # Add error message to conversation
-                self._messages.append({"role": "user", "content": f"Error: {str(e)}"})
-                self._iterations += 1
+                self._iterations += 1
+                print(f"Error: {e}")

-        # If we've reached max iterations without a final answer, force one
-        if self.verbose:
-            print("Maximum iterations reached. Requesting final answer.")
-
-        # Add a message requesting a final answer
-        self._messages.append(
-            {
-                "role": "user",
-                "content": "You've been thinking for a while. Please provide your final answer now.",
-            }
-        )
-
-        # Get the final answer from the LLM
-        llm_instance = self.llm
-        if not isinstance(llm_instance, LLM):
-            llm_instance = create_llm(llm_instance)
-        if llm_instance is None:
-            raise ValueError("LLM instance is None. Please provide a valid LLM.")
-
-        final_answer = llm_instance.call(
-            messages=self._messages,
-            callbacks=callbacks,
-        )
-
-        return final_answer
-
-    @property
-    def tools_results(self) -> List[Dict[str, Any]]:
-        """Get the tools results for this agent."""
-        return self._tools_results
-
-    def increment_formatting_errors(self) -> None:
-        """Increment the formatting errors counter."""
-        self._formatting_errors += 1
-
-    def increment_tools_errors(self) -> None:
-        """Increment the tools errors counter."""
-        self._tools_errors += 1
-
-    def increment_delegations(self, agent_name: Optional[str] = None) -> None:
-        """
-        Increment the delegations counter for a specific agent.
-
-        Args:
-            agent_name: The name of the agent being delegated to.
-        """
-        if agent_name:
-            if agent_name not in self._delegations:
-                self._delegations[agent_name] = 0
-            self._delegations[agent_name] += 1
+    def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
+        try:
+            crewai_event_bus.emit(
+                self,
+                event=ToolUsageStartedEvent(
+                    agent_key=self.key,
+                    agent_role=self.role,
+                    tool_name=agent_action.tool,
+                    tool_args=agent_action.tool_input,
+                    tool_class=agent_action.tool,
+                ),
+            )
+            tool_usage = ToolUsage(
+                tools=self.tools,
+                original_tools=self.tools,  # TODO: INVESTIGATE DIFF BETWEEN THIS AND ABOVE
+                tools_description=render_text_description_and_args(self.tools),
+                tools_names=get_tool_names(self.tools),
+                agent=self,
+                action=agent_action,
+            )
+            tool_calling = tool_usage.parse_tool_calling(agent_action.text)
+
+            if isinstance(tool_calling, ToolUsageErrorException):
+                tool_result = tool_calling.message
+                return ToolResult(result=tool_result, result_as_answer=False)
+            else:
+                if tool_calling.tool_name.casefold().strip() in [
+                    name.casefold().strip() for name in self.tool_name_to_tool_map
+                ] or tool_calling.tool_name.casefold().replace("_", " ") in [
+                    name.casefold().strip() for name in self.tool_name_to_tool_map
+                ]:
+                    tool_result = tool_usage.use(tool_calling, agent_action.text)
+                    tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
+                    if tool:
+                        return ToolResult(
+                            result=tool_result, result_as_answer=tool.result_as_answer
+                        )
+                else:
+                    tool_result = self._i18n.errors("wrong_tool_name").format(
+                        tool=tool_calling.tool_name,
+                        tools=", ".join([tool.name.casefold() for tool in self.tools]),
+                    )
+                return ToolResult(result=tool_result, result_as_answer=False)
+        except Exception as e:
+            if self.agent:
+                crewai_event_bus.emit(
+                    self,
+                    event=ToolUsageErrorEvent(  # validation error
+                        agent_key=self.agent.key,
+                        agent_role=self.agent.role,
+                        tool_name=agent_action.tool,
+                        tool_args=agent_action.tool_input,
+                        tool_class=agent_action.tool,
+                        error=str(e),
+                    ),
+                )
+            raise e
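Taken together, the LiteAgent loop after this commit reduces to: call the LLM, parse the response, delegate tool calls to ToolUsage via _execute_tool_and_check_finality, and repeat until an AgentFinish. A hedged sketch of that control flow (a simplification; import paths assumed, error handling and max-iteration checks omitted):

    from crewai.agents.parser import AgentAction, AgentFinish
    from crewai.utilities.agent_utils import process_llm_response

    def run_loop(agent) -> str:
        formatted_answer = None
        while not isinstance(formatted_answer, AgentFinish):
            answer = agent.llm.call(messages=agent._messages)
            formatted_answer = process_llm_response(answer, agent.use_stop_words)
            if isinstance(formatted_answer, AgentAction):
                # ToolUsage now handles parsing, caching, and result_as_answer,
                # replacing the inlined _run/run dispatch this diff removes.
                tool_result = agent._execute_tool_and_check_finality(formatted_answer)
                formatted_answer = agent._handle_agent_action(formatted_answer, tool_result)
            agent._invoke_step_callback(formatted_answer)
            agent._append_message(formatted_answer.text, role="assistant")
        return formatted_answer.output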

View File

@@ -11,12 +11,17 @@ import json5
 from json_repair import repair_json

 from crewai.agents.tools_handler import ToolsHandler
+from crewai.lite_agent import LiteAgent
 from crewai.task import Task
 from crewai.telemetry import Telemetry
 from crewai.tools import BaseTool
+from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
 from crewai.utilities import I18N, Converter, ConverterError, Printer
+from crewai.utilities.agent_utils import (
+    get_tool_names,
+    render_text_description_and_args,
+)
 from crewai.utilities.events.crewai_event_bus import crewai_event_bus
 from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
@@ -60,14 +65,11 @@ class ToolUsage:
     def __init__(
         self,
-        tools_handler: ToolsHandler,
-        tools: List[BaseTool],
-        original_tools: List[Any],
-        tools_description: str,
-        tools_names: str,
+        tools_handler: Optional[ToolsHandler],
+        tools: List[Union[CrewStructuredTool, BaseTool]],
         task: Task,
-        function_calling_llm: Any,
-        agent: Any,
+        agent: Union[BaseAgent, LiteAgent],
         action: Any,
     ) -> None:
         self._i18n: I18N = agent.i18n
@@ -77,10 +79,9 @@ class ToolUsage:
         self._max_parsing_attempts: int = 3
         self._remember_format_after_usages: int = 3
         self.agent = agent
-        self.tools_description = tools_description
-        self.tools_names = tools_names
+        self.tools_description = render_text_description_and_args(tools)
+        self.tools_names = get_tool_names(tools)
         self.tools_handler = tools_handler
-        self.original_tools = original_tools
         self.tools = tools
         self.task = task
         self.action = action
@@ -134,9 +135,9 @@ class ToolUsage:
     def _use(
         self,
         tool_string: str,
-        tool: Any,
+        tool: CrewStructuredTool,
         calling: Union[ToolCalling, InstructorToolCalling],
-    ) -> str:  # TODO: Fix this return type
+    ) -> str:
         if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
             try:
                 result = self._i18n.errors("task_repeated_usage").format(
@@ -156,19 +157,24 @@ class ToolUsage:
         started_at = time.time()
         from_cache = False

-        result = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
+        result = None
         # check if cache is available
-        if self.tools_handler.cache:
-            result = self.tools_handler.cache.read(  # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str")
+        if self.tools_handler and self.tools_handler.cache:
+            result = self.tools_handler.cache.read(
                 tool=calling.tool_name, input=calling.arguments
             )
             from_cache = result is not None

-        original_tool = next(
-            (ot for ot in self.original_tools if ot.name == tool.name), None
+        available_tool = next(
+            (
+                available_tool
+                for available_tool in self.tools
+                if available_tool.name == tool.name
+            ),
+            None,
         )

-        if result is None:  #! finecwg: if not result --> if result is None
+        if result is None:
             try:
                 if calling.tool_name in [
                     "Delegate work to coworker",
@@ -217,10 +223,10 @@ class ToolUsage:
             if self.tools_handler:
                 should_cache = True
                 if (
-                    hasattr(original_tool, "cache_function")
-                    and original_tool.cache_function  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                    hasattr(available_tool, "cache_function")
+                    and available_tool.cache_function  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
                 ):
-                    should_cache = original_tool.cache_function(  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                    should_cache = available_tool.cache_function(  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
                         calling.arguments, result
                     )
@@ -247,10 +253,10 @@ class ToolUsage:
             )

             if (
-                hasattr(original_tool, "result_as_answer")
-                and original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                hasattr(available_tool, "result_as_answer")
+                and available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
             ):
-                result_as_answer = original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
+                result_as_answer = available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
                 data["result_as_answer"] = result_as_answer

         self.agent.tools_results.append(data)
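available_tool is resolved by name from self.tools, and its optional cache_function hook decides whether a result is written back to the cache; ToolUsage invokes it as cache_function(calling.arguments, result). A small sketch of a tool opting out of caching for error strings (the tool and predicate are hypothetical; cache_function is an existing BaseTool hook):

    from crewai.tools import BaseTool

    class WeatherTool(BaseTool):
        name: str = "weather_lookup"  # hypothetical tool
        description: str = "Look up the weather for a city"

        def _run(self, city: str) -> str:
            return f"Sunny in {city}"

    # Only cache results that are not error strings; ToolUsage calls
    # cache_function(calling.arguments, result) before writing to cache.
    tool = WeatherTool(
        cache_function=lambda args, result: not str(result).startswith("Error")
    )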

View File

@@ -1,5 +1,6 @@
 from typing import Any, Callable, Dict, List, Optional, Union

+from crewai.agent import Agent
 from crewai.agents.parser import (
     FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
     AgentAction,
@@ -7,14 +8,22 @@ from crewai.agents.parser import (
     CrewAgentParser,
     OutputParserException,
 )
+from crewai.lite_agent import ToolResult
 from crewai.llm import LLM
 from crewai.tools import BaseTool as CrewAITool
 from crewai.tools.base_tool import BaseTool
+from crewai.tools.structured_tool import CrewStructuredTool
+from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
+from crewai.utilities.events import crewai_event_bus
+from crewai.utilities.events.tool_usage_events import (
+    ToolUsageErrorEvent,
+    ToolUsageStartedEvent,
+)
 from crewai.utilities.i18n import I18N
 from crewai.utilities.printer import Printer


-def parse_tools(tools: List[Any]) -> List[Any]:
+def parse_tools(tools: List[BaseTool]) -> List[Union[CrewStructuredTool, BaseTool]]:
     """Parse tools to be used for the task."""
     tools_list = []
     try:
@@ -31,18 +40,16 @@ def parse_tools(tools: List[Any]) -> List[Any]:
     return tools_list


-def get_tool_names(tools: List[Any]) -> str:
+def get_tool_names(tools: List[Union[CrewStructuredTool, BaseTool]]) -> str:
     """Get the names of the tools."""
     return ", ".join([t.name for t in tools])


-def render_text_description_and_args(tools: List[BaseTool]) -> str:
+def render_text_description_and_args(
+    tools: List[Union[CrewStructuredTool, BaseTool]]
+) -> str:
     """Render the tool name, description, and args in plain text.

     Output will be in the format of:

     .. code-block:: markdown

         search: This tool is used for search, args: {"query": {"type": "string"}}
         calculator: This tool is used for math, \
             args: {"expression": {"type": "string"}}

View File

@@ -9,7 +9,7 @@ class Prompts(BaseModel):
     """Manages and generates prompts for a generic agent."""

     i18n: I18N = Field(default=I18N())
-    tools: list[Any] = Field(default=[])
+    has_tools: bool = False
     system_template: Optional[str] = None
     prompt_template: Optional[str] = None
     response_template: Optional[str] = None
@@ -19,7 +19,7 @@ class Prompts(BaseModel):
     def task_execution(self) -> dict[str, str]:
         """Generate a standard prompt for task execution."""
         slices = ["role_playing"]
-        if len(self.tools) > 0:
+        if self.has_tools:
             slices.append("tools")
         else:
             slices.append("no_tools")

View File

@@ -4,62 +4,57 @@ from crewai.tools import BaseTool

 # A simple test tool
-class TestTool(BaseTool):
-    name = "test_tool"
-    description = "A simple test tool"
+class SecretLookupTool(BaseTool):
+    name = "secret_lookup"
+    description = "A tool to lookup secrets"

-    def _run(self, query: str) -> str:
-        return f"Test result for: {query}"
+    def _run(self) -> str:
+        return "SUPERSECRETPASSWORD123"


 # Test with tools
 def test_with_tools():
     llm = LLM(model="gpt-4o")
     agent = LiteAgent(
-        role="Test Agent",
-        goal="Test the system prompt formatting",
-        backstory="I am a test agent created to verify the system prompt works correctly.",
+        role="Secret Agent",
+        goal="Return the secret password",
+        backstory="I am a secret agent created to return the secret password",
         llm=llm,
-        tools=[TestTool()],
+        tools=[SecretLookupTool()],
         verbose=True,
     )

     # Get the system prompt
     system_prompt = agent._get_default_system_prompt()
     print("\n=== System Prompt (with tools) ===")
     print(system_prompt)

     # Test a simple query
     response = agent.kickoff("Hello, can you help me?")
     print("\n=== Agent Response ===")
     print(response)


-# Test without tools
-def test_without_tools():
-    llm = LLM(model="gpt-4o")
-    agent = LiteAgent(
-        role="Test Agent",
-        goal="Test the system prompt formatting",
-        backstory="I am a test agent created to verify the system prompt works correctly.",
-        llm=llm,
-        verbose=True,
-    )
+# # Test without tools
+# def test_without_tools():
+#     llm = LLM(model="gpt-4o")
+#     agent = LiteAgent(
+#         role="Test Agent",
+#         goal="Test the system prompt formatting",
+#         backstory="I am a test agent created to verify the system prompt works correctly.",
+#         llm=llm,
+#         verbose=True,
+#     )

-    # Get the system prompt
-    system_prompt = agent._get_default_system_prompt()
-    print("\n=== System Prompt (without tools) ===")
-    print(system_prompt)
+#     # Get the system prompt
+#     system_prompt = agent._get_default_system_prompt()
+#     print("\n=== System Prompt (without tools) ===")
+#     print(system_prompt)

-    # Test a simple query
-    response = agent.kickoff("Hello, can you help me?")
-    print("\n=== Agent Response ===")
-    print(response)
+#     # Test a simple query
+#     response = agent.kickoff("Hello, can you help me?")
+#     print("\n=== Agent Response ===")
+#     print(response)


 if __name__ == "__main__":
     print("Testing LiteAgent with tools...")
     test_with_tools()
-    print("\n\nTesting LiteAgent without tools...")
-    test_without_tools()
+    # print("\n\nTesting LiteAgent without tools...")
+    # test_without_tools()