Update inner tool usage logic to support both regular and function calling

João Moura
2024-03-02 13:54:23 -03:00
parent c12283bb16
commit 42eeec5897
10 changed files with 208 additions and 142 deletions

poetry.lock generated
View File

@@ -1954,13 +1954,13 @@ socks = ["socksio (==1.*)"]
[[package]]
name = "huggingface-hub"
version = "0.21.1"
version = "0.21.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
{file = "huggingface_hub-0.21.1-py3-none-any.whl", hash = "sha256:b40dd1dc5c589b7c73178f5f17996bac516524dce83f16d5219a83e33a565712"},
{file = "huggingface_hub-0.21.1.tar.gz", hash = "sha256:c458ae6b3e8e197472c4ef01d8cc5f8b3ddb70e9288afcd494753d832dac3a70"},
{file = "huggingface_hub-0.21.2-py3-none-any.whl", hash = "sha256:16955c2b60bcff32a0778f84b9e9ae8f61d7f003da6aa1fbb7bc897a0c37b28c"},
{file = "huggingface_hub-0.21.2.tar.gz", hash = "sha256:839f2fc69fc51797b76dcffa7edbf7fb1150176f74cb1dc2d87ca00e5e0b5611"},
]
[package.dependencies]
@@ -4311,13 +4311,13 @@ py = ">=1.4.26,<2.0.0"
[[package]]
name = "rich"
version = "13.7.0"
version = "13.7.1"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"},
{file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"},
{file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"},
{file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"},
]
[package.dependencies]

View File

@@ -1,7 +1,7 @@
[tool.poetry]
name = "crewai"
version = "0.16.3"
version = "0.16.4"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -26,7 +26,7 @@ opentelemetry-sdk = "^1.22.0"
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
instructor = "^0.5.2"
regex = "^2023.12.25"
crewai-tools = { version = "^0.0.12", optional = true }
crewai-tools = { version = "^0.0.13", optional = true }
click = "^8.1.7"
[tool.poetry.extras]
@@ -45,7 +45,7 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai_tools = "^0.0.12"
crewai_tools = "^0.0.14"
[tool.isort]
profile = "black"

View File

@@ -151,6 +151,8 @@ class Agent(BaseModel):
Returns:
Output of the agent
"""
self.tools_handler.last_used_tool = {}
task_prompt = task.prompt()
if context:
@@ -162,6 +164,7 @@ class Agent(BaseModel):
self.create_agent_executor(tools=tools)
self.agent_executor.tools = tools
self.agent_executor.task = task
self.agent_executor.tools_description = render_text_description(tools)
self.agent_executor.tools_names = self.__tools_names(tools)
@@ -268,7 +271,7 @@ class Agent(BaseModel):
def format_log_to_str(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str = "Result: ",
observation_prefix: str = "Observation: ",
llm_prefix: str = "",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process."""

View File

@@ -27,6 +27,7 @@ class CrewAgentExecutor(AgentExecutor):
request_within_rpm_limit: Any = None
tools_handler: InstanceOf[ToolsHandler] = None
max_iterations: Optional[int] = 15
have_forced_answer: bool = False
force_answer_max_iterations: Optional[int] = None
step_callback: Optional[Any] = None
@@ -36,7 +37,9 @@ class CrewAgentExecutor(AgentExecutor):
return values
def _should_force_answer(self) -> bool:
return True if self.iterations == self.force_answer_max_iterations else False
return (
self.iterations == self.force_answer_max_iterations
) and not self.have_forced_answer
def _call(
self,
@@ -103,6 +106,13 @@ class CrewAgentExecutor(AgentExecutor):
Override this to take control of how the agent makes and acts on choices.
"""
try:
if self._should_force_answer():
error = self._i18n.errors("force_final_answer")
output = AgentAction("_Exception", error, error)
self.have_forced_answer = True
yield AgentStep(action=output, observation=error)
return
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
# Call the LLM to see what to do.
output = self.agent.plan(
@@ -111,23 +121,6 @@ class CrewAgentExecutor(AgentExecutor):
**inputs,
)
if self._should_force_answer():
if isinstance(output, AgentFinish):
yield output
return
if isinstance(output, AgentAction):
output = output
else:
raise ValueError(
f"Unexpected output type from agent: {type(output)}"
)
yield AgentStep(
action=output, observation=self._i18n.errors("force_final_answer")
)
return
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
@@ -140,11 +133,11 @@ class CrewAgentExecutor(AgentExecutor):
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
f"This is the error: {str(e)}"
)
text = str(e)
str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = f"\n{str(e.observation)}"
text = str(e.llm_output)
str(e.llm_output)
else:
observation = ""
elif isinstance(self.handle_parsing_errors, str):
@@ -153,22 +146,22 @@ class CrewAgentExecutor(AgentExecutor):
observation = f"\n{self.handle_parsing_errors(e)}"
else:
raise ValueError("Got unexpected type of `handle_parsing_errors`")
output = AgentAction("_Exception", observation, text)
output = AgentAction("_Exception", observation, "")
if run_manager:
run_manager.on_agent_action(output, color="green")
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = ExceptionTool().run(
output.tool_input,
verbose=self.verbose,
verbose=False,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
if self._should_force_answer():
yield AgentStep(
action=output, observation=self._i18n.errors("force_final_answer")
)
error = self._i18n.errors("force_final_answer")
output = AgentAction("_Exception", error, error)
yield AgentStep(action=output, observation=error)
return
yield AgentStep(action=output, observation=observation)
@@ -192,8 +185,8 @@ class CrewAgentExecutor(AgentExecutor):
tools_description=self.tools_description,
tools_names=self.tools_names,
function_calling_llm=self.function_calling_llm,
llm=self.llm,
task=self.task,
action=agent_action,
)
tool_calling = tool_usage.parse(agent_action.log)
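A rough sketch of the new executor control flow (names simplified, not the real class hierarchy): the forced-answer check now fires at most once, before the LLM is asked to plan, and emits a single exception step carrying the force_final_answer message.

    # Hypothetical, simplified guard illustrating the new behaviour of
    # _should_force_answer() plus the early return in the step iterator.
    class ForcedAnswerGuard:
        def __init__(self, force_answer_max_iterations: int):
            self.iterations = 0
            self.force_answer_max_iterations = force_answer_max_iterations
            self.have_forced_answer = False

        def should_force_answer(self) -> bool:
            # True only the first time the iteration budget is hit.
            return (
                self.iterations == self.force_answer_max_iterations
            ) and not self.have_forced_answer

    def next_step(guard: ForcedAnswerGuard, plan, force_message: str):
        # Before calling the LLM, force a final answer if the budget ran out.
        if guard.should_force_answer():
            guard.have_forced_answer = True
            return ("_Exception", force_message)  # stands in for AgentStep(...)
        return plan()

    guard = ForcedAnswerGuard(force_answer_max_iterations=0)
    print(next_step(guard, plan=lambda: ("plan", "call the LLM"), force_message="give your final answer now"))
    print(next_step(guard, plan=lambda: ("plan", "call the LLM"), force_message="give your final answer now"))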

View File

@@ -1,3 +1,4 @@
import re
from typing import Any, Union
from langchain.agents.output_parsers import ReActSingleInputOutputParser
@@ -6,13 +7,14 @@ from langchain_core.exceptions import OutputParserException
from crewai.utilities import I18N
TOOL_USAGE_SECTION = "Use Tool:"
FINAL_ANSWER_ACTION = "Final Answer:"
FINAL_ANSWER_AND_TOOL_ERROR_MESSAGE = "I tried to use a tool and give a final answer at the same time, I must choose only one."
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n"
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do right next, and don't use a tool I have already used.\n"
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"
class CrewAgentParser(ReActSingleInputOutputParser):
"""Parses Crew-style LLM calls that have a single tool input.
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
@@ -20,17 +22,16 @@ class CrewAgentParser(ReActSingleInputOutputParser):
should be in the below format. This will result in an AgentAction
being returned.
```
Use Tool: All context for using the tool here
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
_i18n: I18N = I18N()
@@ -38,26 +39,52 @@ class CrewAgentParser(ReActSingleInputOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
includes_tool = TOOL_USAGE_SECTION in text
if includes_tool:
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
self.agent.increment_formatting_errors()
raise OutputParserException(f"{FINAL_ANSWER_AND_TOOL_ERROR_MESSAGE}")
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction("", "", text)
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
format = self._i18n.slice("format_without_tools")
error = f"{format}"
self.agent.increment_formatting_errors()
raise OutputParserException(
error,
observation=error,
llm_output=text,
send_to_llm=True,
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
self.agent.increment_formatting_errors()
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
self.agent.increment_formatting_errors()
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
format = self._i18n.slice("format_without_tools")
error = f"{format}"
self.agent.increment_formatting_errors()
raise OutputParserException(
error,
observation=error,
llm_output=text,
send_to_llm=True,
)
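The parser now looks for the standard ReAct Action / Action Input pair instead of the old "Use Tool:" section. A toy, self-contained version of that decision logic, using the same regexes as the hunk above but without the i18n and error-counting plumbing:

    import re

    FINAL_ANSWER_ACTION = "Final Answer:"
    ACTION_REGEX = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"

    def parse(text: str):
        match = re.search(ACTION_REGEX, text, re.DOTALL)
        if match:
            if FINAL_ANSWER_ACTION in text:
                # A tool call and a final answer in the same output is rejected.
                raise ValueError("both an Action and a Final Answer were given")
            action = match.group(1).strip()
            tool_input = match.group(2).strip().strip('"')
            return ("AgentAction", action, tool_input)
        if FINAL_ANSWER_ACTION in text:
            return ("AgentFinish", text.split(FINAL_ANSWER_ACTION)[-1].strip())
        raise ValueError(f"Could not parse LLM output: {text!r}")

    print(parse("Thought: I need the weather\nAction: search\nAction Input: temperature in SF"))
    print(parse("Thought: I now know the final answer\nFinal Answer: It is 60F"))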

View File

@@ -41,7 +41,6 @@ class Crew(BaseModel):
full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
step_callback: Callback to be executed after each step for every agents execution.
share_crew: Whether you want to share the complete crew infromation and execution with crewAI to make the library better, and allow us to train models.
inputs: Any inputs that the crew will use in tasks or agents, it will be interpolated in promtps.
"""
__hash__ = object.__hash__ # type: ignore
@@ -68,10 +67,6 @@ class Crew(BaseModel):
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
inputs: Optional[Dict[str, Any]] = Field(
description="Any inputs that the crew will use in tasks or agents, it will be interpolated in promtps.",
default=None,
)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
share_crew: Optional[bool] = Field(default=False)
@@ -134,15 +129,6 @@ class Crew(BaseModel):
)
return self
@model_validator(mode="after")
def interpolate_inputs(self):
"""Interpolates the inputs in the tasks and agents."""
for task in self.tasks:
task.interpolate_inputs(self.inputs)
for agent in self.agents:
agent.interpolate_inputs(self.inputs)
return self
@model_validator(mode="after")
def check_config(self):
"""Validates that the crew is properly configured with agents and tasks."""
@@ -191,9 +177,10 @@ class Crew(BaseModel):
del task_config["agent"]
return Task(**task_config, agent=task_agent)
def kickoff(self) -> str:
def kickoff(self, inputs: Optional[Dict[str, Any]] = {}) -> str:
"""Starts the crew to work on its assigned tasks."""
self._execution_span = self._telemetry.crew_execution_span(self)
self._interpolate_inputs(inputs)
for agent in self.agents:
agent.i18n = I18N(language=self.language)
@@ -231,7 +218,7 @@ class Crew(BaseModel):
"""Executes tasks sequentially and returns the final output."""
task_output = ""
for task in self.tasks:
if task.agent is not None and task.agent.allow_delegation:
if task.agent.allow_delegation:
agents_for_delegation = [
agent for agent in self.agents if agent != task.agent
]
@@ -281,6 +268,11 @@ class Crew(BaseModel):
self._finish_execution(task_output)
return self._format_output(task_output), manager._token_process.get_summary()
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
"""Interpolates the inputs in the tasks and agents."""
[task.interpolate_inputs(inputs) for task in self.tasks]
[agent.interpolate_inputs(inputs) for agent in self.agents]
def _format_output(self, output: str) -> str:
"""Formats the output of the crew execution."""
if self.full_output:
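In practice, callers now pass interpolation inputs to kickoff() rather than to the Crew constructor. A hedged usage sketch; the agent, task, and topic values are made up, and an OpenAI key is assumed to be configured for the default LLM.

    from crewai import Agent, Crew, Task

    researcher = Agent(
        role="Researcher",
        goal="Collect key facts about {topic}",
        backstory="You dig up reliable, up-to-date information.",
    )
    summary_task = Task(
        description="Write three bullet points about {topic}.",
        expected_output="Three concise, factual bullet points about {topic}.",
        agent=researcher,
    )
    crew = Crew(agents=[researcher], tasks=[summary_task])

    # Before this change: Crew(..., inputs={"topic": "electric cars"})
    # After this change: the placeholders are interpolated at kickoff time.
    result = crew.kickoff(inputs={"topic": "electric cars"})
    print(result)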

View File

@@ -25,16 +25,15 @@ class Task(BaseModel):
i18n: I18N = I18N()
thread: threading.Thread = None
description: str = Field(description="Description of the actual task.")
expected_output: str = Field(
description="Clear definition of expected output for the task."
)
callback: Optional[Any] = Field(
description="Callback to be executed after the task is completed.", default=None
)
agent: Optional[Agent] = Field(
description="Agent responsible for execution the task.", default=None
)
expected_output: Optional[str] = Field(
description="Clear definition of expected output for the task.",
default=None,
)
context: Optional[List["Task"]] = Field(
description="Other tasks that will have their output used as context for this task.",
default=None,
@@ -166,19 +165,17 @@ class Task(BaseModel):
"""
tasks_slices = [self.description]
if self.expected_output:
output = self.i18n.slice("expected_output").format(
expected_output=self.expected_output
)
tasks_slices = [self.description, output]
output = self.i18n.slice("expected_output").format(
expected_output=self.expected_output
)
tasks_slices = [self.description, output]
return "\n".join(tasks_slices)
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Interpolate inputs into the task description and expected output."""
if inputs:
self.description = self.description.format(**inputs)
if self.expected_output:
self.expected_output = self.expected_output.format(**inputs)
self.expected_output = self.expected_output.format(**inputs)
def increment_tools_errors(self) -> None:
"""Increment the tools errors counter."""

View File

@@ -20,24 +20,24 @@ class AgentTools(BaseModel):
func=self.delegate_work,
name="Delegate work to co-worker",
description=self.i18n.tools("delegate_work").format(
coworkers="\n".join([f"- {agent.role}" for agent in self.agents])
coworkers=[f"{agent.role}" for agent in self.agents]
),
),
StructuredTool.from_function(
func=self.ask_question,
name="Ask question to co-worker",
description=self.i18n.tools("ask_question").format(
coworkers="\n".join([f"- {agent.role}" for agent in self.agents])
coworkers=[f"{agent.role}" for agent in self.agents]
),
),
]
def delegate_work(self, coworker: str, task: str, context: str):
"""Useful to delegate a specific task to a coworker."""
"""Useful to delegate a specific task to a coworker passing all necessary context and names."""
return self._execute(coworker, task, context)
def ask_question(self, coworker: str, question: str, context: str):
"""Useful to ask a question, opinion or take from a coworker."""
"""Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
return self._execute(coworker, question, context)
def _execute(self, agent, task, context):
@@ -59,5 +59,9 @@ class AgentTools(BaseModel):
)
agent = agent[0]
task = Task(description=task, agent=agent)
task = Task(
description=task,
agent=agent,
expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
)
return agent.execute_task(task, context)
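The visible effect of the description change is that co-worker roles are now interpolated as a plain Python list rather than a newline-joined bullet list, and every delegated Task carries an expected_output. A small sketch with made-up roles; the template text is abridged from the delegate_work entry in the translations file below.

    template = (
        "Delegate a specific task to one of the following co-workers: {coworkers}\n"
        "The input to this tool should be the coworker, the task you want them to do, "
        "and ALL necessary context to execute the task."
    )
    agent_roles = ["Researcher", "Writer"]  # hypothetical crew members

    # Old rendering: "- Researcher\n- Writer"; new rendering: "['Researcher', 'Writer']"
    description = template.format(coworkers=[f"{role}" for role in agent_roles])
    print(description)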

View File

@@ -1,3 +1,4 @@
import ast
from textwrap import dedent
from typing import Any, List, Union
@@ -30,7 +31,6 @@ class ToolUsage:
tools: List of tools available for the agent.
tools_description: Description of the tools available for the agent.
tools_names: Names of the tools available for the agent.
llm: Language model to be used for the tool usage.
function_calling_llm: Language model to be used for the tool usage.
"""
@@ -41,8 +41,8 @@ class ToolUsage:
tools_description: str,
tools_names: str,
task: Any,
llm: Any,
function_calling_llm: Any,
action: Any,
) -> None:
self._i18n: I18N = I18N()
self._printer: Printer = Printer()
@@ -55,11 +55,14 @@ class ToolUsage:
self.tools_handler = tools_handler
self.tools = tools
self.task = task
self.llm = function_calling_llm or llm
self.action = action
self.function_calling_llm = function_calling_llm
# Set the maximum parsing attempts for bigger models
if (isinstance(self.llm, ChatOpenAI)) and (self.llm.openai_api_base == None):
if self.llm.model_name in OPENAI_BIGGER_MODELS:
if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
self.function_calling_llm.openai_api_base == None
):
if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
self._max_parsing_attempts = 2
self._remember_format_after_usages = 4
@@ -82,7 +85,7 @@ class ToolUsage:
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error}\n", color="red")
return error
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}\n\n{self._i18n.slice('final_answer_format')}"
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
def _use(
self,
@@ -93,14 +96,13 @@ class ToolUsage:
if self._check_tool_repeated_usage(calling=calling):
try:
result = self._i18n.errors("task_repeated_usage").format(
tool=calling.tool_name,
tool_input=", ".join(
[str(arg) for arg in calling.arguments.values()]
),
tool_names=self.tools_names
)
self._printer.print(content=f"\n\n{result}\n", color="yellow")
self._telemetry.tool_repeated_usage(
llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
llm=self.function_calling_llm,
tool_name=tool.name,
attempts=self._run_attempts,
)
result = self._format_result(result=result)
return result
@@ -120,18 +122,32 @@ class ToolUsage:
self.task.increment_delegations()
if calling.arguments:
result = tool._run(**calling.arguments)
try:
acceptable_args = tool.args_schema.schema()["properties"].keys()
arguments = {
k: v
for k, v in calling.arguments.items()
if k in acceptable_args
}
result = tool._run(**arguments)
except Exception:
if tool.args_schema:
arguments = calling.arguments
result = tool._run(**arguments)
else:
arguments = calling.arguments.values()
result = tool._run(*arguments)
else:
result = tool._run()
except Exception as e:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.llm)
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
error_message = self._i18n.errors("tool_usage_exception").format(
error=e
error=e, tool=tool.name, tool_inputs=tool.description
)
error = ToolUsageErrorException(
f'\n{error_message}.\nMoving one then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
).message
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error_message}\n", color="red")
@@ -139,11 +155,14 @@ class ToolUsage:
self.task.increment_tools_errors()
return self.use(calling=calling, tool_string=tool_string)
print('LET"S GOOOOOOOO')
self.tools_handler.on_tool_use(calling=calling, output=result)
self._printer.print(content=f"\n\n{result}\n", color="yellow")
self._telemetry.tool_usage(
llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
llm=self.function_calling_llm,
tool_name=tool.name,
attempts=self._run_attempts,
)
result = self._format_result(result=result)
return result
@@ -177,7 +196,14 @@ class ToolUsage:
if tool.name.lower().strip() == tool_name.lower().strip():
return tool
self.task.increment_tools_errors()
raise Exception(f"Tool '{tool_name}' not found.")
if tool_name and tool_name != "":
raise Exception(
f"Action '{tool_name}' don't exist, these are the only available Actions: {self.tools_description}"
)
else:
raise Exception(
f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
)
def _render(self) -> str:
"""Render the tool name and description in plain text."""
@@ -205,34 +231,57 @@ class ToolUsage:
self, tool_string: str
) -> Union[ToolCalling, InstructorToolCalling]:
try:
model = InstructorToolCalling if self._is_gpt(self.llm) else ToolCalling
converter = Converter(
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid ouput schema:\n\n{tool_string}```",
llm=self.llm,
model=model,
instructions=dedent(
"""\
The schema should have the following structure, only two keys:
- tool_name: str
- arguments: dict (with all arguments being passed)
if self.function_calling_llm:
model = (
InstructorToolCalling
if self._is_gpt(self.function_calling_llm)
else ToolCalling
)
converter = Converter(
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid ouput schema:\n\n{tool_string}```",
llm=self.function_calling_llm,
model=model,
instructions=dedent(
"""\
The schema should have the following structure, only two keys:
- tool_name: str
- arguments: dict (with all arguments being passed)
Example:
{"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
),
max_attemps=1,
)
calling = converter.to_pydantic()
Example:
{"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
),
max_attemps=1,
)
calling = converter.to_pydantic()
if isinstance(calling, ConverterError):
raise calling
if isinstance(calling, ConverterError):
raise calling
else:
tool_name = self.action.tool
tool = self._select_tool(tool_name)
try:
arguments = ast.literal_eval(self.action.tool_input)
except Exception:
return ToolUsageErrorException(
f'{self._i18n.errors("tool_arguments_error")}'
)
if not isinstance(arguments, dict):
return ToolUsageErrorException(
f'{self._i18n.errors("tool_arguments_error")}'
)
calling = ToolCalling(
tool_name=tool.name,
arguments=arguments,
log=tool_string,
)
except Exception as e:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.llm)
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{e}\n", color="red")
return ToolUsageErrorException(
f'{self._i18n.errors("tool_usage_error")}\n{self._i18n.slice("format").format(tool_names=self.tools_names)}'
f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
)
return self._tool_calling(tool_string)
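The commit's headline change is this dual path: with a function_calling_llm configured, the tool call is extracted via the instructor-backed Converter; without one, the Action Input text from the ReAct parser is treated as a Python dict literal. A simplified sketch of the fallback branch, with error strings paraphrased from the i18n entries below:

    import ast

    def parse_action_input(tool_input: str) -> dict:
        # Mirrors the no-function-calling branch: the Action Input produced by the
        # ReAct parser must literal-eval to a dict of keyword arguments.
        try:
            arguments = ast.literal_eval(tool_input)
        except Exception:
            raise ValueError("the Action Input is not a valid key, value dictionary")
        if not isinstance(arguments, dict):
            raise ValueError("the Action Input is not a valid key, value dictionary")
        return arguments

    print(parse_action_input('{"query": "temperature in SF", "limit": 3}'))
    # -> {'query': 'temperature in SF', 'limit': 3}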

View File

@@ -5,26 +5,27 @@
"backstory": "You are a seasoned manager with a knack for getting the best out of your team.\nYou are also known for your ability to delegate work to the right people, and to ask the right questions to get the best out of your team.\nEven though you don't perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members."
},
"slices": {
"observation": "\nResult",
"task": "\n\nCurrent Task: {input}\n\n Begin! This is VERY important to you, your job depends on it!\n\n",
"observation": "\nObservation",
"task": "\n\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought: ",
"memory": "This is the summary of your work so far:\n{chat_history}",
"role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
"tools": "I have access to ONLY the following tools, I can use only these, use one at time:\n\n{tools}\n\nTo use a tool I MUST use the exact following format:\n\n```\nUse Tool: the tool I wanna use, should be one of [{tool_names}] and absolute all relevant input and context for using the tool, I must use only one tool at once.\nResult: [result of the tool]\n```\n\nTo give my final answer I'll use the exact following format:\n\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\nI MUST use these formats, my job depends on it!",
"no_tools": "To give my final answer use the exact following format:\n\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To use a single tool I MUST use the exact following format:\n\n```\nUse Tool: the tool I wanna use, should be one of [{tool_names}] and absolute all relevant input and context for using the tool, I must use only one tool at once.\nResult: [result of the tool]\n```\n\nTo give my final answer use the exact following format:\n\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\nI MUST use these formats, my job depends on it!",
"final_answer_format": "If I don't need to use any more tools, I must make sure use the correct format to give my final answer:\n\n```Final Answer: [my expected final answer, entire content of my most complete final answer goes here]```\n I MUST use these formats, my job depends on it!",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected formats I must follow:\n\n```\nUse Tool: the tool I wanna use, and absolute all relevant input and context for using the tool, I must use only one tool at once.\nResult: [result of the tool]\n```\nOR\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\n",
"task_with_context": "{task}\nThis is the context you're working with:\n{context}",
"expected_output": "Your final answer must be: {expected_output}"
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\n\nThought: ",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
"expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary."
},
"errors": {
"unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
"force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using exactly the expected format below:\n\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\nI MUST use these formats, my job depends on it!",
"force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
"agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n{coworkers}\n",
"task_repeated_usage": "I already used the {tool} tool with input {tool_input}. So I already know that and must stop using it with same input. \nI could give my best complete final answer if I'm ready, using exactly the expected format below:\n\n```\nFinal Answer: [my expected final answer, entire content of my most complete final answer goes here]\n```\nI MUST use these formats, my job depends on it!",
"tool_usage_error": "It seems we encountered an unexpected error while trying to use the tool.",
"task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
"tool_usage_error": "I encountered an error: {error}",
"tool_arguments_error": "Error: the Action Input is not a valid key, value dictionary.",
"wrong_tool_name": "You tried to use the tool {tool}, but it doesn't exist. You must use one of the following tools, use one at time: {tools}.",
"tool_usage_exception": "It seems we encountered an unexpected error while trying to use the tool. This was the error: {error}"
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts there inputs: {tool_inputs}"
},
"tools": {
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",