Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-09 08:08:32 +00:00)
Creating a tool output parser
@@ -5,10 +5,11 @@ description: Guide on integrating CrewAI with various Large Language Models (LLM
 
 ## Connect CrewAI to LLMs
 
 !!! note "Default LLM"
 
-    By default, crewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide will show you how to connect your agents to different LLMs.
+    By default, crewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide will show you how to connect your agents to different LLMs. You can change the specific gpt model by setting the `OPENAI_MODEL_NAME` environment variable.
 
 CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.
 
 ## Ollama Integration
 
 Ollama is preferred for local LLM integration, offering customization and privacy benefits. It requires installation and configuration, including model adjustments via a Modelfile to optimize performance.
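The docs change above mentions two configuration paths: the `OPENAI_MODEL_NAME` environment variable and swapping in a LangChain-compatible LLM such as Ollama. As a minimal sketch (not part of this commit; the `langchain_community` Ollama wrapper, the model name, and the agent details are illustrative assumptions), wiring either option into an agent might look like this:

```python
import os

from crewai import Agent
from langchain_community.llms import Ollama  # assumed LangChain wrapper for a local Ollama server

# Option 1: keep the default OpenAI backend but pick a different GPT model.
os.environ["OPENAI_MODEL_NAME"] = "gpt-3.5-turbo"

# Option 2: hand the agent a local model instead (assumes `ollama pull openhermes` was run).
local_llm = Ollama(model="openhermes")

researcher = Agent(
    role="Researcher",
    goal="Summarize recent findings",
    backstory="Prefers locally hosted models for privacy.",
    llm=local_llm,  # any LangChain LLM component can be passed here
)
```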
@@ -120,6 +120,22 @@ class Telemetry:
             except Exception:
                 pass
 
+    def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
+        """Records the repeated usage 'error' of a tool by an agent."""
+        if self.ready:
+            try:
+                tracer = trace.get_tracer("crewai.telemetry")
+                span = tracer.start_span("Tool Repeated Usage")
+                self._add_attribute(span, "tool_name", tool_name)
+                self._add_attribute(span, "attempts", attempts)
+                self._add_attribute(
+                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
+                )
+                span.set_status(Status(StatusCode.OK))
+                span.end()
+            except Exception:
+                pass
+
     def tool_usage(self, llm: Any, tool_name: str, attempts: int):
         """Records the usage of a tool by an agent."""
         if self.ready:
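The new `tool_repeated_usage` method follows the span pattern already used by the surrounding `Telemetry` methods: open a span, attach attributes, mark it OK, end it, and swallow any failure so telemetry can never break a run. A standalone sketch of that pattern with made-up attribute values (not the actual class method):

```python
import json

from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode


def record_tool_event(tool_name: str, attempts: int, llm_attributes: dict) -> None:
    """Open a short-lived span describing one tool event, then close it."""
    try:
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Tool Repeated Usage")
        span.set_attribute("tool_name", tool_name)
        span.set_attribute("attempts", attempts)
        span.set_attribute("llm", json.dumps(llm_attributes))
        span.set_status(Status(StatusCode.OK))
        span.end()
    except Exception:
        # Telemetry failures are deliberately swallowed, as in the class above.
        pass


record_tool_event("search_tool", 2, {"model_name": "gpt-4"})
```

Without a configured OpenTelemetry SDK these API calls are no-ops; the real class additionally guards on `self.ready`.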
@@ -1,5 +1,7 @@
 from typing import Any, Dict
 
+from pydantic import BaseModel as PydanticBaseModel
+from pydantic import Field as PydanticField
 from pydantic.v1 import BaseModel, Field
 
 
@@ -10,3 +12,12 @@ class ToolCalling(BaseModel):
     arguments: Dict[str, Any] = Field(
         ..., description="A dictinary of arguments to be passed to the function."
     )
+
+
+class InstructorToolCalling(PydanticBaseModel):
+    function_name: str = PydanticField(
+        ..., description="The name of the function to be called."
+    )
+    arguments: Dict = PydanticField(
+        ..., description="A dictinary of arguments to be passed to the function."
+    )
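The two models exist because the codebase straddles pydantic versions: `ToolCalling` extends the v1-compatible `pydantic.v1.BaseModel` that the LangChain parser path expects, while `InstructorToolCalling` is a plain pydantic v2 model suitable as an `instructor` `response_model`. A hypothetical construction of each (values invented for illustration):

```python
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling

# Parsed from free text via the LangChain / ToolOutputParser path (pydantic v1).
legacy_call = ToolCalling(
    function_name="search_internet",             # hypothetical tool name
    arguments={"query": "crewAI tool parsing"},  # hypothetical arguments
)

# Returned directly by the instructor-patched OpenAI client (pydantic v2).
structured_call = InstructorToolCalling(
    function_name="search_internet",
    arguments={"query": "crewAI tool parsing"},
)

print(legacy_call.function_name, structured_call.arguments)
```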
src/crewai/tools/tool_output_parser.py (new file, 39 lines)
@@ -0,0 +1,39 @@
+import json
+from typing import Any, List
+
+import regex
+from langchain.output_parsers import PydanticOutputParser
+from langchain_core.exceptions import OutputParserException
+from langchain_core.outputs import Generation
+from langchain_core.pydantic_v1 import ValidationError
+
+
+class ToolOutputParser(PydanticOutputParser):
+    """Parses the function calling of a tool usage and it's arguments."""
+
+    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
+        result[0].text = self._transform_in_valid_json(result[0].text)
+        json_object = super().parse_result(result)
+        try:
+            return self.pydantic_object.parse_obj(json_object)
+        except ValidationError as e:
+            name = self.pydantic_object.__name__
+            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
+            raise OutputParserException(msg, llm_output=json_object)
+
+    def _transform_in_valid_json(self, text) -> str:
+        text = text.replace("```", "").replace("json", "")
+        json_pattern = r"\{(?:[^{}]|(?R))*\}"
+        matches = regex.finditer(json_pattern, text)
+
+        for match in matches:
+            try:
+                # Attempt to parse the matched string as JSON
+                json_obj = json.loads(match.group())
+                # Return the first successfully parsed JSON object
+                json_obj = json.dumps(json_obj)
+                return str(json_obj)
+            except json.JSONDecodeError:
+                # If parsing fails, skip to the next match
+                continue
+        return text
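The parser's core trick is in `_transform_in_valid_json`: strip Markdown fences, then use the third-party `regex` module (not the stdlib `re`) so the recursive `(?R)` construct can match balanced, possibly nested `{...}` blocks, and keep the first block that actually parses as JSON. A standalone sketch of just that extraction step (function name and sample text are illustrative):

```python
import json

import regex  # third-party package; supports the recursive (?R) pattern


def extract_first_json(text: str) -> str:
    """Return the first balanced {...} block in `text` that is valid JSON."""
    text = text.replace("```", "").replace("json", "")
    json_pattern = r"\{(?:[^{}]|(?R))*\}"  # matches braces, recursing into nested ones
    for match in regex.finditer(json_pattern, text):
        try:
            return json.dumps(json.loads(match.group()))
        except json.JSONDecodeError:
            continue  # this balanced block wasn't valid JSON; try the next one
    return text  # nothing parsed; fall back to the cleaned text


noisy = 'Thought: I should call a tool now. {"function_name": "search", "arguments": {"q": "llms"}} Done.'
print(extract_first_json(noisy))  # -> {"function_name": "search", "arguments": {"q": "llms"}}
```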
@@ -1,12 +1,15 @@
-from typing import Any, List
+from typing import Any, List, Union
 
-from langchain.output_parsers import PydanticOutputParser
+import instructor
 from langchain.prompts import PromptTemplate
 from langchain_core.tools import BaseTool
+from langchain_openai import ChatOpenAI
+from openai import OpenAI
 
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.telemtry import Telemetry
-from crewai.tools.tool_calling import ToolCalling
+from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
+from crewai.tools.tool_output_parser import ToolOutputParser
 from crewai.utilities import I18N, Printer
 
 
@@ -44,7 +47,7 @@ class ToolUsage:
         self._printer: Printer = Printer()
         self._telemetry: Telemetry = Telemetry()
         self._run_attempts: int = 1
-        self._max_parsing_attempts: int = 3
+        self._max_parsing_attempts: int = 2
         self._remeber_format_after_usages: int = 3
         self.tools_description = tools_description
         self.tools_names = tools_names
@@ -62,38 +65,54 @@ class ToolUsage:
         tool = self._select_tool(calling.function_name)
         return self._use(tool_string=tool_string, tool=tool, calling=calling)
 
-    def _use(self, tool_string: str, tool: BaseTool, calling: ToolCalling) -> None:
-        try:
-            if self._check_tool_repeated_usage(calling=calling):
+    def _use(
+        self,
+        tool_string: str,
+        tool: BaseTool,
+        calling: Union[ToolCalling, InstructorToolCalling],
+    ) -> None:
+        if self._check_tool_repeated_usage(calling=calling):
+            try:
                 result = self._i18n.errors("task_repeated_usage").format(
-                    tool=calling.function_name, tool_input=calling.arguments
+                    tool=calling.function_name,
+                    tool_input=", ".join(calling.arguments.values()),
                 )
-            else:
-                self.tools_handler.on_tool_start(calling=calling)
-
-                result = self.tools_handler.cache.read(
-                    tool=calling.function_name, input=calling.arguments
+                self._printer.print(content=f"\n\n{result}\n", color="yellow")
+                self._telemetry.tool_repeated_usage(
+                    llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
                 )
+                result = self._format_result(result=result)
+                return result
+            except Exception:
+                pass
 
-                if not result:
-                    result = tool._run(**calling.arguments)
-                self.tools_handler.on_tool_end(calling=calling, output=result)
+        self.tools_handler.on_tool_start(calling=calling)
 
-            self._printer.print(content=f"\n\n{result}\n", color="yellow")
-            self._telemetry.tool_usage(
-                llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
-            )
+        result = self.tools_handler.cache.read(
+            tool=calling.function_name, input=calling.arguments
+        )
 
-            result = self._format_result(result=result)
-            return result
-        except Exception:
-            self._run_attempts += 1
-            if self._run_attempts > self._max_parsing_attempts:
-                self._telemetry.tool_usage_error(llm=self.llm)
-                return ToolUsageErrorException(
-                    self._i18n.errors("tool_usage_error")
-                ).message
-            return self.use(tool_string=tool_string)
+        if not result:
+            try:
+                result = tool._run(**calling.arguments)
+            except Exception as e:
+                self._run_attempts += 1
+                if self._run_attempts > self._max_parsing_attempts:
+                    self._telemetry.tool_usage_error(llm=self.llm)
+                    return ToolUsageErrorException(
+                        self._i18n.errors("tool_usage_exception").format(error=e)
+                    ).message
+                return self.use(tool_string=tool_string)
+
+        self.tools_handler.on_tool_end(calling=calling, output=result)
+
+        self._printer.print(content=f"\n\n{result}\n", color="yellow")
+        self._telemetry.tool_usage(
+            llm=self.llm, tool_name=tool.name, attempts=self._run_attempts
+        )
+
+        result = self._format_result(result=result)
+        return result
 
     def _format_result(self, result: Any) -> None:
         self.task.used_tools += 1
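Two things change shape in `_use` above: the repeated-usage branch now swallows its own errors, and actual tool execution gets its own retry path that re-enters `use()` until `_max_parsing_attempts` is exceeded, at which point the new `tool_usage_exception` message (including the concrete error) is returned. A reduced sketch of that retry shape, with placeholder names standing in for the class internals:

```python
from typing import Callable

MAX_ATTEMPTS = 2  # mirrors self._max_parsing_attempts after this commit


def run_with_retry(run_tool: Callable[[], str], attempts: int = 1) -> str:
    """Placeholder for the _use flow: rerun on failure, then give up with a message."""
    try:
        return run_tool()
    except Exception as e:
        attempts += 1
        if attempts > MAX_ATTEMPTS:
            # In ToolUsage this is the i18n "tool_usage_exception" string.
            return (
                "It seems we encountered an unexpected error while trying to use "
                f"the tool. This was the error: {e}"
            )
        return run_with_retry(run_tool, attempts)


print(run_with_retry(lambda: "tool output"))
```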
@@ -111,13 +130,15 @@ class ToolUsage:
             )
         return result
 
-    def _check_tool_repeated_usage(self, calling: ToolCalling) -> None:
+    def _check_tool_repeated_usage(
+        self, calling: Union[ToolCalling, InstructorToolCalling]
+    ) -> None:
         if last_tool_usage := self.tools_handler.last_used_tool:
             return calling == last_tool_usage
 
     def _select_tool(self, tool_name: str) -> BaseTool:
         for tool in self.tools:
-            if tool.name == tool_name:
+            if tool.name.lower() == tool_name.lower():
                 return tool
         raise Exception(f"Tool '{tool_name}' not found.")
 
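`_select_tool` is now case-insensitive, which matters because `_render` (below) advertises tool names in lower case while the model may echo them back with arbitrary casing. A tiny illustration with a stand-in tool type rather than a real LangChain `BaseTool`:

```python
from dataclasses import dataclass


@dataclass
class FakeTool:  # stand-in for a LangChain BaseTool with just a name
    name: str


tools = [FakeTool(name="Search Internet"), FakeTool(name="Calculator")]


def select_tool(tool_name: str) -> FakeTool:
    for tool in tools:
        if tool.name.lower() == tool_name.lower():  # case-insensitive match
            return tool
    raise Exception(f"Tool '{tool_name}' not found.")


print(select_tool("search internet").name)  # finds "Search Internet" despite the casing
```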
@@ -132,27 +153,63 @@ class ToolUsage:
             descriptions.append(
                 "\n".join(
                     [
-                        f"Funtion Name: {tool.name}",
-                        f"Funtion attributes: {args}",
+                        f"Tool Name: {tool.name.lower()}",
+                        f"Description: {tool.description}",
+                        f"Tool Arguments: {args}",
                     ]
                 )
             )
         return "\n--\n".join(descriptions)
 
-    def _tool_calling(self, tool_string: str) -> ToolCalling:
+    def _tool_calling(
+        self, tool_string: str
+    ) -> Union[ToolCalling, InstructorToolCalling]:
         try:
-            parser = PydanticOutputParser(pydantic_object=ToolCalling)
-            prompt = PromptTemplate(
-                template="Return a valid schema for the one tool you must use with its arguments and values.\n\nTools available:\n\n{available_tools}\n\nUse this text to inform a valid ouput schema:\n{tool_string}\n\n{format_instructions}\n```",
-                input_variables=["tool_string"],
-                partial_variables={
-                    "available_tools": self._render(),
-                    "format_instructions": parser.get_format_instructions(),
-                },
+            tool_string = tool_string.replace(
+                "Thought: Do I need to use a tool? Yes", ""
             )
-            chain = prompt | self.llm | parser
-            calling = chain.invoke({"tool_string": tool_string})
+            tool_string = tool_string.replace("Action:", "Tool Name:")
+            tool_string = tool_string.replace("Action Input:", "Tool Arguments:")
+
+            if (isinstance(self.llm, ChatOpenAI)) and (
+                self.llm.openai_api_base == None
+            ):
+                client = instructor.patch(
+                    OpenAI(
+                        base_url=self.llm.openai_api_base,
+                        api_key=self.llm.openai_api_key,
+                    ),
+                    mode=instructor.Mode.JSON,
+                )
+                calling = client.chat.completions.create(
+                    model=self.llm.model_name,
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": f"Tools available:\n\n{self._render()}\n\nReturn a valid schema for the tool, use this text to inform a valid ouput schema:\n{tool_string}```",
+                        }
+                    ],
+                    response_model=InstructorToolCalling,
+                )
+            else:
+                parser = ToolOutputParser(pydantic_object=ToolCalling)
+                prompt = PromptTemplate(
+                    template="Tools available:\n\n{available_tools}\n\nReturn a valid schema for the tool, use this text to inform a valid ouput schema:\n{tool_string}\n\n{format_instructions}\n```",
+                    input_variables=["tool_string"],
+                    partial_variables={
+                        "available_tools": self._render(),
+                        "format_instructions": """
+                          The schema should have the following structure, only two key:
+                          - function_name: str
+                          - arguments: dict (with all arguments being passed)
+
+                          Example:
+                          {"function_name": "function_name", "arguments": {"arg_name1": "value", "arg_name2": 2}}
+                        """,
+                    },
+                )
+                chain = prompt | self.llm | parser
+                calling = chain.invoke({"tool_string": tool_string})
+
         except Exception:
             self._run_attempts += 1
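When the LLM is a stock `ChatOpenAI` client (no custom `openai_api_base`), the branch above skips text parsing entirely and asks `instructor` for a typed `InstructorToolCalling` object. A minimal standalone version of that call, with a hard-coded model name and prompt standing in for the class attributes (requires `OPENAI_API_KEY`):

```python
import instructor
from openai import OpenAI
from pydantic import BaseModel, Field


class InstructorToolCalling(BaseModel):
    function_name: str = Field(..., description="The name of the function to be called.")
    arguments: dict = Field(..., description="Arguments to be passed to the function.")


# Patching the client lets chat.completions.create accept a response_model.
client = instructor.patch(OpenAI(), mode=instructor.Mode.JSON)

calling = client.chat.completions.create(
    model="gpt-4",  # placeholder for self.llm.model_name
    messages=[
        {
            "role": "user",
            "content": (
                "Tools available:\n\n"
                "Tool Name: search\nDescription: Search the web.\nTool Arguments: {'query': str}\n\n"
                "Return a valid schema for the tool, use this text to inform a valid output schema:\n"
                "Tool Name: search\nTool Arguments: {'query': 'latest crewAI release'}"
            ),
        }
    ],
    response_model=InstructorToolCalling,
)

print(calling.function_name, calling.arguments)
```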
@@ -17,7 +17,8 @@
     "force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
     "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Action Input δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές:\n{coworkers}..\n",
     "task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα το ξέρω ήδη και πρέπει να σταματήσω να το χρησιμοποιώ στη σειρά με την ίδια είσοδο. \nΘα μπορούσα να δώσω την τελική μου απάντηση εάν είμαι έτοιμος, χρησιμοποιώντας ακριβώς την αναμενόμενη μορφή παρακάτω: \n\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]\n",
-    "tool_usage_error": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου."
+    "tool_usage_error": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου.",
+    "tool_usage_exception": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου. Αυτό ήταν το σφάλμα: {error}"
   },
   "tools": {
     "delegate_work": "Αναθέστε μια συγκεκριμένη εργασία σε έναν από τους παρακάτω συναδέλφους:\n{coworkers}.\nΗ εισαγωγή σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η εργασία που θέλετε να κάνει και ΟΛΟ το απαραίτητο πλαίσιο για την εκτέλεση της εργασίας, δεν γνωρίζουν τίποτα για την εργασία, γι' αυτό μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά εξηγήστε τα.",
@@ -17,7 +17,8 @@
     "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using exaclty the expected format bellow: \n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
     "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options:\n{coworkers}.\n",
     "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know that and must stop using it in a row with the same input. \nI could give my final answer if I'm ready, using exaclty the expected format bellow: \n\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]\n",
-    "tool_usage_error": "It seems we encountered an unexpected error while trying to use the tool."
+    "tool_usage_error": "It seems we encountered an unexpected error while trying to use the tool.",
+    "tool_usage_exception": "It seems we encountered an unexpected error while trying to use the tool. This was the error: {error}"
   },
   "tools": {
     "delegate_work": "Delegate a specific task to one of the following co-workers:\n{coworkers}.\nThe input to this tool should be the role of the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",