From 4eb40736615ffa2d49439cf8a9550ef1cd0dda3d Mon Sep 17 00:00:00 2001
From: "OP (oppenheimer)" <21008429+Ompragash@users.noreply.github.com>
Date: Mon, 15 Jul 2024 00:41:54 +0530
Subject: [PATCH 1/4] Add Groq - OpenAI Compatible API - details (#934)

---
 docs/how-to/LLM-Connections.md | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/docs/how-to/LLM-Connections.md b/docs/how-to/LLM-Connections.md
index f1ac52e39..21361d0d3 100644
--- a/docs/how-to/LLM-Connections.md
+++ b/docs/how-to/LLM-Connections.md
@@ -127,7 +127,7 @@ llm = HuggingFaceHub(
 ```

 ## OpenAI Compatible API Endpoints
-Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, and Mistral AI.
+Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, Groq, and Mistral AI.

 ### Configuration Examples
 #### FastChat
@@ -144,6 +144,13 @@
 OPENAI_API_BASE="http://localhost:1234/v1"
 OPENAI_API_KEY="lm-studio"
 ```

+#### Groq API
+```sh
+OPENAI_API_KEY=your-groq-api-key
+OPENAI_MODEL_NAME='llama3-8b-8192'
+OPENAI_API_BASE=https://api.groq.com/openai/v1
+```
+
 #### Mistral API
 ```sh
 OPENAI_API_KEY=your-mistral-api-key
@@ -211,4 +218,4 @@ azure_agent = Agent(
 ```

 ## Conclusion
-Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.
\ No newline at end of file
+Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.
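The Groq block above only sets environment variables; the same configuration can be exercised from Python. A minimal sketch, assuming the default OpenAI-compatible client honors the `OPENAI_*` variables as the docs describe (the key and agent fields are illustrative):

```python
import os

from crewai import Agent

os.environ["OPENAI_API_KEY"] = "your-groq-api-key"  # hypothetical key
os.environ["OPENAI_MODEL_NAME"] = "llama3-8b-8192"
os.environ["OPENAI_API_BASE"] = "https://api.groq.com/openai/v1"

# Agents created after this point should route through Groq's
# OpenAI-compatible endpoint.
researcher = Agent(
    role="Researcher",
    goal="Answer questions concisely",
    backstory="A smoke-test agent for the Groq endpoint.",
)
```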
From 7acf0b21071d4a7382c8c4a78aae63b9ccfbfb47 Mon Sep 17 00:00:00 2001
From: "Brandon Hancock (bhancock_ai)" <109994880+bhancockio@users.noreply.github.com>
Date: Mon, 15 Jul 2024 07:53:41 -0400
Subject: [PATCH 2/4] Feature/use converter instead of manually trimming (#894)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Exploring output being passed to tool selector to see if we can better format data
* WIP. Adding JSON repair functionality
* Almost done implementing JSON repair. Testing fixes vs current base case.
* More action cleanup with additional tests
* WIP. Trying to figure out what is going on with tool descriptions
* Update tool description generation
* WIP. Trying to find out what is causing the tools to duplicate
* Replacing tools properly instead of duplicating them accidentally
* Fixing issues for MR
* Update dependencies for JSON_REPAIR
* More cleaning up pull request
* prepping for call
* Fix type-checking issues

---------

Co-authored-by: João Moura

---
 poetry.lock                                   | 23 +-
 pyproject.toml                                |  1 +
 src/crewai/agent.py                           | 63 ++-
 .../utilities/base_agent_tool.py              | 15 +-
 ...erter_base.py => base_output_converter.py} |  8 +-
 src/crewai/agents/executor.py                 | 14 +-
 src/crewai/agents/parser.py                   | 42 +-
 src/crewai/crew.py                            | 43 +-
 src/crewai/tools/agent_tools.py               |  2 +-
 src/crewai/utilities/converter.py             | 21 +-
 tests/agents/__init__.py                      |  0
 tests/agents/test_crew_agent_parser.py        | 378 ++++++++++++++++++
 tests/crew_test.py                            | 10 +-
 13 files changed, 552 insertions(+), 68 deletions(-)
 rename src/crewai/agents/agent_builder/utilities/{base_output_converter_base.py => base_output_converter.py} (85%)
 create mode 100644 tests/agents/__init__.py
 create mode 100644 tests/agents/test_crew_agent_parser.py

diff --git a/poetry.lock b/poetry.lock
index 19cb150ea..8b948d207 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2282,6 +2282,17 @@ files = [
     {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
 ]

+[[package]]
+name = "json-repair"
+version = "0.25.2"
+description = "A package to repair broken json strings"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "json_repair-0.25.2-py3-none-any.whl", hash = "sha256:51d67295c3184b6c41a3572689661c6128cef6cfc9fb04db63130709adfc5bf0"},
+    {file = "json_repair-0.25.2.tar.gz", hash = "sha256:161a56d7e6bbfd4cad3a614087e3e0dbd0e10d402dd20dc7db418432428cb32b"},
+]
+
 [[package]]
 name = "jsonpatch"
 version = "1.33"
@@ -2395,8 +2406,8 @@
 langchain-core = ">=0.2.10,<0.3.0"
 langchain-text-splitters = ">=0.2.0,<0.3.0"
 langsmith = ">=0.1.17,<0.2.0"
 numpy = [
-    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
     {version = ">=1,<2", markers = "python_version < \"3.12\""},
+    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
 ]
 pydantic = ">=1,<3"
 PyYAML = ">=5.3"
@@ -2437,8 +2448,8 @@
 langchain = ">=0.2.6,<0.3.0"
 langchain-core = ">=0.2.10,<0.3.0"
 langsmith = ">=0.1.0,<0.2.0"
 numpy = [
-    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
     {version = ">=1,<2", markers = "python_version < \"3.12\""},
+    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
 ]
 PyYAML = ">=5.3"
 requests = ">=2,<3"
@@ -2461,8 +2472,8 @@
 jsonpatch = ">=1.33,<2.0"
 langsmith = ">=0.1.75,<0.2.0"
 packaging = ">=23.2,<25"
 pydantic = [
-    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
     {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
+    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
 ]
 PyYAML = ">=5.3"
 tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0"
@@ -2511,8 +2522,8 @@ files = [

 [package.dependencies]
 orjson = ">=3.9.14,<4.0.0"
 pydantic = [
-    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
     {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
+    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
 ]
 requests = ">=2,<3"
= "python_version >= \"3.13\""}, ] [package.extras] @@ -6090,4 +6101,4 @@ tools = ["crewai-tools"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<=3.13" -content-hash = "0dbf6f6e2e841fb3eec4ff87ea5d6b430f29702118fee91307983c6b2581e59e" +content-hash = "2cf5a3904e7cbcfebb85e198b6035252d47213a9b0dd3dd51837516e03b38d3e" diff --git a/pyproject.toml b/pyproject.toml index eeae5cadb..52cfdc375 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ appdirs = "^1.4.4" jsonref = "^1.1.0" agentops = { version = "^0.1.9", optional = true } embedchain = "^0.1.114" +json-repair = "^0.25.2" [tool.poetry.extras] tools = ["crewai-tools"] diff --git a/src/crewai/agent.py b/src/crewai/agent.py index 9f953d61d..505c5249b 100644 --- a/src/crewai/agent.py +++ b/src/crewai/agent.py @@ -1,9 +1,10 @@ import os +from inspect import signature from typing import Any, List, Optional, Tuple from langchain.agents.agent import RunnableAgent +from langchain.agents.tools import BaseTool from langchain.agents.tools import tool as LangChainTool -from langchain.tools.render import render_text_description from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackHandler from langchain_openai import ChatOpenAI @@ -167,14 +168,16 @@ class Agent(BaseAgent): if memory.strip() != "": task_prompt += self.i18n.slice("memory").format(memory=memory) - tools = tools or self.tools - - parsed_tools = self._parse_tools(tools or []) # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]" + tools = tools or self.tools or [] + parsed_tools = self._parse_tools(tools) self.create_agent_executor(tools=tools) self.agent_executor.tools = parsed_tools self.agent_executor.task = task - self.agent_executor.tools_description = render_text_description(parsed_tools) + # TODO: COMPARE WITH ARGS AND WITHOUT ARGS + self.agent_executor.tools_description = self._render_text_description_and_args( + parsed_tools + ) self.agent_executor.tools_names = self.__tools_names(parsed_tools) if self.crew and self.crew._train: @@ -189,6 +192,7 @@ class Agent(BaseAgent): "tools": self.agent_executor.tools_description, } )["output"] + if self.max_rpm: self._rpm_controller.stop_rpm_counter() @@ -220,7 +224,7 @@ class Agent(BaseAgent): Returns: An instance of the CrewAgentExecutor class. """ - tools = tools or self.tools + tools = tools or self.tools or [] agent_args = { "input": lambda x: x["input"], @@ -315,6 +319,7 @@ class Agent(BaseAgent): tools_list = [] for tool in tools: tools_list.append(tool) + return tools_list def _training_handler(self, task_prompt: str) -> str: @@ -341,6 +346,52 @@ class Agent(BaseAgent): ) return task_prompt + def _render_text_description(self, tools: List[BaseTool]) -> str: + """Render the tool name and description in plain text. + + Output will be in the format of: + + .. code-block:: markdown + + search: This tool is used for search + calculator: This tool is used for math + """ + description = "\n".join( + [ + f"Tool name: {tool.name}\nTool description:\n{tool.description}" + for tool in tools + ] + ) + + return description + + def _render_text_description_and_args(self, tools: List[BaseTool]) -> str: + """Render the tool name, description, and args in plain text. + + Output will be in the format of: + + .. 
diff --git a/src/crewai/agent.py b/src/crewai/agent.py
index 9f953d61d..505c5249b 100644
--- a/src/crewai/agent.py
+++ b/src/crewai/agent.py
@@ -1,9 +1,10 @@
 import os
+from inspect import signature
 from typing import Any, List, Optional, Tuple

 from langchain.agents.agent import RunnableAgent
+from langchain.agents.tools import BaseTool
 from langchain.agents.tools import tool as LangChainTool
-from langchain.tools.render import render_text_description
 from langchain_core.agents import AgentAction
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_openai import ChatOpenAI
@@ -167,14 +168,16 @@ class Agent(BaseAgent):
         if memory.strip() != "":
             task_prompt += self.i18n.slice("memory").format(memory=memory)

-        tools = tools or self.tools
-
-        parsed_tools = self._parse_tools(tools or [])  # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
+        tools = tools or self.tools or []
+        parsed_tools = self._parse_tools(tools)

         self.create_agent_executor(tools=tools)
         self.agent_executor.tools = parsed_tools
         self.agent_executor.task = task
-        self.agent_executor.tools_description = render_text_description(parsed_tools)
+        # TODO: COMPARE WITH ARGS AND WITHOUT ARGS
+        self.agent_executor.tools_description = self._render_text_description_and_args(
+            parsed_tools
+        )
         self.agent_executor.tools_names = self.__tools_names(parsed_tools)

         if self.crew and self.crew._train:
@@ -189,6 +192,7 @@
                 "tools": self.agent_executor.tools_description,
             }
         )["output"]
+
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
@@ -220,7 +224,7 @@
         Returns:
             An instance of the CrewAgentExecutor class.
         """
-        tools = tools or self.tools
+        tools = tools or self.tools or []

         agent_args = {
             "input": lambda x: x["input"],
@@ -315,6 +319,7 @@
         tools_list = []
         for tool in tools:
             tools_list.append(tool)
+
         return tools_list

     def _training_handler(self, task_prompt: str) -> str:
@@ -341,6 +346,52 @@
         )
         return task_prompt

+    def _render_text_description(self, tools: List[BaseTool]) -> str:
+        """Render the tool name and description in plain text.
+
+        Output will be in the format of:
+
+        .. code-block:: markdown
+
+            Tool name: search
+            Tool description:
+            This tool is used for search
+        """
+        description = "\n".join(
+            [
+                f"Tool name: {tool.name}\nTool description:\n{tool.description}"
+                for tool in tools
+            ]
+        )
+
+        return description
+
+    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
+        """Render the tool name, description, and args in plain text.
+
+        Output will be in the format of:
+
+        .. code-block:: markdown
+
+            Tool Name: search
+            Tool Description: This tool is used for search
+            Tool Arguments: {"query": {"type": "string"}}
+        """
+        tool_strings = []
+        for tool in tools:
+            args_schema = str(tool.args)
+            if hasattr(tool, "func") and tool.func:
+                sig = signature(tool.func)
+                description = (
+                    f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}"
+                )
+            else:
+                description = (
+                    f"Tool Name: {tool.name}\nTool Description: {tool.description}"
+                )
+            tool_strings.append(f"{description}\nTool Arguments: {args_schema}")
+
+        return "\n".join(tool_strings)
+
     @staticmethod
     def __tools_names(tools) -> str:
         return ", ".join([t.name for t in tools])
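The string `_render_text_description_and_args` builds can be previewed against a standalone LangChain tool; a minimal sketch (tool name and docstring are illustrative, and `StructuredTool` is assumed to keep the wrapped function on `.func`):

```python
from inspect import signature

from langchain.tools import StructuredTool


def search(query: str) -> str:
    """Useful to search the internet."""
    return f"results for {query}"


tool = StructuredTool.from_function(func=search)
sig = signature(tool.func)
# Mirrors the per-tool block produced above: name plus signature,
# description, then the argument schema.
print(f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}\nTool Arguments: {tool.args}")
```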
diff --git a/src/crewai/agents/agent_builder/utilities/base_agent_tool.py b/src/crewai/agents/agent_builder/utilities/base_agent_tool.py
index c307aeb08..556873f39 100644
--- a/src/crewai/agents/agent_builder/utilities/base_agent_tool.py
+++ b/src/crewai/agents/agent_builder/utilities/base_agent_tool.py
@@ -24,6 +24,7 @@ class BaseAgentTools(BaseModel, ABC):
             is_list = coworker.startswith("[") and coworker.endswith("]")
             if is_list:
                 coworker = coworker[1:-1].split(",")[0]
+
         return coworker

     def delegate_work(
@@ -40,11 +41,13 @@
         coworker = self._get_coworker(coworker, **kwargs)
         return self._execute(coworker, question, context)

-    def _execute(self, agent: Union[str, None], task: str, context: Union[str, None]):
+    def _execute(
+        self, agent_name: Union[str, None], task: str, context: Union[str, None]
+    ):
         """Execute the command."""
         try:
-            if agent is None:
-                agent = ""
+            if agent_name is None:
+                agent_name = ""

             # It is important to remove the quotes from the agent name.
             # The reason we have to do this is because less-powerful LLM's
             # sometimes return the agent name with quotes, like this:
             # {"task": "....", "coworker": "\"....\""}
             # when it should look like this:
             # {"task": "....", "coworker": "...."}
-            agent_name = agent.casefold().replace('"', "").replace("\n", "")
+            agent_name = agent_name.casefold().replace('"', "").replace("\n", "")

             agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
                 available_agent
@@ -75,9 +78,9 @@
             )

         agent = agent[0]
-        task = Task(  # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
+        task_with_assigned_agent = Task(  # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
             description=task,
             agent=agent,
             expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
         )
-        return agent.execute_task(task, context)  # type: ignore # "str" has no attribute "execute_task"
+        return agent.execute_task(task_with_assigned_agent, context)
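The quote-stripping in `_execute` is easiest to see in isolation; the same normalization steps applied to hypothetical coworker values a less-capable LLM might emit:

```python
# Hypothetical "coworker" values, including the bracketed list form
# handled by _get_coworker.
for raw in ['"Senior Researcher"', "[Senior Researcher, Writer]"]:
    name = raw
    if name.startswith("[") and name.endswith("]"):  # list form: keep the first entry
        name = name[1:-1].split(",")[0]
    name = name.casefold().replace('"', "").replace("\n", "")
    print(name)  # both variants normalize to: senior researcher
```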
""" - _is_gpt: bool = PrivateAttr(default=True) text: str = Field(description="Text to be converted.") llm: Any = Field(description="The language model to be used to convert the text.") model: Any = Field(description="The model to be used to convert the text.") @@ -41,7 +40,8 @@ class OutputConverter(BaseModel, ABC): """Convert text to json.""" pass - @abstractmethod # type: ignore # Name "_is_gpt" already defined on line 25 - def _is_gpt(self, llm): # type: ignore # Name "_is_gpt" already defined on line 25 + @property + @abstractmethod + def is_gpt(self) -> bool: """Return if llm provided is of gpt from openai.""" pass diff --git a/src/crewai/agents/executor.py b/src/crewai/agents/executor.py index 58ab81e6f..aa0076b0c 100644 --- a/src/crewai/agents/executor.py +++ b/src/crewai/agents/executor.py @@ -1,14 +1,6 @@ import threading import time -from typing import ( - Any, - Dict, - Iterator, - List, - Optional, - Tuple, - Union, -) +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from langchain.agents import AgentExecutor from langchain.agents.agent import ExceptionTool @@ -19,9 +11,7 @@ from langchain_core.tools import BaseTool from langchain_core.utils.input import get_color_mapping from pydantic import InstanceOf -from crewai.agents.agent_builder.base_agent_executor_mixin import ( - CrewAgentExecutorMixin, -) +from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.tools_handler import ToolsHandler from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException from crewai.utilities import I18N diff --git a/src/crewai/agents/parser.py b/src/crewai/agents/parser.py index 03c3350b3..7ad674172 100644 --- a/src/crewai/agents/parser.py +++ b/src/crewai/agents/parser.py @@ -1,6 +1,7 @@ import re from typing import Any, Union +from json_repair import repair_json from langchain.agents.output_parsers import ReActSingleInputOutputParser from langchain_core.agents import AgentAction, AgentFinish from langchain_core.exceptions import OutputParserException @@ -48,11 +49,15 @@ class CrewAgentParser(ReActSingleInputOutputParser): raise OutputParserException( f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}" ) - action = action_match.group(1).strip() - action_input = action_match.group(2) - tool_input = action_input.strip(" ") - tool_input = tool_input.strip('"') - return AgentAction(action, tool_input, text) + action = action_match.group(1) + clean_action = self._clean_action(action) + + action_input = action_match.group(2).strip() + + tool_input = action_input.strip(" ").strip('"') + safe_tool_input = self._safe_repair_json(tool_input) + + return AgentAction(clean_action, safe_tool_input, text) elif includes_answer: return AgentFinish( @@ -87,3 +92,30 @@ class CrewAgentParser(ReActSingleInputOutputParser): llm_output=text, send_to_llm=True, ) + + def _clean_action(self, text: str) -> str: + """Clean action string by removing non-essential formatting characters.""" + return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip() + + def _safe_repair_json(self, tool_input: str) -> str: + UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"] + + # Skip repair if the input starts and ends with square brackets + # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]'). + # These are typically valid JSON arrays or strings that do not require repair. 
diff --git a/src/crewai/agents/executor.py b/src/crewai/agents/executor.py
index 58ab81e6f..aa0076b0c 100644
--- a/src/crewai/agents/executor.py
+++ b/src/crewai/agents/executor.py
@@ -1,14 +1,6 @@
 import threading
 import time
-from typing import (
-    Any,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

 from langchain.agents import AgentExecutor
 from langchain.agents.agent import ExceptionTool
@@ -19,9 +11,7 @@
 from langchain_core.tools import BaseTool
 from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf

-from crewai.agents.agent_builder.base_agent_executor_mixin import (
-    CrewAgentExecutorMixin,
-)
+from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N
diff --git a/src/crewai/agents/parser.py b/src/crewai/agents/parser.py
index 03c3350b3..7ad674172 100644
--- a/src/crewai/agents/parser.py
+++ b/src/crewai/agents/parser.py
@@ -1,6 +1,7 @@
 import re
 from typing import Any, Union

+from json_repair import repair_json
 from langchain.agents.output_parsers import ReActSingleInputOutputParser
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -48,11 +49,15 @@ class CrewAgentParser(ReActSingleInputOutputParser):
                 raise OutputParserException(
                     f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                 )
-            action = action_match.group(1).strip()
-            action_input = action_match.group(2)
-            tool_input = action_input.strip(" ")
-            tool_input = tool_input.strip('"')
-            return AgentAction(action, tool_input, text)
+            action = action_match.group(1)
+            clean_action = self._clean_action(action)
+
+            action_input = action_match.group(2).strip()
+
+            tool_input = action_input.strip(" ").strip('"')
+            safe_tool_input = self._safe_repair_json(tool_input)
+
+            return AgentAction(clean_action, safe_tool_input, text)

         elif includes_answer:
             return AgentFinish(
@@ -87,3 +92,30 @@
             llm_output=text,
             send_to_llm=True,
         )
+
+    def _clean_action(self, text: str) -> str:
+        """Clean action string by removing non-essential formatting characters."""
+        return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
+
+    def _safe_repair_json(self, tool_input: str) -> str:
+        UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
+
+        # Skip repair if the input starts and ends with square brackets.
+        # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
+        # These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
+        # might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
+        # the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
+        # square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
+        if tool_input.startswith("[") and tool_input.endswith("]"):
+            return tool_input
+
+        # Before repair, handle common LLM issues:
+        # 1. Replace """ with " to avoid JSON parser errors
+        tool_input = tool_input.replace('"""', '"')
+
+        result = repair_json(tool_input)
+        if result in UNABLE_TO_REPAIR_JSON_RESULTS:
+            return tool_input
+
+        return str(result)
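The repair path above can be reproduced directly with the library; a small sketch using the same malformed input as the tests further down (the expected output matches their assertions):

```python
from json_repair import repair_json

broken = '{"task": "Research XAI", "coworker": Senior Researcher'
print(repair_json(broken))
# expected: {"task": "Research XAI", "coworker": "Senior Researcher"}

# Inputs wrapped in square brackets never reach repair_json: the guard in
# _safe_repair_json returns them unchanged.
```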
diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 954a8f583..5719f1ea6 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -6,15 +6,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union

 from langchain_core.callbacks import BaseCallbackHandler
 from pydantic import (
-	UUID4,
-	BaseModel,
-	ConfigDict,
-	Field,
-	InstanceOf,
-	Json,
-	PrivateAttr,
-	field_validator,
-	model_validator,
+    UUID4,
+    BaseModel,
+    ConfigDict,
+    Field,
+    InstanceOf,
+    Json,
+    PrivateAttr,
+    field_validator,
+    model_validator,
 )
 from pydantic_core import PydanticCustomError
@@ -503,7 +503,30 @@
                 agent for agent in self.agents if agent != task.agent
             ]
             if len(self.agents) > 1 and len(agents_for_delegation) > 0:
-                task.tools += task.agent.get_delegation_tools(agents_for_delegation)
+                delegation_tools = task.agent.get_delegation_tools(
+                    agents_for_delegation
+                )
+
+                # Add tools if they are not already in task.tools
+                for new_tool in delegation_tools:
+                    # Find the index of the tool with the same name
+                    existing_tool_index = next(
+                        (
+                            index
+                            for index, tool in enumerate(task.tools or [])
+                            if tool.name == new_tool.name
+                        ),
+                        None,
+                    )
+                    if not task.tools:
+                        task.tools = []
+
+                    if existing_tool_index is not None:
+                        # Replace the existing tool
+                        task.tools[existing_tool_index] = new_tool
+                    else:
+                        # Add the new tool
+                        task.tools.append(new_tool)

             role = task.agent.role if task.agent is not None else "None"
             self._logger.log("debug", f"== Working Agent: {role}", color="bold_purple")
diff --git a/src/crewai/tools/agent_tools.py b/src/crewai/tools/agent_tools.py
index 59d340565..4f988b65d 100644
--- a/src/crewai/tools/agent_tools.py
+++ b/src/crewai/tools/agent_tools.py
@@ -7,7 +7,7 @@ class AgentTools(BaseAgentTools):
     """Default tools around agent delegation"""

     def tools(self):
-        coworkers = f"[{', '.join([f'{agent.role}' for agent in self.agents])}]"
+        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])
         tools = [
             StructuredTool.from_function(
                 func=self.delegate_work,
diff --git a/src/crewai/utilities/converter.py b/src/crewai/utilities/converter.py
index 436d68b76..1e2641e78 100644
--- a/src/crewai/utilities/converter.py
+++ b/src/crewai/utilities/converter.py
@@ -2,10 +2,8 @@ import json

 from langchain.schema import HumanMessage, SystemMessage
 from langchain_openai import ChatOpenAI
-from pydantic import model_validator

-from crewai.agents.agent_builder.utilities.base_output_converter_base import (
-    OutputConverter,
-)
+
+from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter


 class ConverterError(Exception):
@@ -19,15 +17,10 @@ class ConverterError(Exception):
 class Converter(OutputConverter):
     """Class that converts text into either pydantic or json."""

-    @model_validator(mode="after")
-    def check_llm_provider(self):
-        if not self._is_gpt(self.llm):
-            self._is_gpt = False
-
     def to_pydantic(self, current_attempt=1):
         """Convert text to pydantic."""
         try:
-            if self._is_gpt:
+            if self.is_gpt:
                 return self._create_instructor().to_pydantic()
             else:
                 return self._create_chain().invoke({})
@@ -41,7 +34,7 @@
     def to_json(self, current_attempt=1):
         """Convert text to json."""
         try:
-            if self._is_gpt:
+            if self.is_gpt:
                 return self._create_instructor().to_json()
             else:
                 return json.dumps(self._create_chain().invoke({}).model_dump())
@@ -75,5 +68,7 @@
         )
         return new_prompt | self.llm | parser

-    def _is_gpt(self, llm) -> bool:  # type: ignore # BUG? Name "_is_gpt" defined on line 20 hides name from outer scope
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
+    @property
+    def is_gpt(self) -> bool:
+        """Return if llm provided is of gpt from openai."""
+        return isinstance(self.llm, ChatOpenAI) and self.llm.openai_api_base is None
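With the validator gone, routing between the instructor-based and chain-based paths is decided lazily by the `is_gpt` property. A minimal construction sketch (field values are illustrative; the exact field set comes from `OutputConverter`):

```python
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

from crewai.utilities.converter import Converter


class Answer(BaseModel):
    text: str


converter = Converter(
    text='{"text": "hello"}',
    llm=ChatOpenAI(),  # stock client, no custom openai_api_base
    model=Answer,
    instructions="Convert the text into the Answer schema.",  # assumed field
)
print(converter.is_gpt)  # True for ChatOpenAI with the default API base
```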
diff --git a/tests/agents/__init__.py b/tests/agents/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/agents/test_crew_agent_parser.py b/tests/agents/test_crew_agent_parser.py
new file mode 100644
index 000000000..22fce9088
--- /dev/null
+++ b/tests/agents/test_crew_agent_parser.py
@@ -0,0 +1,378 @@
+import pytest
+from crewai.agents.parser import CrewAgentParser
+from langchain_core.agents import AgentAction, AgentFinish
+from langchain_core.exceptions import OutputParserException
+
+
+@pytest.fixture
+def parser():
+    p = CrewAgentParser()
+    p.agent = MockAgent()
+    return p
+
+
+def test_valid_action_parsing_special_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what's the temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in SF?"
+
+
+def test_valid_action_parsing_with_json_tool_input(parser):
+    text = """
+    Thought: Let's find the information
+    Action: query
+    Action Input: ** {"task": "What are some common challenges or barriers that you have observed or experienced when implementing AI-powered solutions in healthcare settings?", "context": "As we've discussed recent advancements in AI applications in healthcare, it's crucial to acknowledge the potential hurdles. Some possible obstacles include...", "coworker": "Senior Researcher"}
+    """
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    expected_tool_input = '{"task": "What are some common challenges or barriers that you have observed or experienced when implementing AI-powered solutions in healthcare settings?", "context": "As we\'ve discussed recent advancements in AI applications in healthcare, it\'s crucial to acknowledge the potential hurdles. Some possible obstacles include...", "coworker": "Senior Researcher"}'
+    assert result.tool == "query"
+    assert result.tool_input == expected_tool_input
+
+
+def test_valid_action_parsing_with_quotes(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "temperature in SF"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "temperature in SF"
+
+
+def test_valid_action_parsing_with_curly_braces(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: {temperature in SF}"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "{temperature in SF}"
+
+
+def test_valid_action_parsing_with_angle_brackets(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: <temperature in SF>"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "<temperature in SF>"
+
+
+def test_valid_action_parsing_with_parentheses(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: (temperature in SF)"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "(temperature in SF)"
+
+
+def test_valid_action_parsing_with_mixed_brackets(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: [temperature in {SF}]"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "[temperature in {SF}]"
+
+
+def test_valid_action_parsing_with_nested_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in 'SF'?\""
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in 'SF'?"
+
+
+def test_valid_action_parsing_with_incomplete_json(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == '{"query": "temperature in SF"}'
+
+
+def test_valid_action_parsing_with_special_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? @$%^&*"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF? @$%^&*"
+
+
+def test_valid_action_parsing_with_combination(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "[what is the temperature in SF?]"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "[what is the temperature in SF?]"
+
+
+def test_valid_action_parsing_with_mixed_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in SF?\""
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in SF?"
+
+
+def test_valid_action_parsing_with_newlines(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is\nthe temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is\nthe temperature in SF?"
+
+
+def test_valid_action_parsing_with_escaped_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? \\n"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF? \\n"
+
+
+def test_valid_action_parsing_with_json_string(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"}'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == '{"query": "temperature in SF"}'
+
+
+def test_valid_action_parsing_with_unbalanced_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what is the temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_clean_action_no_formatting(parser):
+    action = "Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_leading_asterisks(parser):
+    action = "** Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_trailing_asterisks(parser):
+    action = "Ask question to senior researcher **"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_leading_and_trailing_asterisks(parser):
+    action = "** Ask question to senior researcher **"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_multiple_leading_asterisks(parser):
+    action = "**** Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_multiple_trailing_asterisks(parser):
+    action = "Ask question to senior researcher ****"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_spaces_and_asterisks(parser):
+    action = " ** Ask question to senior researcher ** "
+    cleaned_action = parser._clean_action(action)
+    print(f"Original action: '{action}'")
+    print(f"Cleaned action: '{cleaned_action}'")
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_only_asterisks(parser):
+    action = "****"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == ""
+
+
+def test_clean_action_with_empty_string(parser):
+    action = ""
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == ""
+
+
+def test_valid_final_answer_parsing(parser):
+    text = (
+        "Thought: I found the information\nFinal Answer: The temperature is 100 degrees"
+    )
+    result = parser.parse(text)
+    assert isinstance(result, AgentFinish)
+    assert result.return_values["output"] == "The temperature is 100 degrees"
+
+
+def test_missing_action_error(parser):
+    text = "Thought: Let's find the temperature\nAction Input: what is the temperature in SF?"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "Could not parse LLM output" in str(exc_info.value)
+
+
+def test_missing_action_input_error(parser):
+    text = "Thought: Let's find the temperature\nAction: search"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "Could not parse LLM output" in str(exc_info.value)
+
+
+def test_action_and_final_answer_error(parser):
+    text = "Thought: I found the information\nAction: search\nAction Input: what is the temperature in SF?\nFinal Answer: The temperature is 100 degrees"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "both perform Action and give a Final Answer" in str(exc_info.value)
+
+
+def test_safe_repair_json(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": Senior Researcher'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unrepairable(parser):
+    invalid_json = "{invalid_json"
+    result = parser._safe_repair_json(invalid_json)
+    print("result:", invalid_json)
+    assert result == invalid_json  # Should return the original if unrepairable
+
+
+def test_safe_repair_json_missing_quotes(parser):
+    invalid_json = (
+        '{task: "Research XAI", context: "Explainable AI", coworker: Senior Researcher}'
+    )
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unclosed_brackets(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_extra_commas(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_trailing_commas(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_single_quotes(parser):
+    invalid_json = "{'task': 'Research XAI', 'context': 'Explainable AI', 'coworker': 'Senior Researcher'}"
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_mixed_quotes(parser):
+    invalid_json = "{'task': \"Research XAI\", 'context': \"Explainable AI\", 'coworker': 'Senior Researcher'}"
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unescaped_characters(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher\n"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    print("result:", result)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_missing_colon(parser):
+    invalid_json = '{"task" "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_missing_comma(parser):
+    invalid_json = '{"task": "Research XAI" "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unexpected_trailing_characters(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"} random text'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_special_characters_key(parser):
+    invalid_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
+    expected_repaired_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_parsing_with_whitespace(parser):
+    text = " Thought: Let's find the temperature \n Action: search \n Action Input: what is the temperature in SF? "
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_parsing_with_special_characters(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "what is the temperature in SF?"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_integration_valid_and_invalid(parser):
+    text = """
+    Thought: Let's find the temperature
+    Action: search
+    Action Input: what is the temperature in SF?
+
+    Thought: I found the information
+    Final Answer: The temperature is 100 degrees
+
+    Thought: Missing action
+    Action Input: invalid
+
+    Thought: Missing action input
+    Action: invalid
+    """
+    parts = text.strip().split("\n\n")
+    results = []
+    for part in parts:
+        try:
+            result = parser.parse(part.strip())
+            results.append(result)
+        except OutputParserException as e:
+            results.append(e)
+
+    assert isinstance(results[0], AgentAction)
+    assert isinstance(results[1], AgentFinish)
+    assert isinstance(results[2], OutputParserException)
+    assert isinstance(results[3], OutputParserException)
+
+
+class MockAgent:
+    def increment_formatting_errors(self):
+        pass
+
+
+# TODO: ADD TEST TO MAKE SURE ** REMOVAL DOESN'T MESS UP ANYTHING
diff --git a/tests/crew_test.py b/tests/crew_test.py
index d7d15e117..0a41332e2 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -1238,10 +1238,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():

     print(crew.usage_metrics)

     assert crew.usage_metrics == {
-        "total_tokens": 311,
-        "prompt_tokens": 224,
-        "completion_tokens": 87,
-        "successful_requests": 1,
+        "total_tokens": 2217,
+        "prompt_tokens": 1847,
+        "completion_tokens": 370,
+        "successful_requests": 4,
     }

@@ -1271,7 +1271,7 @@ def test_hierarchical_crew_creation_tasks_with_agents():
     assert crew.manager_agent.tools is not None
     print("TOOL DESCRIPTION", crew.manager_agent.tools[0].description)
     assert crew.manager_agent.tools[0].description.startswith(
-        "Delegate a specific task to one of the following coworkers: [Senior Writer, Researcher]"
+        "Delegate a specific task to one of the following coworkers: Senior Writer"
     )
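The updated `crew_test.py` assertion reflects the new coworker formatting in `AgentTools.tools()`: roles are now comma-separated with no surrounding brackets. A sketch of inspecting the description directly (agent fields are illustrative):

```python
from crewai import Agent
from crewai.tools.agent_tools import AgentTools

writer = Agent(role="Senior Writer", goal="Write", backstory="Writes.")
researcher = Agent(role="Researcher", goal="Research", backstory="Researches.")

delegate_tool = AgentTools(agents=[writer, researcher]).tools()[0]
print(delegate_tool.description)
# expected to start with:
# Delegate a specific task to one of the following coworkers: Senior Writer, Researcher
```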
""" + _times_executed: int = PrivateAttr(default=0) max_execution_time: Optional[int] = Field( default=None, description="Maximum execution time for an agent to execute a task", @@ -97,6 +100,10 @@ class Agent(BaseAgent): allow_code_execution: Optional[bool] = Field( default=False, description="Enable code execution for the agent." ) + max_retry_limit: int = Field( + default=2, + description="Maximum number of retries for an agent to execute a task when an error occurs.", + ) def __init__(__pydantic_self__, **data): config = data.pop("config", {}) @@ -185,13 +192,19 @@ class Agent(BaseAgent): else: task_prompt = self._use_trained_data(task_prompt=task_prompt) - result = self.agent_executor.invoke( - { - "input": task_prompt, - "tool_names": self.agent_executor.tools_names, - "tools": self.agent_executor.tools_description, - } - )["output"] + try: + result = self.agent_executor.invoke( + { + "input": task_prompt, + "tool_names": self.agent_executor.tools_names, + "tools": self.agent_executor.tools_description, + } + )["output"] + except Exception as e: + self._times_executed += 1 + if self._times_executed > self.max_retry_limit: + raise e + self.execute_task(task, context, tools) if self.max_rpm: self._rpm_controller.stop_rpm_counter() diff --git a/tests/agent_test.py b/tests/agent_test.py index d8e04c110..8ffbe591a 100644 --- a/tests/agent_test.py +++ b/tests/agent_test.py @@ -963,3 +963,54 @@ def test_agent_use_trained_data(crew_training_handler): crew_training_handler.assert_has_calls( [mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()] ) + + +def test_agent_max_retry_limit(): + agent = Agent( + role="test role", + goal="test goal", + backstory="test backstory", + max_retry_limit=1, + ) + + task = Task( + agent=agent, + description="Say the word: Hi", + expected_output="The word: Hi", + human_input=True, + ) + + error_message = "Error happening while sending prompt to model." 
diff --git a/tests/agent_test.py b/tests/agent_test.py
index d8e04c110..8ffbe591a 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -963,3 +963,54 @@ def test_agent_use_trained_data(crew_training_handler):
     crew_training_handler.assert_has_calls(
         [mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]
     )
+
+
+def test_agent_max_retry_limit():
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        max_retry_limit=1,
+    )
+
+    task = Task(
+        agent=agent,
+        description="Say the word: Hi",
+        expected_output="The word: Hi",
+        human_input=True,
+    )
+
+    error_message = "Error happening while sending prompt to model."
+    with patch.object(
+        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
+    ) as invoke_mock:
+        invoke_mock.side_effect = Exception(error_message)
+
+        assert agent._times_executed == 0
+        assert agent.max_retry_limit == 1
+
+        with pytest.raises(Exception) as e:
+            agent.execute_task(
+                task=task,
+            )
+        assert e.value.args[0] == error_message
+        assert agent._times_executed == 2
+
+        invoke_mock.assert_has_calls(
+            [
+                mock.call(
+                    {
+                        "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                        "tool_names": "",
+                        "tools": "",
+                    }
+                ),
+                mock.call(
+                    {
+                        "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                        "tool_names": "",
+                        "tools": "",
+                    }
+                ),
+            ]
+        )
From b93632a53a36b753e88fb716e21047388be809f8 Mon Sep 17 00:00:00 2001
From: Gui Vieira
Date: Mon, 15 Jul 2024 09:00:02 -0300
Subject: [PATCH 4/4] [DO NOT MERGE] Provide inputs on crew creation (#898)

* Provide inputs on crew creation
* Better naming
* Add crew id and task index to tasks
* Fix type again

---
 src/crewai/crew.py                |  1 -
 src/crewai/telemetry/telemetry.py | 25 ++++++++++++++++++++-----
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 5719f1ea6..5aa3967cb 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -170,7 +170,6 @@ class Crew(BaseModel):
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
-        self._telemetry.crew_creation(self)
         return self

     @model_validator(mode="after")
diff --git a/src/crewai/telemetry/telemetry.py b/src/crewai/telemetry/telemetry.py
index 207cbeeb7..552c22edd 100644
--- a/src/crewai/telemetry/telemetry.py
+++ b/src/crewai/telemetry/telemetry.py
@@ -80,7 +80,7 @@ class Telemetry:
             self.ready = False
             self.trace_set = False

-    def crew_creation(self, crew):
+    def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None):
         """Records the creation of a crew."""
         if self.ready:
             try:
@@ -93,6 +93,12 @@
                 )
                 self._add_attribute(span, "python_version", platform.python_version())
                 self._add_attribute(span, "crew_id", str(crew.id))
+
+                if crew.share_crew:
+                    self._add_attribute(
+                        span, "crew_inputs", json.dumps(inputs) if inputs else None
+                    )
+
                 self._add_attribute(span, "crew_process", crew.process)
                 self._add_attribute(span, "crew_memory", crew.memory)
                 self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
@@ -114,7 +120,7 @@
                             "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                             "delegation_enabled?": agent.allow_delegation,
                             "tools_names": [
-                                tool.name.casefold() for tool in agent.tools
+                                tool.name.casefold() for tool in agent.tools or []
                             ],
                         }
                         for agent in crew.agents
@@ -139,7 +145,7 @@
                                 else None
                             ),
                             "tools_names": [
-                                tool.name.casefold() for tool in task.tools
+                                tool.name.casefold() for tool in task.tools or []
                             ],
                         }
                         for task in crew.tasks
@@ -161,10 +167,11 @@
         if self.ready:
             try:
                 tracer = trace.get_tracer("crewai.telemetry")
-                span = tracer.start_span("Task Execution")

                 created_span = tracer.start_span("Task Created")

+                self._add_attribute(created_span, "crew_id", str(crew.id))
+                self._add_attribute(created_span, "task_index", crew.tasks.index(task))
                 self._add_attribute(created_span, "task_id", str(task.id))

                 if crew.share_crew:
@@ -178,6 +185,10 @@ class Telemetry:
                 created_span.set_status(Status(StatusCode.OK))
                 created_span.end()

+                span = tracer.start_span("Task Execution")
+
+                self._add_attribute(span, "crew_id", str(crew.id))
+                self._add_attribute(span, "task_index", crew.tasks.index(task))
                 self._add_attribute(span, "task_id", str(task.id))

                 if crew.share_crew:
@@ -275,6 +286,8 @@
         """
         if (self.ready) and (crew.share_crew):
             try:
+                self.crew_creation(crew, inputs)
+
                 tracer = trace.get_tracer("crewai.telemetry")
                 span = tracer.start_span("Crew Execution")
                 self._add_attribute(
@@ -283,7 +296,9 @@
                     pkg_resources.get_distribution("crewai").version,
                 )
                 self._add_attribute(span, "crew_id", str(crew.id))
-                self._add_attribute(span, "inputs", json.dumps(inputs))
+                self._add_attribute(
+                    span, "crew_inputs", json.dumps(inputs) if inputs else None
+                )
                 self._add_attribute(
                     span,
                     "crew_agents",
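The diff is truncated mid-hunk, but the intended call order is visible from the pieces above: `crew_creation` no longer fires from the `Crew` model validator and is instead invoked on the execution path with the kickoff inputs. A schematic sketch (the enclosing method name is hypothetical; only `crew_creation` and the attribute names come from the diff):

```python
def record_crew_execution(self, crew, inputs):  # hypothetical wrapper name
    if self.ready and crew.share_crew:
        # Deferred from Crew's model validator to execution time, so the
        # "Crew Created" span can carry the kickoff inputs.
        self.crew_creation(crew, inputs)
        # ... then start the "Crew Execution" span and record "crew_inputs"
        # as json.dumps(inputs) when inputs are present ...
```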