Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-18 13:28:31 +00:00

Compare commits (5 commits): log-task-o ... bugfix/lan
| Author | SHA1 | Date |
|---|---|---|
| | 040e5a78d2 | |
| | b93632a53a | |
| | 09938641cd | |
| | 7acf0b2107 | |
| | 4eb4073661 | |
@@ -127,7 +127,7 @@ llm = HuggingFaceHub(
 ```
 
 ## OpenAI Compatible API Endpoints
-Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, and Mistral AI.
+Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, Groq, and Mistral AI.
 
 ### Configuration Examples
 #### FastChat
@@ -144,6 +144,13 @@ OPENAI_API_BASE="http://localhost:1234/v1"
 OPENAI_API_KEY="lm-studio"
 ```
 
+#### Groq API
+```sh
+OPENAI_API_KEY=your-groq-api-key
+OPENAI_MODEL_NAME='llama3-8b-8192'
+OPENAI_API_BASE=https://api.groq.com/openai/v1
+```
+
 #### Mistral API
 ```sh
 OPENAI_API_KEY=your-mistral-api-key
@@ -211,4 +218,4 @@ azure_agent = Agent(
 ```
 
 ## Conclusion
 Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.

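Aside on the Groq example above (not part of the diff): a minimal, hypothetical sketch of how an OpenAI-compatible client can be pointed at Groq through the same three environment variables; the key and model name are the placeholders from the docs, and the `ChatOpenAI` wiring is only one possible way to hand such an llm to a CrewAI agent.

```python
import os

from langchain_openai import ChatOpenAI

# Same variables as the documented example; values are placeholders, not real credentials.
os.environ["OPENAI_API_BASE"] = "https://api.groq.com/openai/v1"
os.environ["OPENAI_API_KEY"] = "your-groq-api-key"
os.environ["OPENAI_MODEL_NAME"] = "llama3-8b-8192"

# Build an OpenAI-compatible client that talks to Groq; an Agent(llm=llm) could then use it.
llm = ChatOpenAI(
    model=os.environ["OPENAI_MODEL_NAME"],
    base_url=os.environ["OPENAI_API_BASE"],
    api_key=os.environ["OPENAI_API_KEY"],
)
```
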
23 poetry.lock generated

@@ -2282,6 +2282,17 @@ files = [
     {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
 ]
 
+[[package]]
+name = "json-repair"
+version = "0.25.2"
+description = "A package to repair broken json strings"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "json_repair-0.25.2-py3-none-any.whl", hash = "sha256:51d67295c3184b6c41a3572689661c6128cef6cfc9fb04db63130709adfc5bf0"},
+    {file = "json_repair-0.25.2.tar.gz", hash = "sha256:161a56d7e6bbfd4cad3a614087e3e0dbd0e10d402dd20dc7db418432428cb32b"},
+]
+
 [[package]]
 name = "jsonpatch"
 version = "1.33"
@@ -2395,8 +2406,8 @@ langchain-core = ">=0.2.10,<0.3.0"
 langchain-text-splitters = ">=0.2.0,<0.3.0"
 langsmith = ">=0.1.17,<0.2.0"
 numpy = [
-    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
     {version = ">=1,<2", markers = "python_version < \"3.12\""},
+    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
 ]
 pydantic = ">=1,<3"
 PyYAML = ">=5.3"
@@ -2437,8 +2448,8 @@ langchain = ">=0.2.6,<0.3.0"
 langchain-core = ">=0.2.10,<0.3.0"
 langsmith = ">=0.1.0,<0.2.0"
 numpy = [
-    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
     {version = ">=1,<2", markers = "python_version < \"3.12\""},
+    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
 ]
 PyYAML = ">=5.3"
 requests = ">=2,<3"
@@ -2461,8 +2472,8 @@ jsonpatch = ">=1.33,<2.0"
 langsmith = ">=0.1.75,<0.2.0"
 packaging = ">=23.2,<25"
 pydantic = [
-    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
     {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
+    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
 ]
 PyYAML = ">=5.3"
 tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0"
@@ -2511,8 +2522,8 @@ files = [
 [package.dependencies]
 orjson = ">=3.9.14,<4.0.0"
 pydantic = [
-    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
     {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""},
+    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
 ]
 requests = ">=2,<3"
 
@@ -3989,8 +4000,8 @@ files = [
 annotated-types = ">=0.4.0"
 pydantic-core = "2.20.1"
 typing-extensions = [
-    {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
     {version = ">=4.6.1", markers = "python_version < \"3.13\""},
+    {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
 ]
 
 [package.extras]
@@ -6090,4 +6101,4 @@ tools = ["crewai-tools"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10,<=3.13"
-content-hash = "0dbf6f6e2e841fb3eec4ff87ea5d6b430f29702118fee91307983c6b2581e59e"
+content-hash = "2cf5a3904e7cbcfebb85e198b6035252d47213a9b0dd3dd51837516e03b38d3e"

@@ -28,6 +28,7 @@ appdirs = "^1.4.4"
 jsonref = "^1.1.0"
 agentops = { version = "^0.1.9", optional = true }
 embedchain = "^0.1.114"
+json-repair = "^0.25.2"
 
 [tool.poetry.extras]
 tools = ["crewai-tools"]

@@ -1,13 +1,14 @@
 import os
+from inspect import signature
 from typing import Any, List, Optional, Tuple
 
 from langchain.agents.agent import RunnableAgent
+from langchain.agents.tools import BaseTool
 from langchain.agents.tools import tool as LangChainTool
-from langchain.tools.render import render_text_description
 from langchain_core.agents import AgentAction
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_openai import ChatOpenAI
-from pydantic import Field, InstanceOf, model_validator
+from pydantic import Field, InstanceOf, PrivateAttr, model_validator
 
 from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser
 from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -54,8 +55,11 @@ class Agent(BaseAgent):
             tools: Tools at agents disposal
             step_callback: Callback to be executed after each step of the agent execution.
             callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
+            allow_code_execution: Enable code execution for the agent.
+            max_retry_limit: Maximum number of retries for an agent to execute a task when an error occurs.
     """
 
+    _times_executed: int = PrivateAttr(default=0)
     max_execution_time: Optional[int] = Field(
         default=None,
         description="Maximum execution time for an agent to execute a task",
@@ -96,6 +100,10 @@ class Agent(BaseAgent):
     allow_code_execution: Optional[bool] = Field(
         default=False, description="Enable code execution for the agent."
     )
+    max_retry_limit: int = Field(
+        default=2,
+        description="Maximum number of retries for an agent to execute a task when an error occurs.",
+    )
 
     def __init__(__pydantic_self__, **data):
         config = data.pop("config", {})
@@ -167,14 +175,15 @@ class Agent(BaseAgent):
             if memory.strip() != "":
                 task_prompt += self.i18n.slice("memory").format(memory=memory)
 
-        tools = tools or self.tools
-        parsed_tools = self._parse_tools(tools or [])  # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
+        tools = tools or self.tools or []
+        parsed_tools = self._parse_tools(tools)
 
         self.create_agent_executor(tools=tools)
         self.agent_executor.tools = parsed_tools
         self.agent_executor.task = task
 
-        self.agent_executor.tools_description = render_text_description(parsed_tools)
+        self.agent_executor.tools_description = self._render_text_description_and_args(
+            parsed_tools
+        )
         self.agent_executor.tools_names = self.__tools_names(parsed_tools)
 
         if self.crew and self.crew._train:
@@ -182,13 +191,20 @@ class Agent(BaseAgent):
         else:
             task_prompt = self._use_trained_data(task_prompt=task_prompt)
 
-        result = self.agent_executor.invoke(
-            {
-                "input": task_prompt,
-                "tool_names": self.agent_executor.tools_names,
-                "tools": self.agent_executor.tools_description,
-            }
-        )["output"]
+        try:
+            result = self.agent_executor.invoke(
+                {
+                    "input": task_prompt,
+                    "tool_names": self.agent_executor.tools_names,
+                    "tools": self.agent_executor.tools_description,
+                }
+            )["output"]
+        except Exception as e:
+            self._times_executed += 1
+            if self._times_executed > self.max_retry_limit:
+                raise e
+            self.execute_task(task, context, tools)
 
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
@@ -220,7 +236,7 @@ class Agent(BaseAgent):
         Returns:
             An instance of the CrewAgentExecutor class.
         """
-        tools = tools or self.tools
+        tools = tools or self.tools or []
 
         agent_args = {
             "input": lambda x: x["input"],
@@ -315,6 +331,7 @@ class Agent(BaseAgent):
         tools_list = []
         for tool in tools:
             tools_list.append(tool)
+
         return tools_list
 
     def _training_handler(self, task_prompt: str) -> str:
@@ -341,6 +358,52 @@ class Agent(BaseAgent):
             )
         return task_prompt
 
+    def _render_text_description(self, tools: List[BaseTool]) -> str:
+        """Render the tool name and description in plain text.
+
+        Output will be in the format of:
+
+        .. code-block:: markdown
+
+            search: This tool is used for search
+            calculator: This tool is used for math
+        """
+        description = "\n".join(
+            [
+                f"Tool name: {tool.name}\nTool description:\n{tool.description}"
+                for tool in tools
+            ]
+        )
+
+        return description
+
+    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
+        """Render the tool name, description, and args in plain text.
+
+        Output will be in the format of:
+
+        .. code-block:: markdown
+
+            search: This tool is used for search, args: {"query": {"type": "string"}}
+            calculator: This tool is used for math, \
+            args: {"expression": {"type": "string"}}
+        """
+        tool_strings = []
+        for tool in tools:
+            args_schema = str(tool.args)
+            if hasattr(tool, "func") and tool.func:
+                sig = signature(tool.func)
+                description = (
+                    f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}"
+                )
+            else:
+                description = (
+                    f"Tool Name: {tool.name}\nTool Description: {tool.description}"
+                )
+            tool_strings.append(f"{description}\nTool Arguments: {args_schema}")
+
+        return "\n".join(tool_strings)
+
     @staticmethod
     def __tools_names(tools) -> str:
         return ", ".join([t.name for t in tools])

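Aside (not from the patch): the new `_render_text_description_and_args` helper above leans on `inspect.signature` to show a tool's expected arguments in the prompt. A small sketch of just that call, with an invented `search` function standing in for a tool's underlying callable:

```python
from inspect import signature


def search(query: str) -> str:
    """Search the web for a query."""
    return f"results for {query!r}"


# The helper appends this rendered signature right after the tool name,
# e.g. "Tool Name: search(query: str) -> str".
print(signature(search))  # (query: str) -> str
```
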
@@ -24,6 +24,7 @@ class BaseAgentTools(BaseModel, ABC):
         is_list = coworker.startswith("[") and coworker.endswith("]")
         if is_list:
             coworker = coworker[1:-1].split(",")[0]
+
         return coworker
 
     def delegate_work(
@@ -40,11 +41,13 @@ class BaseAgentTools(BaseModel, ABC):
         coworker = self._get_coworker(coworker, **kwargs)
         return self._execute(coworker, question, context)
 
-    def _execute(self, agent: Union[str, None], task: str, context: Union[str, None]):
+    def _execute(
+        self, agent_name: Union[str, None], task: str, context: Union[str, None]
+    ):
         """Execute the command."""
         try:
-            if agent is None:
-                agent = ""
+            if agent_name is None:
+                agent_name = ""
 
             # It is important to remove the quotes from the agent name.
             # The reason we have to do this is because less-powerful LLM's
@@ -53,7 +56,7 @@ class BaseAgentTools(BaseModel, ABC):
             # {"task": "....", "coworker": "....
             # when it should look like this:
             # {"task": "....", "coworker": "...."}
-            agent_name = agent.casefold().replace('"', "").replace("\n", "")
+            agent_name = agent_name.casefold().replace('"', "").replace("\n", "")
 
             agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
                 available_agent
@@ -75,9 +78,9 @@ class BaseAgentTools(BaseModel, ABC):
                 )
 
             agent = agent[0]
-            task = Task(  # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
+            task_with_assigned_agent = Task(  # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
                 description=task,
                 agent=agent,
                 expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
             )
-            return agent.execute_task(task, context)  # type: ignore # "str" has no attribute "execute_task"
+            return agent.execute_task(task_with_assigned_agent, context)

@@ -1,7 +1,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, Optional
 
-from pydantic import BaseModel, Field, PrivateAttr
+from pydantic import BaseModel, Field
 
 
 class OutputConverter(BaseModel, ABC):
@@ -21,7 +21,6 @@ class OutputConverter(BaseModel, ABC):
         max_attempts (int): Maximum number of conversion attempts (default: 3).
     """
 
-    _is_gpt: bool = PrivateAttr(default=True)
     text: str = Field(description="Text to be converted.")
     llm: Any = Field(description="The language model to be used to convert the text.")
     model: Any = Field(description="The model to be used to convert the text.")
@@ -41,7 +40,8 @@ class OutputConverter(BaseModel, ABC):
         """Convert text to json."""
         pass
 
-    @abstractmethod  # type: ignore # Name "_is_gpt" already defined on line 25
-    def _is_gpt(self, llm):  # type: ignore # Name "_is_gpt" already defined on line 25
+    @property
+    @abstractmethod
+    def is_gpt(self) -> bool:
         """Return if llm provided is of gpt from openai."""
         pass

@@ -1,14 +1,6 @@
 import threading
 import time
-from typing import (
-    Any,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
 
 from langchain.agents import AgentExecutor
 from langchain.agents.agent import ExceptionTool
@@ -19,9 +11,7 @@ from langchain_core.tools import BaseTool
 from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf
 
-from crewai.agents.agent_builder.base_agent_executor_mixin import (
-    CrewAgentExecutorMixin,
-)
+from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N

@@ -1,6 +1,7 @@
 import re
 from typing import Any, Union
 
+from json_repair import repair_json
 from langchain.agents.output_parsers import ReActSingleInputOutputParser
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -48,11 +49,15 @@ class CrewAgentParser(ReActSingleInputOutputParser):
                 raise OutputParserException(
                     f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                 )
-            action = action_match.group(1).strip()
-            action_input = action_match.group(2)
-            tool_input = action_input.strip(" ")
-            tool_input = tool_input.strip('"')
-            return AgentAction(action, tool_input, text)
+            action = action_match.group(1)
+            clean_action = self._clean_action(action)
+
+            action_input = action_match.group(2).strip()
+
+            tool_input = action_input.strip(" ").strip('"')
+            safe_tool_input = self._safe_repair_json(tool_input)
+
+            return AgentAction(clean_action, safe_tool_input, text)
 
         elif includes_answer:
             return AgentFinish(
@@ -87,3 +92,30 @@ class CrewAgentParser(ReActSingleInputOutputParser):
             llm_output=text,
             send_to_llm=True,
         )
+
+    def _clean_action(self, text: str) -> str:
+        """Clean action string by removing non-essential formatting characters."""
+        return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
+
+    def _safe_repair_json(self, tool_input: str) -> str:
+        UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
+
+        # Skip repair if the input starts and ends with square brackets
+        # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
+        # These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
+        # might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
+        # the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
+        # square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
+        if tool_input.startswith("[") and tool_input.endswith("]"):
+            return tool_input
+
+        # Before repair, handle common LLM issues:
+        # 1. Replace """ with " to avoid JSON parser errors
+
+        tool_input = tool_input.replace('"""', '"')
+
+        result = repair_json(tool_input)
+        if result in UNABLE_TO_REPAIR_JSON_RESULTS:
+            return tool_input
+
+        return str(result)

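Aside (not part of the patch): a quick illustration of what `json_repair.repair_json` does with the kind of malformed Action Input that `_safe_repair_json` now feeds it; the broken string below is an invented example, and the repaired output is the roughly expected result rather than a guaranteed one.

```python
from json_repair import repair_json

# A typical LLM slip: an unquoted value and a missing closing brace.
broken = '{"task": "Research XAI", "coworker": Senior Researcher'
fixed = repair_json(broken)
print(fixed)
# Roughly: {"task": "Research XAI", "coworker": "Senior Researcher"}
```
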
@@ -6,15 +6,15 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 
 from langchain_core.callbacks import BaseCallbackHandler
 from pydantic import (
     UUID4,
     BaseModel,
     ConfigDict,
     Field,
     InstanceOf,
     Json,
     PrivateAttr,
     field_validator,
     model_validator,
 )
 from pydantic_core import PydanticCustomError
 
@@ -170,7 +170,6 @@ class Crew(BaseModel):
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
-        self._telemetry.crew_creation(self)
         return self
 
     @model_validator(mode="after")
@@ -503,7 +502,30 @@ class Crew(BaseModel):
                     agent for agent in self.agents if agent != task.agent
                 ]
                 if len(self.agents) > 1 and len(agents_for_delegation) > 0:
-                    task.tools += task.agent.get_delegation_tools(agents_for_delegation)
+                    delegation_tools = task.agent.get_delegation_tools(
+                        agents_for_delegation
+                    )
+
+                    # Add tools if they are not already in task.tools
+                    for new_tool in delegation_tools:
+                        # Find the index of the tool with the same name
+                        existing_tool_index = next(
+                            (
+                                index
+                                for index, tool in enumerate(task.tools or [])
+                                if tool.name == new_tool.name
+                            ),
+                            None,
+                        )
+                        if not task.tools:
+                            task.tools = []
+
+                        if existing_tool_index is not None:
+                            # Replace the existing tool
+                            task.tools[existing_tool_index] = new_tool
+                        else:
+                            # Add the new tool
+                            task.tools.append(new_tool)
 
             role = task.agent.role if task.agent is not None else "None"
             self._logger.log("debug", f"== Working Agent: {role}", color="bold_purple")

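Aside (not from the patch): the delegation-tool bookkeeping above is a replace-or-append-by-name pattern. A self-contained sketch of the same control flow on a plain dataclass; `Tool` here is an illustrative stand-in, not the crewAI tool class.

```python
from dataclasses import dataclass


@dataclass
class Tool:
    name: str
    version: int


tools = [Tool("Delegate work to coworker", 1)]
new_tool = Tool("Delegate work to coworker", 2)

# Look up a tool with the same name, then replace it in place or append the new one.
existing_index = next(
    (i for i, t in enumerate(tools) if t.name == new_tool.name), None
)
if existing_index is not None:
    tools[existing_index] = new_tool
else:
    tools.append(new_tool)

assert tools == [Tool("Delegate work to coworker", 2)]
```
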
@@ -80,7 +80,7 @@ class Telemetry:
             self.ready = False
             self.trace_set = False
 
-    def crew_creation(self, crew):
+    def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None):
         """Records the creation of a crew."""
         if self.ready:
             try:
@@ -93,6 +93,12 @@ class Telemetry:
                 )
                 self._add_attribute(span, "python_version", platform.python_version())
                 self._add_attribute(span, "crew_id", str(crew.id))
+
+                if crew.share_crew:
+                    self._add_attribute(
+                        span, "crew_inputs", json.dumps(inputs) if inputs else None
+                    )
+
                 self._add_attribute(span, "crew_process", crew.process)
                 self._add_attribute(span, "crew_memory", crew.memory)
                 self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
@@ -114,7 +120,7 @@ class Telemetry:
                             "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                             "delegation_enabled?": agent.allow_delegation,
                             "tools_names": [
-                                tool.name.casefold() for tool in agent.tools
+                                tool.name.casefold() for tool in agent.tools or []
                             ],
                         }
                         for agent in crew.agents
@@ -139,7 +145,7 @@ class Telemetry:
                                 else None
                             ),
                             "tools_names": [
-                                tool.name.casefold() for tool in task.tools
+                                tool.name.casefold() for tool in task.tools or []
                             ],
                         }
                         for task in crew.tasks
@@ -161,10 +167,11 @@ class Telemetry:
         if self.ready:
             try:
                 tracer = trace.get_tracer("crewai.telemetry")
-                span = tracer.start_span("Task Execution")
 
                 created_span = tracer.start_span("Task Created")
 
+                self._add_attribute(created_span, "crew_id", str(crew.id))
+                self._add_attribute(created_span, "task_index", crew.tasks.index(task))
                 self._add_attribute(created_span, "task_id", str(task.id))
 
                 if crew.share_crew:
@@ -178,6 +185,10 @@ class Telemetry:
                 created_span.set_status(Status(StatusCode.OK))
                 created_span.end()
 
+                span = tracer.start_span("Task Execution")
+
+                self._add_attribute(span, "crew_id", str(crew.id))
+                self._add_attribute(span, "task_index", crew.tasks.index(task))
                 self._add_attribute(span, "task_id", str(task.id))
 
                 if crew.share_crew:
@@ -275,6 +286,8 @@ class Telemetry:
         """
         if (self.ready) and (crew.share_crew):
             try:
+                self.crew_creation(crew, inputs)
+
                 tracer = trace.get_tracer("crewai.telemetry")
                 span = tracer.start_span("Crew Execution")
                 self._add_attribute(
@@ -283,7 +296,9 @@ class Telemetry:
                     pkg_resources.get_distribution("crewai").version,
                 )
                 self._add_attribute(span, "crew_id", str(crew.id))
-                self._add_attribute(span, "inputs", json.dumps(inputs))
+                self._add_attribute(
+                    span, "crew_inputs", json.dumps(inputs) if inputs else None
+                )
                 self._add_attribute(
                     span,
                     "crew_agents",

@@ -7,7 +7,7 @@ class AgentTools(BaseAgentTools):
     """Default tools around agent delegation"""
 
     def tools(self):
-        coworkers = f"[{', '.join([f'{agent.role}' for agent in self.agents])}]"
+        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])
         tools = [
             StructuredTool.from_function(
                 func=self.delegate_work,

@@ -151,16 +151,12 @@ class ToolUsage:
                         for k, v in calling.arguments.items()
                         if k in acceptable_args
                     }
-                    result = tool._run(**arguments)
+                    result = tool.invoke(input=arguments)
                 except Exception:
-                    if tool.args_schema:
-                        arguments = calling.arguments
-                        result = tool._run(**arguments)
-                    else:
-                        arguments = calling.arguments.values()  # type: ignore # Incompatible types in assignment (expression has type "dict_values[str, Any]", variable has type "dict[str, Any]")
-                        result = tool._run(*arguments)
+                    arguments = calling.arguments
+                    result = tool.invoke(input=arguments)
             else:
-                result = tool._run()
+                result = tool.invoke(input={})
         except Exception as e:
             self._run_attempts += 1
             if self._run_attempts > self._max_parsing_attempts:

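Aside (not part of the patch): a minimal sketch of the `invoke` call style the diff switches to, using a throwaway LangChain tool; the `add` tool is invented for illustration, and the note about validation reflects LangChain's documented behavior rather than anything in this repository.

```python
from langchain_core.tools import tool


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


# invoke() runs the tool through LangChain's public entry point, which parses and
# validates the argument dict before dispatching to the underlying function.
print(add.invoke(input={"a": 2, "b": 3}))  # 5
```
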
@@ -2,10 +2,8 @@ import json
 
 from langchain.schema import HumanMessage, SystemMessage
 from langchain_openai import ChatOpenAI
-from pydantic import model_validator
-from crewai.agents.agent_builder.utilities.base_output_converter_base import (
-    OutputConverter,
-)
+
+from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
 
 
 class ConverterError(Exception):
@@ -19,15 +17,10 @@ class ConverterError(Exception):
 class Converter(OutputConverter):
     """Class that converts text into either pydantic or json."""
 
-    @model_validator(mode="after")
-    def check_llm_provider(self):
-        if not self._is_gpt(self.llm):
-            self._is_gpt = False
-
     def to_pydantic(self, current_attempt=1):
         """Convert text to pydantic."""
         try:
-            if self._is_gpt:
+            if self.is_gpt:
                 return self._create_instructor().to_pydantic()
             else:
                 return self._create_chain().invoke({})
@@ -41,7 +34,7 @@ class Converter(OutputConverter):
     def to_json(self, current_attempt=1):
         """Convert text to json."""
         try:
-            if self._is_gpt:
+            if self.is_gpt:
                 return self._create_instructor().to_json()
             else:
                 return json.dumps(self._create_chain().invoke({}).model_dump())
@@ -75,5 +68,7 @@ class Converter(OutputConverter):
         )
         return new_prompt | self.llm | parser
 
-    def _is_gpt(self, llm) -> bool:  # type: ignore # BUG? Name "_is_gpt" defined on line 20 hides name from outer scope
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
+    @property
+    def is_gpt(self) -> bool:
+        """Return if llm provided is of gpt from openai."""
+        return isinstance(self.llm, ChatOpenAI) and self.llm.openai_api_base is None

@@ -963,3 +963,54 @@ def test_agent_use_trained_data(crew_training_handler):
     crew_training_handler.assert_has_calls(
         [mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]
     )
+
+
+def test_agent_max_retry_limit():
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        max_retry_limit=1,
+    )
+
+    task = Task(
+        agent=agent,
+        description="Say the word: Hi",
+        expected_output="The word: Hi",
+        human_input=True,
+    )
+
+    error_message = "Error happening while sending prompt to model."
+    with patch.object(
+        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
+    ) as invoke_mock:
+        invoke_mock.side_effect = Exception(error_message)
+
+        assert agent._times_executed == 0
+        assert agent.max_retry_limit == 1
+
+        with pytest.raises(Exception) as e:
+            agent.execute_task(
+                task=task,
+            )
+        assert e.value.args[0] == error_message
+        assert agent._times_executed == 2
+
+        invoke_mock.assert_has_calls(
+            [
+                mock.call(
+                    {
+                        "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                        "tool_names": "",
+                        "tools": "",
+                    }
+                ),
+                mock.call(
+                    {
+                        "input": "Say the word: Hi\n\nThis is the expect criteria for your final answer: The word: Hi \n you MUST return the actual complete content as the final answer, not a summary.",
+                        "tool_names": "",
+                        "tools": "",
+                    }
+                ),
+            ]
+        )

0 tests/agents/__init__.py Normal file
378 tests/agents/test_crew_agent_parser.py Normal file

@@ -0,0 +1,378 @@
+import pytest
+from crewai.agents.parser import CrewAgentParser
+from langchain_core.agents import AgentAction, AgentFinish
+from langchain_core.exceptions import OutputParserException
+
+
+@pytest.fixture
+def parser():
+    p = CrewAgentParser()
+    p.agent = MockAgent()
+    return p
+
+
+def test_valid_action_parsing_special_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what's the temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in SF?"
+
+
+def test_valid_action_parsing_with_json_tool_input(parser):
+    text = """
+    Thought: Let's find the information
+    Action: query
+    Action Input: ** {"task": "What are some common challenges or barriers that you have observed or experienced when implementing AI-powered solutions in healthcare settings?", "context": "As we've discussed recent advancements in AI applications in healthcare, it's crucial to acknowledge the potential hurdles. Some possible obstacles include...", "coworker": "Senior Researcher"}
+    """
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    expected_tool_input = '{"task": "What are some common challenges or barriers that you have observed or experienced when implementing AI-powered solutions in healthcare settings?", "context": "As we\'ve discussed recent advancements in AI applications in healthcare, it\'s crucial to acknowledge the potential hurdles. Some possible obstacles include...", "coworker": "Senior Researcher"}'
+    assert result.tool == "query"
+    assert result.tool_input == expected_tool_input
+
+
+def test_valid_action_parsing_with_quotes(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "temperature in SF"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "temperature in SF"
+
+
+def test_valid_action_parsing_with_curly_braces(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: {temperature in SF}"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "{temperature in SF}"
+
+
+def test_valid_action_parsing_with_angle_brackets(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: <temperature in SF>"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "<temperature in SF>"
+
+
+def test_valid_action_parsing_with_parentheses(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: (temperature in SF)"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "(temperature in SF)"
+
+
+def test_valid_action_parsing_with_mixed_brackets(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: [temperature in {SF}]"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "[temperature in {SF}]"
+
+
+def test_valid_action_parsing_with_nested_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in 'SF'?\""
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in 'SF'?"
+
+
+def test_valid_action_parsing_with_incomplete_json(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == '{"query": "temperature in SF"}'
+
+
+def test_valid_action_parsing_with_special_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? @$%^&*"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF? @$%^&*"
+
+
+def test_valid_action_parsing_with_combination(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "[what is the temperature in SF?]"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "[what is the temperature in SF?]"
+
+
+def test_valid_action_parsing_with_mixed_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what's the temperature in SF?\""
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what's the temperature in SF?"
+
+
+def test_valid_action_parsing_with_newlines(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is\nthe temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is\nthe temperature in SF?"
+
+
+def test_valid_action_parsing_with_escaped_characters(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: what is the temperature in SF? \\n"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF? \\n"
+
+
+def test_valid_action_parsing_with_json_string(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: {"query": "temperature in SF"}'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == '{"query": "temperature in SF"}'
+
+
+def test_valid_action_parsing_with_unbalanced_quotes(parser):
+    text = "Thought: Let's find the temperature\nAction: search\nAction Input: \"what is the temperature in SF?"
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_clean_action_no_formatting(parser):
+    action = "Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_leading_asterisks(parser):
+    action = "** Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_trailing_asterisks(parser):
+    action = "Ask question to senior researcher **"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_leading_and_trailing_asterisks(parser):
+    action = "** Ask question to senior researcher **"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_multiple_leading_asterisks(parser):
+    action = "**** Ask question to senior researcher"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_multiple_trailing_asterisks(parser):
+    action = "Ask question to senior researcher ****"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_spaces_and_asterisks(parser):
+    action = " ** Ask question to senior researcher ** "
+    cleaned_action = parser._clean_action(action)
+    print(f"Original action: '{action}'")
+    print(f"Cleaned action: '{cleaned_action}'")
+    assert cleaned_action == "Ask question to senior researcher"
+
+
+def test_clean_action_with_only_asterisks(parser):
+    action = "****"
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == ""
+
+
+def test_clean_action_with_empty_string(parser):
+    action = ""
+    cleaned_action = parser._clean_action(action)
+    assert cleaned_action == ""
+
+
+def test_valid_final_answer_parsing(parser):
+    text = (
+        "Thought: I found the information\nFinal Answer: The temperature is 100 degrees"
+    )
+    result = parser.parse(text)
+    assert isinstance(result, AgentFinish)
+    assert result.return_values["output"] == "The temperature is 100 degrees"
+
+
+def test_missing_action_error(parser):
+    text = "Thought: Let's find the temperature\nAction Input: what is the temperature in SF?"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "Could not parse LLM output" in str(exc_info.value)
+
+
+def test_missing_action_input_error(parser):
+    text = "Thought: Let's find the temperature\nAction: search"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "Could not parse LLM output" in str(exc_info.value)
+
+
+def test_action_and_final_answer_error(parser):
+    text = "Thought: I found the information\nAction: search\nAction Input: what is the temperature in SF?\nFinal Answer: The temperature is 100 degrees"
+    with pytest.raises(OutputParserException) as exc_info:
+        parser.parse(text)
+    assert "both perform Action and give a Final Answer" in str(exc_info.value)
+
+
+def test_safe_repair_json(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": Senior Researcher'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unrepairable(parser):
+    invalid_json = "{invalid_json"
+    result = parser._safe_repair_json(invalid_json)
+    print("result:", invalid_json)
+    assert result == invalid_json  # Should return the original if unrepairable
+
+
+def test_safe_repair_json_missing_quotes(parser):
+    invalid_json = (
+        '{task: "Research XAI", context: "Explainable AI", coworker: Senior Researcher}'
+    )
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unclosed_brackets(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_extra_commas(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_trailing_commas(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher",}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_single_quotes(parser):
+    invalid_json = "{'task': 'Research XAI', 'context': 'Explainable AI', 'coworker': 'Senior Researcher'}"
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_mixed_quotes(parser):
+    invalid_json = "{'task': \"Research XAI\", 'context': \"Explainable AI\", 'coworker': 'Senior Researcher'}"
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unescaped_characters(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher\n"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    print("result:", result)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_missing_colon(parser):
+    invalid_json = '{"task" "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_missing_comma(parser):
+    invalid_json = '{"task": "Research XAI" "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_unexpected_trailing_characters(parser):
+    invalid_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"} random text'
+    expected_repaired_json = '{"task": "Research XAI", "context": "Explainable AI", "coworker": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_safe_repair_json_special_characters_key(parser):
+    invalid_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
+    expected_repaired_json = '{"task!@#": "Research XAI", "context$%^": "Explainable AI", "coworker&*()": "Senior Researcher"}'
+    result = parser._safe_repair_json(invalid_json)
+    assert result == expected_repaired_json
+
+
+def test_parsing_with_whitespace(parser):
+    text = " Thought: Let's find the temperature \n Action: search \n Action Input: what is the temperature in SF? "
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_parsing_with_special_characters(parser):
+    text = 'Thought: Let\'s find the temperature\nAction: search\nAction Input: "what is the temperature in SF?"'
+    result = parser.parse(text)
+    assert isinstance(result, AgentAction)
+    assert result.tool == "search"
+    assert result.tool_input == "what is the temperature in SF?"
+
+
+def test_integration_valid_and_invalid(parser):
+    text = """
+    Thought: Let's find the temperature
+    Action: search
+    Action Input: what is the temperature in SF?
+
+    Thought: I found the information
+    Final Answer: The temperature is 100 degrees
+
+    Thought: Missing action
+    Action Input: invalid
+
+    Thought: Missing action input
+    Action: invalid
+    """
+    parts = text.strip().split("\n\n")
+    results = []
+    for part in parts:
+        try:
+            result = parser.parse(part.strip())
+            results.append(result)
+        except OutputParserException as e:
+            results.append(e)
+
+    assert isinstance(results[0], AgentAction)
+    assert isinstance(results[1], AgentFinish)
+    assert isinstance(results[2], OutputParserException)
+    assert isinstance(results[3], OutputParserException)
+
+
+class MockAgent:
+    def increment_formatting_errors(self):
+        pass
+
+
+# TODO: ADD TEST TO MAKE SURE ** REMOVAL DOESN'T MESS UP ANYTHING

@@ -1238,10 +1238,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
     print(crew.usage_metrics)
 
     assert crew.usage_metrics == {
-        "total_tokens": 311,
-        "prompt_tokens": 224,
-        "completion_tokens": 87,
-        "successful_requests": 1,
+        "total_tokens": 2217,
+        "prompt_tokens": 1847,
+        "completion_tokens": 370,
+        "successful_requests": 4,
     }
 
 
@@ -1271,7 +1271,7 @@ def test_hierarchical_crew_creation_tasks_with_agents():
     assert crew.manager_agent.tools is not None
     print("TOOL DESCRIPTION", crew.manager_agent.tools[0].description)
     assert crew.manager_agent.tools[0].description.startswith(
-        "Delegate a specific task to one of the following coworkers: [Senior Writer, Researcher]"
+        "Delegate a specific task to one of the following coworkers: Senior Writer"
     )
