Merge remote-tracking branch 'upstream/main'

# Conflicts:
#	pyproject.toml
#	src/crewai/agent.py
#	src/crewai/crew.py
#	src/crewai/tools/tool_usage.py
Braelyn Boynton committed on 2024-05-02 12:52:31 -07:00
28 changed files with 7141 additions and 6682 deletions


@@ -48,7 +48,7 @@ To get started with CrewAI, follow these simple steps:
pip install crewai
```
If you want to also install crewai-tools, which is a package with tools that can be used by the agents, but more dependencies, you can install it with, example below uses it:
If you want to install the 'crewai' package along with its optional agent tools, use the command below; it installs the base package plus extra components that pull in more dependencies:
```shell
pip install 'crewai[tools]'
```

@@ -1463,11 +1463,11 @@
"locked": false,
"fontSize": 20,
"fontFamily": 3,
"text": "Agents have the inert ability of\nreach out to another to delegate\nwork or ask questions.",
"text": "Agents have the innate ability of\nreach out to another to delegate\nwork or ask questions.",
"textAlign": "right",
"verticalAlign": "top",
"containerId": null,
"originalText": "Agents have the inert ability of\nreach out to another to delegate\nwork or ask questions.",
"originalText": "Agents have the innate ability of\nreach out to another to delegate\nwork or ask questions.",
"lineHeight": 1.2,
"baseline": 68
},
@@ -1734,4 +1734,4 @@
"viewBackgroundColor": "#ffffff"
},
"files": {}
}
}


@@ -0,0 +1,29 @@
# BrowserbaseLoadTool
## Description
[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers. It offers advanced debugging, session recordings, stealth mode, integrated proxies, and captcha solving.
## Installation
- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).
- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk) along with the `crewai[tools]` package:
```
pip install browserbase 'crewai[tools]'
```
## Example
Utilize the BrowserbaseLoadTool as follows to allow your agent to load websites:
```python
from crewai_tools import BrowserbaseLoadTool
tool = BrowserbaseLoadTool()
```
## Arguments
- `api_key`: Optional. Specifies the Browserbase API key. Default is the `BROWSERBASE_API_KEY` environment variable.
- `text_content`: Optional. Load pages as readable text. Default is `False`.
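As a hedged sketch (argument names from the list above, values illustrative), both options can also be passed explicitly:
```python
from crewai_tools import BrowserbaseLoadTool

# api_key falls back to the BROWSERBASE_API_KEY environment variable when
# omitted; text_content=True requests readable text instead of raw HTML.
tool = BrowserbaseLoadTool(
    api_key="your-browserbase-api-key",  # placeholder, not a real key
    text_content=True,
)
```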


@@ -19,7 +19,7 @@ pip install 'crewai[tools]'
Below is a proposed example showcasing how to use the PGSearchTool for conducting a semantic search on a table within a PostgreSQL database:
```python
rom crewai_tools import PGSearchTool
from crewai_tools import PGSearchTool
# Initialize the tool with the database URI and the target table name
tool = PGSearchTool(db_uri='postgresql://user:password@localhost:5432/mydatabase', table_name='employees')
```
@@ -57,4 +57,4 @@ tool = PGSearchTool(
),
)
)
```
```


@@ -17,7 +17,7 @@ pip install 'crewai[tools]'
Here are two examples demonstrating how to use the XMLSearchTool. The first example shows searching within a specific XML file, while the second example illustrates initiating a search without predefining an XML path, providing flexibility in search scope.
```python
from crewai_tools.tools.xml_search_tool import XMLSearchTool
from crewai_tools import XMLSearchTool
# Allow agents to search within any XML file's content as it learns about their paths during execution
tool = XMLSearchTool()
```
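The companion example with a predefined path falls outside this hunk; a likely sketch, assuming the tool accepts an `xml` path argument as the surrounding docs suggest:
```python
# Restrict the agent's search to one known XML document
tool = XMLSearchTool(xml='path/to/your/xmlfile.xml')
```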


@@ -139,6 +139,7 @@ nav:
- Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
- Tools Docs:
- Google Serper Search: 'tools/SerperDevTool.md'
- Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
- Scrape Website: 'tools/ScrapeWebsiteTool.md'
- Directory Read: 'tools/DirectoryReadTool.md'
- File Read: 'tools/FileReadTool.md'

poetry.lock (generated): diff suppressed because it is too large.


@@ -1,6 +1,6 @@
[tool.poetry]
name = "crewai"
version = "0.28.8"
version = "0.30.0rc3"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -23,7 +23,7 @@ opentelemetry-sdk = "^1.22.0"
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
instructor = "^0.5.2"
regex = "^2023.12.25"
crewai-tools = { version = "^0.1.7", optional = true }
crewai-tools = { version = "^0.2.2", optional = true }
click = "^8.1.7"
python-dotenv = "^1.0.0"
embedchain = "^0.1.98"
@@ -46,7 +46,7 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai-tools = "^0.1.7"
crewai-tools = "^0.2.2"
[tool.isort]
profile = "black"


@@ -132,6 +132,15 @@ class Agent(BaseModel):
callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
default=None, description="Callback to be executed"
)
system_template: Optional[str] = Field(
default=None, description="System format for the agent."
)
prompt_template: Optional[str] = Field(
default=None, description="Prompt format for the agent."
)
response_template: Optional[str] = Field(
default=None, description="Response format for the agent."
)
_original_role: str | None = None
_original_goal: str | None = None
@@ -179,7 +188,9 @@ class Agent(BaseModel):
self.llm.callbacks = []
# Check if an instance of TokenCalcHandler already exists in the list
if not any(isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks):
if not any(
isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
):
self.llm.callbacks.append(token_handler)
if not self.agent_executor:
@@ -308,7 +319,13 @@ class Agent(BaseModel):
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
prompt = Prompts(
i18n=self.i18n,
tools=tools,
system_template=self.system_template,
prompt_template=self.prompt_template,
response_template=self.response_template,
).task_execution()
execution_prompt = prompt.partial(
goal=self.goal,
@@ -316,7 +333,13 @@ class Agent(BaseModel):
backstory=self.backstory,
)
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
stop_words = [self.i18n.slice("observation")]
if self.response_template:
stop_words.append(
self.response_template.split("{{ .Response }}")[1].strip()
)
bind = self.llm.bind(stop=stop_words)
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
self.agent_executor = CrewAgentExecutor(
agent=RunnableAgent(runnable=inner_agent), **executor_args
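For orientation, a hedged sketch of how these new fields are meant to be used from caller code; the Llama-3-style markers mirror the test added later in this commit, and the `{{ .System }}`, `{{ .Prompt }}`, and `{{ .Response }}` placeholders are substituted by `Prompts._build_prompt`:
```python
from crewai import Agent

# Note: per the stop_words logic above, the text after {{ .Response }} in
# response_template is also registered as an extra LLM stop word.
agent = Agent(
    role="Researcher",
    goal="Summarize recent work on LLM agents",
    backstory="A meticulous analyst.",
    system_template="<|start_header_id|>system<|end_header_id|>\n{{ .System }}<|eot_id|>",
    prompt_template="<|start_header_id|>user<|end_header_id|>\n{{ .Prompt }}<|eot_id|>",
    response_template="<|start_header_id|>assistant<|end_header_id|>\n{{ .Response }}<|eot_id|>",
)
```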


@@ -40,6 +40,9 @@ class CrewAgentExecutor(AgentExecutor):
have_forced_answer: bool = False
force_answer_max_iterations: Optional[int] = None
step_callback: Optional[Any] = None
system_template: Optional[str] = None
prompt_template: Optional[str] = None
response_template: Optional[str] = None
@root_validator()
def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -113,6 +116,7 @@ class CrewAgentExecutor(AgentExecutor):
# Allowing human input given task setting
if self.task.human_input:
self.should_ask_for_human_input = True
# Let's start tracking the number of iterations and time elapsed
self.iterations = 0
time_elapsed = 0.0
@@ -128,8 +132,10 @@ class CrewAgentExecutor(AgentExecutor):
intermediate_steps,
run_manager=run_manager,
)
if self.step_callback:
self.step_callback(next_step_output)
if isinstance(next_step_output, AgentFinish):
# Creating long term memory
create_long_term_memory = threading.Thread(
@@ -292,7 +298,6 @@ class CrewAgentExecutor(AgentExecutor):
tool=tool_calling.tool_name,
tools=", ".join([tool.name.casefold() for tool in self.tools]),
)
yield AgentStep(action=agent_action, observation=observation)
def _ask_human_input(self, final_answer: dict) -> str:


@@ -52,7 +52,6 @@ class CrewAgentParser(ReActSingleInputOutputParser):
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:


@@ -40,7 +40,7 @@ poetry run {{folder_name}}
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folser
This example, unmodified, will create a `report.md` file in the root folder with the output of research on LLMs.
## Understanding Your Crew
@@ -51,7 +51,7 @@ The {{name}} Crew is composed of multiple AI agents, each with unique roles, goa
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Joing our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat wtih our docs](https://chatg.pt/DWjSBZn)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)
Let's create wonders together with the power and simplicity of crewAI.
Let's create wonders together with the power and simplicity of crewAI.


@@ -48,6 +48,7 @@ class Crew(BaseModel):
verbose: Indicates the verbosity level for logging during execution.
config: Configuration settings for the crew.
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
prompt_file: Path to the prompt json file to be used for the crew.
id: A unique identifier for the crew instance.
full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
task_callback: Callback to be executed after each task for every agent's execution.
@@ -115,13 +116,9 @@ class Crew(BaseModel):
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
)
language: str = Field(
default="en",
description="Language used for the crew, defaults to English.",
)
language_file: str = Field(
prompt_file: str = Field(
default=None,
description="Path to the language file to be used for the crew.",
description="Path to the prompt json file to be used for the crew.",
)
output_log_file: Optional[Union[bool, str]] = Field(
default=False,
@@ -242,7 +239,7 @@ class Crew(BaseModel):
self._interpolate_inputs(inputs)
self._set_tasks_callbacks()
i18n = I18N(language=self.language, language_file=self.language_file)
i18n = I18N(prompt_file=self.prompt_file)
agentops.set_parent_key("daebe730-f54d-4af5-98df-e6946fb76d13")
agentops.add_tags(['crewai'])
@@ -317,7 +314,7 @@ class Crew(BaseModel):
def _run_hierarchical_process(self) -> str:
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
i18n = I18N(language=self.language, language_file=self.language_file)
i18n = I18N(prompt_file=self.prompt_file)
try:
self.manager_agent.allow_delegation = (
True # Forcing Allow delegation to the manager
@@ -360,7 +357,8 @@ class Crew(BaseModel):
def _set_tasks_callbacks(self) -> str:
"""Sets callback for every task suing task_callback"""
for task in self.tasks:
self.task_callback = task.callback
if not task.callback:
task.callback = self.task_callback
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
"""Interpolates the inputs in the tasks and agents."""


@@ -1,6 +1,8 @@
import re
import threading
import uuid
from typing import Any, Dict, List, Optional, Type
import os
from langchain_openai import ChatOpenAI
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
@@ -245,7 +247,16 @@ class Task(BaseModel):
return exported_result.model_dump()
return exported_result
except Exception:
pass
# sometimes the response contains valid JSON in the middle of text
match = re.search(r"({.*})", result, re.DOTALL)
if match:
try:
exported_result = model.model_validate_json(match.group(0))
if self.output_json:
return exported_result.model_dump()
return exported_result
except Exception:
pass
llm = self.agent.function_calling_llm or self.agent.llm
@@ -281,6 +292,11 @@ class Task(BaseModel):
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
def _save_file(self, result: Any) -> None:
directory = os.path.dirname(self.output_file)
if not os.path.exists(directory):
os.makedirs(directory)
with open(self.output_file, "w") as file:
file.write(result)
return None
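The regex fallback above is easy to exercise in isolation; a minimal sketch with an illustrative model:
```python
import re

from pydantic import BaseModel

class Report(BaseModel):
    title: str

# LLMs often wrap valid JSON in prose; the DOTALL capture grabs the
# outermost braces for a second model_validate_json attempt.
raw = 'Sure! Here it is: {"title": "LLM agents"} Hope that helps.'
match = re.search(r"({.*})", raw, re.DOTALL)
if match:
    print(Report.model_validate_json(match.group(0)).title)  # LLM agents
```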


@@ -88,7 +88,9 @@ class Telemetry:
self._add_attribute(span, "python_version", platform.python_version())
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_language", crew.language)
self._add_attribute(
span, "crew_language", crew.prompt_file if crew.i18n else "None"
)
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
@@ -103,7 +105,7 @@ class Telemetry:
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.language,
"i18n": agent.i18n.prompt_file,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [
@@ -232,7 +234,7 @@ class Telemetry:
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.language,
"i18n": agent.i18n.prompt_file,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [


@@ -34,11 +34,11 @@ class AgentTools(BaseModel):
return tools
def delegate_work(self, coworker: str, task: str, context: str):
"""Useful to delegate a specific task to a coworker passing all necessary context and names."""
"""Useful to delegate a specific task to a co-worker passing all necessary context and names."""
return self._execute(coworker, task, context)
def ask_question(self, coworker: str, question: str, context: str):
"""Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
"""Useful to ask a question, opinion or take from a co-worker passing all necessary context and names."""
return self._execute(coworker, question, context)
def _execute(self, agent, task, context):
@@ -67,6 +67,6 @@ class AgentTools(BaseModel):
task = Task(
description=task,
agent=agent,
expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
expected_output="Your best answer to your co-worker asking you this, accounting for the context shared.",
)
return agent.execute_task(task, context)


@@ -1,6 +1,7 @@
import ast
from textwrap import dedent
from typing import Any, List, Union
from difflib import SequenceMatcher
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
@@ -31,13 +32,13 @@ class ToolUsage:
Class that represents the usage of a tool by an agent.
Attributes:
task: Task being executed.
tools_handler: Tools handler that will manage the tool usage.
tools: List of tools available for the agent.
original_tools: Original tools available for the agent before being converted to BaseTool.
tools_description: Description of the tools available for the agent.
tools_names: Names of the tools available for the agent.
function_calling_llm: Language model to be used for the tool usage.
task: Task being executed.
tools_handler: Tools handler that will manage the tool usage.
tools: List of tools available for the agent.
original_tools: Original tools available for the agent before being converted to BaseTool.
tools_description: Description of the tools available for the agent.
tools_names: Names of the tools available for the agent.
function_calling_llm: Language model to be used for the tool usage.
"""
def __init__(
@@ -225,12 +226,15 @@ class ToolUsage:
def _select_tool(self, tool_name: str) -> BaseTool:
for tool in self.tools:
if tool.name.lower().strip() == tool_name.lower().strip():
if (
tool.name.lower().strip() == tool_name.lower().strip()
or SequenceMatcher(None, tool.name.lower().strip(), tool_name.lower().strip()).ratio() > 0.9
):
return tool
self.task.increment_tools_errors()
if tool_name and tool_name != "":
raise Exception(
f"Action '{tool_name}' don't exist, these are the only available Actions: {self.tools_description}"
f"Action '{tool_name}' don't exist, these are the only available Actions:\n {self.tools_description}"
)
else:
raise Exception(
@@ -270,17 +274,17 @@ class ToolUsage:
else ToolCalling
)
converter = Converter(
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid ouput schema:\n\n{tool_string}```",
text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n{tool_string}```",
llm=self.function_calling_llm,
model=model,
instructions=dedent(
"""\
The schema should have the following structure, only two keys:
- tool_name: str
- arguments: dict (with all arguments being passed)
The schema should have the following structure, only two keys:
- tool_name: str
- arguments: dict (with all arguments being passed)
Example:
{"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
Example:
{"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
),
max_attemps=1,
)
@@ -292,7 +296,8 @@ class ToolUsage:
tool_name = self.action.tool
tool = self._select_tool(tool_name)
try:
arguments = ast.literal_eval(self.action.tool_input)
tool_input = self._validate_tool_input(self.action.tool_input)
arguments = ast.literal_eval(tool_input)
except Exception:
return ToolUsageErrorException(
f'{self._i18n.errors("tool_arguments_error")}'
@@ -318,3 +323,50 @@ class ToolUsage:
return self._tool_calling(tool_string)
return calling
def _validate_tool_input(self, tool_input: str) -> dict:
try:
ast.literal_eval(tool_input)
return tool_input
except Exception:
# Clean and ensure the string is properly enclosed in braces
tool_input = tool_input.strip()
if not tool_input.startswith("{"):
tool_input = "{" + tool_input
if not tool_input.endswith("}"):
tool_input += "}"
# Manually split the input into key-value pairs
entries = tool_input.strip("{} ").split(",")
formatted_entries = []
for entry in entries:
if ":" not in entry:
continue # Skip malformed entries
key, value = entry.split(":", 1)
key = key.strip().strip(
'"'
) # Remove extraneous white spaces and quotes
value = value.strip()
# Check and format the value based on its type
if value.isdigit(): # Check if value is a digit, hence integer
formatted_value = value
elif value.lower() in [
"true",
"false",
"null",
]: # Check for boolean and null values
formatted_value = value.lower()
else:
# Assume the value is a string and needs quotes
formatted_value = '"' + value.strip('"').replace('"', '\\"') + '"'
# Rebuild the entry with proper quoting
formatted_entry = f'"{key}": {formatted_value}'
formatted_entries.append(formatted_entry)
# Reconstruct the JSON string
new_json_string = "{" + ", ".join(formatted_entries) + "}"
return new_json_string
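For reference, a self-contained sketch of the fuzzy lookup `_select_tool` now performs (threshold from the diff; tool names illustrative):
```python
from difflib import SequenceMatcher

def matches_tool(requested: str, actual: str) -> bool:
    # Exact match, or a close one: tolerates small typos in the tool
    # name the LLM produced (same 0.9 ratio threshold as _select_tool).
    requested = requested.lower().strip()
    actual = actual.lower().strip()
    if requested == actual:
        return True
    return SequenceMatcher(None, actual, requested).ratio() > 0.9

print(matches_tool("websearch", "web search"))   # True, ratio ~0.95
print(matches_tool("calculator", "web search"))  # False
```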


@@ -6,12 +6,12 @@
},
"slices": {
"observation": "\nObservation",
"task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought: ",
"task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
"memory": "\n\n# Useful context: \n{memory}",
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
@@ -20,7 +20,6 @@
"getting_input": "This is the agent final answer: {final_answer}\nPlease provide a feedback: "
},
"errors": {
"unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
"force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
"agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n{coworkers}\n",
"task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
@@ -30,7 +29,7 @@
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}"
},
"tools": {
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
}
}


@@ -2,44 +2,36 @@ import json
import os
from typing import Dict, Optional
from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_validator
from pydantic import BaseModel, Field, PrivateAttr, model_validator
class I18N(BaseModel):
_translations: Dict[str, Dict[str, str]] = PrivateAttr()
language_file: Optional[str] = Field(
_prompts: Dict[str, Dict[str, str]] = PrivateAttr()
prompt_file: Optional[str] = Field(
default=None,
description="Path to the translation file to load",
)
language: Optional[str] = Field(
default="en",
description="Language used to load translations",
description="Path to the prompt_file file to load",
)
@model_validator(mode="after")
def load_translation(self) -> "I18N":
"""Load translations from a JSON file based on the specified language."""
def load_prompts(self) -> "I18N":
"""Load prompts from a JSON file."""
try:
if self.language_file:
with open(self.language_file, "r") as f:
self._translations = json.load(f)
if self.prompt_file:
with open(self.prompt_file, "r") as f:
self._prompts = json.load(f)
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
prompts_path = os.path.join(
dir_path, f"../translations/{self.language}.json"
)
prompts_path = os.path.join(dir_path, f"../translations/en.json")
with open(prompts_path, "r") as f:
self._translations = json.load(f)
self._prompts = json.load(f)
except FileNotFoundError:
raise ValidationError(
f"Translation file for language '{self.language}' not found."
)
raise Exception(f"Prompt file '{self.prompt_file}' not found.")
except json.JSONDecodeError:
raise ValidationError(f"Error decoding JSON from the prompts file.")
raise Exception(f"Error decoding JSON from the prompts file.")
if not self._translations:
self._translations = {}
if not self._prompts:
self._prompts = {}
return self
@@ -54,6 +46,6 @@ class I18N(BaseModel):
def retrieve(self, kind, key) -> str:
try:
return self._translations[kind][key]
return self._prompts[kind][key]
except:
raise ValidationError(f"Translation for '{kind}':'{key}' not found.")
raise Exception(f"Prompt for '{kind}':'{key}' not found.")


@@ -1,3 +1,4 @@
import os
from pathlib import Path
import appdirs
@@ -13,6 +14,11 @@ def db_storage_path():
def get_project_directory_name():
cwd = Path.cwd()
project_directory_name = cwd.name
return project_directory_name
project_directory_name = os.environ.get("CREWAI_STORAGE_DIR")
if project_directory_name:
return project_directory_name
else:
cwd = Path.cwd()
project_directory_name = cwd.name
return project_directory_name
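A one-line sketch of the new override (the value is illustrative):
```python
import os

# When set, storage is grouped under this name instead of the name of
# the current working directory.
os.environ["CREWAI_STORAGE_DIR"] = "my_crew_project"
```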


@@ -1,4 +1,4 @@
from typing import Any, ClassVar
from typing import Any, ClassVar, Optional
from langchain.prompts import BasePromptTemplate, PromptTemplate
from pydantic import BaseModel, Field
@@ -7,16 +7,15 @@ from crewai.utilities import I18N
class Prompts(BaseModel):
"""Manages and generates prompts for a generic agent with support for different languages."""
"""Manages and generates prompts for a generic agent."""
i18n: I18N = Field(default=I18N())
tools: list[Any] = Field(default=[])
system_template: Optional[str] = None
prompt_template: Optional[str] = None
response_template: Optional[str] = None
SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"
def task_execution_without_tools(self) -> BasePromptTemplate:
"""Generate a prompt for task execution without tools components."""
return self._build_prompt(["role_playing", "task"])
def task_execution(self) -> BasePromptTemplate:
"""Generate a standard prompt for task execution."""
slices = ["role_playing"]
@@ -24,12 +23,42 @@ class Prompts(BaseModel):
slices.append("tools")
else:
slices.append("no_tools")
slices.append("task")
return self._build_prompt(slices)
def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
slices.append("task")
if not self.system_template and not self.prompt_template:
return self._build_prompt(slices)
else:
return self._build_prompt(
slices,
self.system_template,
self.prompt_template,
self.response_template,
)
def _build_prompt(
self,
components: list[str],
system_template=None,
prompt_template=None,
response_template=None,
) -> BasePromptTemplate:
"""Constructs a prompt string from specified components."""
prompt_parts = [self.i18n.slice(component) for component in components]
prompt_parts.append(self.SCRATCHPAD_SLICE)
prompt = PromptTemplate.from_template("".join(prompt_parts))
if not system_template and not prompt_template:
prompt_parts = [self.i18n.slice(component) for component in components]
prompt_parts.append(self.SCRATCHPAD_SLICE)
prompt = PromptTemplate.from_template("".join(prompt_parts))
else:
prompt_parts = [
self.i18n.slice(component)
for component in components
if component != "task"
]
system = system_template.replace("{{ .System }}", "".join(prompt_parts))
prompt = prompt_template.replace(
"{{ .Prompt }}",
"".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]),
)
response = response_template.split("{{ .Response }}")[0]
prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}")
return prompt


@@ -754,6 +754,7 @@ def test_agent_definition_based_on_dict():
assert agent.verbose == True
assert agent.tools == []
# test for human input
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_human_input():
@@ -780,6 +781,7 @@ def test_agent_human_input():
mock_human_input.assert_called_once()
assert output == "Hello"
def test_interpolate_inputs():
agent = Agent(
role="{topic} specialist",
@@ -797,3 +799,46 @@ def test_interpolate_inputs():
assert agent.goal == "Figure stuff out"
assert agent.backstory == "I am the master of nothing"
def test_system_and_prompt_template():
agent = Agent(
role="{topic} specialist",
goal="Figure {goal} out",
backstory="I am the master of {role}",
system_template="""<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>""",
prompt_template="""<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>""",
response_template="""<|start_header_id|>assistant<|end_header_id|>
{{ .Response }}<|eot_id|>""",
)
template = agent.agent_executor.agent.dict()["runnable"]["middle"][0]["template"]
assert (
template
== """<|start_header_id|>system<|end_header_id|>
You are {role}. {backstory}
Your personal goal is: {goal}To give my best complete final answer to the task use the exact following format:
Thought: I now can give a great answer
Final Answer: my best complete final answer to the task.
Your final answer must be the great and the most complete as possible, it must be outcome described.
I MUST use these formats, my job depends on it!<|eot_id|>
<|start_header_id|>user<|end_header_id|>
Current Task: {input}
Begin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!
Thought:
{agent_scratchpad}<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
"""
)


@@ -324,7 +324,7 @@ interactions:
Name: learn_about_ai\nTool Description: learn_about_AI(topic) -> float - Useful
for when you need to learn about AI to write an paragraph about it.\nTool Arguments:
{''topic'': {}}\n\nReturn a valid schema for the tool, the tool name must be
exactly equal one of the options, use this text to inform the valid ouput schema:\n\nThought:
exactly equal one of the options, use this text to inform the valid output schema:\n\nThought:
Before I can write an amazing article about AI, I need to understand the basics
of AI, its applications, and ethical considerations to ensure that the content
is well-rounded and informative.\n\nAction: learn_about_AI\nAction Input: {\"topic\":
@@ -854,7 +854,7 @@ interactions:
Name: learn_about_ai\nTool Description: learn_about_AI(topic) -> float - Useful
for when you need to learn about AI to write an paragraph about it.\nTool Arguments:
{''topic'': {}}\n\nReturn a valid schema for the tool, the tool name must be
exactly equal one of the options, use this text to inform the valid ouput schema:\n\nThought:
exactly equal one of the options, use this text to inform the valid output schema:\n\nThought:
Now that I know AI is a broad field, I should learn specifically about its applications
to provide concrete examples in my article.\n\nAction: learn_about_AI\nAction
Input: {\"topic\": \"applications of AI\"}```"}, {"role": "system", "content":
@@ -1325,7 +1325,7 @@ interactions:
Name: learn_about_ai\nTool Description: learn_about_AI(topic) -> float - Useful
for when you need to learn about AI to write an paragraph about it.\nTool Arguments:
{''topic'': {}}\n\nReturn a valid schema for the tool, the tool name must be
exactly equal one of the options, use this text to inform the valid ouput schema:\n\nThought:
exactly equal one of the options, use this text to inform the valid output schema:\n\nThought:
It seems there was an error with the action input. I need to correct the format
to ensure the tool works properly for gathering information on the applications
of AI.\n\nAction: learn_about_AI\nAction Input: {\"topic\": \"applications of
@@ -1807,7 +1807,7 @@ interactions:
Name: learn_about_ai\nTool Description: learn_about_AI(topic) -> float - Useful
for when you need to learn about AI to write an paragraph about it.\nTool Arguments:
{''topic'': {}}\n\nReturn a valid schema for the tool, the tool name must be
exactly equal one of the options, use this text to inform the valid ouput schema:\n\nThought:
exactly equal one of the options, use this text to inform the valid output schema:\n\nThought:
Having understood that AI is a broad field, and after the corrected attempt
to learn about its applications, I realize I need more detailed insights into
ethical considerations around AI to make sure the article covers a balanced

File diff suppressed because it is too large.


@@ -648,10 +648,10 @@ def test_agent_usage_metrics_are_captured_for_sequential_process():
result = crew.kickoff()
assert result == "Howdy!"
assert crew.usage_metrics == {
"completion_tokens": 51,
"prompt_tokens": 483,
"successful_requests": 3,
"total_tokens": 534,
"completion_tokens": 17,
"prompt_tokens": 160,
"successful_requests": 1,
"total_tokens": 177,
}
@@ -678,10 +678,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
result = crew.kickoff()
assert result == '"Howdy!"'
assert crew.usage_metrics == {
"total_tokens": 2592,
"prompt_tokens": 2048,
"completion_tokens": 544,
"successful_requests": 6,
"total_tokens": 1650,
"prompt_tokens": 1367,
"completion_tokens": 283,
"successful_requests": 3,
}


@@ -20,7 +20,7 @@ def test_save_and_search(long_term_memory):
metadata={"task": "test_task", "quality": 0.5},
)
long_term_memory.save(memory)
find = long_term_memory.search("test_task")[0]
find = long_term_memory.search("test_task", latest_n=5)[0]
assert find["score"] == 0.5
assert find["datetime"] == "test_datetime"
assert find["metadata"]["agent"] == "test_agent"


@@ -0,0 +1,40 @@
{
"hierarchical_manager_agent": {
"role": "Lorem ipsum dolor sit amet",
"goal": "Lorem ipsum dolor sit amet",
"backstory": "Lorem ipsum dolor sit amet."
},
"planning_manager_agent": {
"role": "Lorem ipsum dolor sit amet",
"goal": "Lorem ipsum dolor sit amet",
"backstory": "Lorem ipsum dolor sit amet."
},
"slices": {
"observation": "Lorem ipsum dolor sit amet",
"task": "Lorem ipsum dolor sit amet",
"memory": "Lorem ipsum dolor sit amet",
"role_playing": "Lorem ipsum dolor sit amet",
"tools": "Lorem ipsum dolor sit amet",
"no_tools": "Lorem ipsum dolor sit amet",
"format": "Lorem ipsum dolor sit amet",
"final_answer_format": "Lorem ipsum dolor sit amet",
"format_without_tools": "Lorem ipsum dolor sit amet",
"task_with_context": "Lorem ipsum dolor sit amet",
"expected_output": "Lorem ipsum dolor sit amet",
"human_feedback": "Lorem ipsum dolor sit amet",
"getting_input": "Lorem ipsum dolor sit amet "
},
"errors": {
"force_final_answer": "Lorem ipsum dolor sit amet",
"agent_tool_unexsiting_coworker": "Lorem ipsum dolor sit amet",
"task_repeated_usage": "Lorem ipsum dolor sit amet",
"tool_usage_error": "Lorem ipsum dolor sit amet",
"tool_arguments_error": "Lorem ipsum dolor sit amet",
"wrong_tool_name": "Lorem ipsum dolor sit amet",
"tool_usage_exception": "Lorem ipsum dolor sit amet"
},
"tools": {
"delegate_work": "Lorem ipsum dolor sit amet",
"ask_question": "Lorem ipsum dolor sit amet"
}
}


@@ -3,38 +3,42 @@ import pytest
from crewai.utilities.i18n import I18N
def test_load_translation():
i18n = I18N(language="en")
i18n.load_translation()
assert i18n._translations is not None
def test_load_prompts():
i18n = I18N()
i18n.load_prompts()
assert i18n._prompts is not None
def test_slice():
i18n = I18N(language="en")
i18n.load_translation()
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.slice("role_playing"), str)
def test_errors():
i18n = I18N(language="en")
i18n.load_translation()
assert isinstance(i18n.errors("unexpected_format"), str)
def test_tools():
i18n = I18N(language="en")
i18n.load_translation()
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.tools("ask_question"), str)
def test_retrieve():
i18n = I18N(language="en")
i18n.load_translation()
i18n = I18N()
i18n.load_prompts()
assert isinstance(i18n.retrieve("slices", "role_playing"), str)
def test_retrieve_not_found():
i18n = I18N(language="en")
i18n.load_translation()
i18n = I18N()
i18n.load_prompts()
with pytest.raises(Exception):
i18n.retrieve("nonexistent_kind", "nonexistent_key")
def test_prompt_file():
import os
path = os.path.join(os.path.dirname(__file__), "prompts.json")
i18n = I18N(prompt_file=path)
i18n.load_prompts()
assert isinstance(i18n.retrieve("slices", "role_playing"), str)
assert i18n.retrieve("slices", "role_playing") == "Lorem ipsum dolor sit amet"