mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-20 05:18:16 +00:00

Merge remote-tracking branch 'upstream/main'

# Conflicts:
#	pyproject.toml
#	src/crewai/agent.py
#	src/crewai/crew.py
#	src/crewai/tools/tool_usage.py
@@ -132,6 +132,15 @@ class Agent(BaseModel):
     callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
         default=None, description="Callback to be executed"
     )
+    system_template: Optional[str] = Field(
+        default=None, description="System format for the agent."
+    )
+    prompt_template: Optional[str] = Field(
+        default=None, description="Prompt format for the agent."
+    )
+    response_template: Optional[str] = Field(
+        default=None, description="Response format for the agent."
+    )
 
     _original_role: str | None = None
     _original_goal: str | None = None
@@ -179,7 +188,9 @@ class Agent(BaseModel):
             self.llm.callbacks = []
 
         # Check if an instance of TokenCalcHandler already exists in the list
-        if not any(isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks):
+        if not any(
+            isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
+        ):
             self.llm.callbacks.append(token_handler)
 
         if not self.agent_executor:
@@ -308,7 +319,13 @@ class Agent(BaseModel):
                 "request_within_rpm_limit"
             ] = self._rpm_controller.check_or_wait
 
-        prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
+        prompt = Prompts(
+            i18n=self.i18n,
+            tools=tools,
+            system_template=self.system_template,
+            prompt_template=self.prompt_template,
+            response_template=self.response_template,
+        ).task_execution()
 
         execution_prompt = prompt.partial(
             goal=self.goal,
@@ -316,7 +333,13 @@ class Agent(BaseModel):
             backstory=self.backstory,
         )
 
-        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
+        stop_words = [self.i18n.slice("observation")]
+        if self.response_template:
+            stop_words.append(
+                self.response_template.split("{{ .Response }}")[1].strip()
+            )
+
+        bind = self.llm.bind(stop=stop_words)
         inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
         self.agent_executor = CrewAgentExecutor(
             agent=RunnableAgent(runnable=inner_agent), **executor_args
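For orientation, a minimal usage sketch (not part of the diff) of the three new template fields on `Agent`. The `{{ .System }}`/`{{ .Prompt }}`/`{{ .Response }}` placeholder names come from the split calls above; the role, goal, and template strings are illustrative only.

```python
# Hypothetical sketch: custom system/prompt/response templates on an Agent.
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Summarize recent LLM papers",
    backstory="A meticulous analyst.",
    system_template="<|system|>{{ .System }}</s>",
    prompt_template="<|user|>{{ .Prompt }}</s>",
    response_template="<|assistant|>{{ .Response }}</s>",
)
# With response_template set, the text after "{{ .Response }}" ("</s>" here)
# is also registered as an extra stop word for the bound LLM.
```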
@@ -40,6 +40,9 @@ class CrewAgentExecutor(AgentExecutor):
     have_forced_answer: bool = False
     force_answer_max_iterations: Optional[int] = None
     step_callback: Optional[Any] = None
+    system_template: Optional[str] = None
+    prompt_template: Optional[str] = None
+    response_template: Optional[str] = None
 
     @root_validator()
     def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -113,6 +116,7 @@ class CrewAgentExecutor(AgentExecutor):
         # Allowing human input given task setting
         if self.task.human_input:
             self.should_ask_for_human_input = True
+
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -128,8 +132,10 @@ class CrewAgentExecutor(AgentExecutor):
                 intermediate_steps,
                 run_manager=run_manager,
             )
 
+            if self.step_callback:
+                self.step_callback(next_step_output)
 
             if isinstance(next_step_output, AgentFinish):
                 # Creating long term memory
                 create_long_term_memory = threading.Thread(
@@ -292,7 +298,6 @@ class CrewAgentExecutor(AgentExecutor):
                 tool=tool_calling.tool_name,
                 tools=", ".join([tool.name.casefold() for tool in self.tools]),
             )
 
         yield AgentStep(action=agent_action, observation=observation)
 
     def _ask_human_input(self, final_answer: dict) -> str:
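A sketch (not part of the diff) of how the `step_callback` invoked above might be supplied by a caller; the logging body and agent values are illustrative.

```python
# Illustrative sketch: a step callback that logs each intermediate step.
# The executor now calls step_callback(next_step_output) after every
# _take_next_step, so intermediate actions and AgentFinish results are both seen.
from crewai import Agent

def log_step(step_output):
    # step_output may be an AgentFinish or a list of (AgentAction, observation)
    # pairs, depending on what the step produced.
    print(f"[step] {step_output}")

researcher = Agent(
    role="Researcher",
    goal="Track new LLM releases",
    backstory="A diligent analyst.",
    step_callback=log_step,  # forwarded down to CrewAgentExecutor.step_callback
)
```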
@@ -52,7 +52,6 @@ class CrewAgentParser(ReActSingleInputOutputParser):
             action_input = action_match.group(2)
             tool_input = action_input.strip(" ")
-            tool_input = tool_input.strip('"')
 
             return AgentAction(action, tool_input, text)
 
         elif includes_answer:
@@ -40,7 +40,7 @@ poetry run {{folder_name}}
 
 This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
 
-This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folser
+This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.
 
 ## Understanding Your Crew
 
@@ -51,7 +51,7 @@ The {{name}} Crew is composed of multiple AI agents, each with unique roles, goa
 For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI.
 - Visit our [documentation](https://docs.crewai.com)
 - Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
-- [Joing our Discord](https://discord.com/invite/X4JWnZnxPb)
-- [Chat wtih our docs](https://chatg.pt/DWjSBZn)
+- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
+- [Chat with our docs](https://chatg.pt/DWjSBZn)
 
-Let's create wonders together with the power and simplicity of crewAI.
+Let's create wonders together with the power and simplicity of crewAI.
@@ -48,6 +48,7 @@ class Crew(BaseModel):
         verbose: Indicates the verbosity level for logging during execution.
         config: Configuration settings for the crew.
         max_rpm: Maximum number of requests per minute for the crew execution to be respected.
+        prompt_file: Path to the prompt json file to be used for the crew.
         id: A unique identifier for the crew instance.
         full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
         task_callback: Callback to be executed after each task for every agents execution.
@@ -115,13 +116,9 @@ class Crew(BaseModel):
         default=None,
         description="Maximum number of requests per minute for the crew execution to be respected.",
     )
-    language: str = Field(
-        default="en",
-        description="Language used for the crew, defaults to English.",
-    )
-    language_file: str = Field(
+    prompt_file: str = Field(
         default=None,
-        description="Path to the language file to be used for the crew.",
+        description="Path to the prompt json file to be used for the crew.",
     )
     output_log_file: Optional[Union[bool, str]] = Field(
         default=False,
@@ -242,7 +239,7 @@
         self._interpolate_inputs(inputs)
         self._set_tasks_callbacks()
 
-        i18n = I18N(language=self.language, language_file=self.language_file)
+        i18n = I18N(prompt_file=self.prompt_file)
         agentops.set_parent_key("daebe730-f54d-4af5-98df-e6946fb76d13")
         agentops.add_tags(['crewai'])
 
@@ -317,7 +314,7 @@
     def _run_hierarchical_process(self) -> str:
         """Creates and assigns a manager agent to make sure the crew completes the tasks."""
 
-        i18n = I18N(language=self.language, language_file=self.language_file)
+        i18n = I18N(prompt_file=self.prompt_file)
         try:
             self.manager_agent.allow_delegation = (
                 True  # Forcing Allow delegation to the manager
@@ -360,7 +357,8 @@
     def _set_tasks_callbacks(self) -> str:
         """Sets callback for every task suing task_callback"""
         for task in self.tasks:
-            self.task_callback = task.callback
+            if not task.callback:
+                task.callback = self.task_callback
 
     def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
         """Interpolates the inputs in the tasks and agents."""
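A sketch (not part of the diff) of the renamed knob: in place of `language`/`language_file`, a crew now takes a single `prompt_file` pointing at a JSON file of prompt slices. The file path and task contents are illustrative.

```python
# Hypothetical sketch: constructing a Crew with a custom prompt file.
from crewai import Agent, Crew, Task

researcher = Agent(role="Researcher", goal="Find facts", backstory="An analyst.")
task = Task(
    description="Collect three facts about LLMs",
    expected_output="Three bullet points",
    agent=researcher,
)
crew = Crew(
    agents=[researcher],
    tasks=[task],
    prompt_file="custom_prompts.json",  # illustrative path; parsed via I18N(prompt_file=...)
)
```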
@@ -1,6 +1,8 @@
+import re
 import threading
 import uuid
 from typing import Any, Dict, List, Optional, Type
+import os
 
 from langchain_openai import ChatOpenAI
 from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
@@ -245,7 +247,16 @@ class Task(BaseModel):
                     return exported_result.model_dump()
                 return exported_result
             except Exception:
                 pass
+            # sometimes the response contains valid JSON in the middle of text
+            match = re.search(r"({.*})", result, re.DOTALL)
+            if match:
+                try:
+                    exported_result = model.model_validate_json(match.group(0))
+                    if self.output_json:
+                        return exported_result.model_dump()
+                    return exported_result
+                except Exception:
+                    pass
 
             llm = self.agent.function_calling_llm or self.agent.llm
@@ -281,6 +292,11 @@ class Task(BaseModel):
         return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
 
     def _save_file(self, result: Any) -> None:
+        directory = os.path.dirname(self.output_file)
+
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
         with open(self.output_file, "w") as file:
             file.write(result)
         return None
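The new fallback above, shown standalone: pull the first `{...}` block out of a chatty model response and validate it with pydantic. The `Output` model and response string are illustrative.

```python
# Standalone illustration of the regex-based JSON recovery added to Task.
import re
from pydantic import BaseModel

class Output(BaseModel):
    name: str
    score: int

result = 'Sure! Here is the JSON: {"name": "crewai", "score": 10} Hope it helps.'
match = re.search(r"({.*})", result, re.DOTALL)  # greedy: first "{" to last "}"
if match:
    parsed = Output.model_validate_json(match.group(0))
    print(parsed.model_dump())  # {'name': 'crewai', 'score': 10}
```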
@@ -88,7 +88,9 @@ class Telemetry:
             self._add_attribute(span, "python_version", platform.python_version())
             self._add_attribute(span, "crew_id", str(crew.id))
             self._add_attribute(span, "crew_process", crew.process)
-            self._add_attribute(span, "crew_language", crew.language)
+            self._add_attribute(
+                span, "crew_language", crew.prompt_file if crew.i18n else "None"
+            )
             self._add_attribute(span, "crew_memory", crew.memory)
             self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
             self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
@@ -103,7 +105,7 @@ class Telemetry:
                         "verbose?": agent.verbose,
                         "max_iter": agent.max_iter,
                         "max_rpm": agent.max_rpm,
-                        "i18n": agent.i18n.language,
+                        "i18n": agent.i18n.prompt_file,
                         "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                         "delegation_enabled?": agent.allow_delegation,
                         "tools_names": [
@@ -232,7 +234,7 @@ class Telemetry:
                         "verbose?": agent.verbose,
                         "max_iter": agent.max_iter,
                         "max_rpm": agent.max_rpm,
-                        "i18n": agent.i18n.language,
+                        "i18n": agent.i18n.prompt_file,
                         "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                         "delegation_enabled?": agent.allow_delegation,
                         "tools_names": [
@@ -34,11 +34,11 @@ class AgentTools(BaseModel):
         return tools
 
     def delegate_work(self, coworker: str, task: str, context: str):
-        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
+        """Useful to delegate a specific task to a co-worker passing all necessary context and names."""
         return self._execute(coworker, task, context)
 
     def ask_question(self, coworker: str, question: str, context: str):
-        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
+        """Useful to ask a question, opinion or take from a co-worker passing all necessary context and names."""
         return self._execute(coworker, question, context)
 
     def _execute(self, agent, task, context):
@@ -67,6 +67,6 @@ class AgentTools(BaseModel):
         task = Task(
             description=task,
             agent=agent,
-            expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
+            expected_output="Your best answer to your co-worker asking you this, accounting for the context shared.",
         )
         return agent.execute_task(task, context)
@@ -1,6 +1,7 @@
 import ast
 from textwrap import dedent
 from typing import Any, List, Union
+from difflib import SequenceMatcher
 
 from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI
@@ -31,13 +32,13 @@ class ToolUsage:
     Class that represents the usage of a tool by an agent.
 
     Attributes:
-        task: Task being executed.
-        tools_handler: Tools handler that will manage the tool usage.
-        tools: List of tools available for the agent.
-        original_tools: Original tools available for the agent before being converted to BaseTool.
-        tools_description: Description of the tools available for the agent.
-        tools_names: Names of the tools available for the agent.
-        function_calling_llm: Language model to be used for the tool usage.
+        task: Task being executed.
+        tools_handler: Tools handler that will manage the tool usage.
+        tools: List of tools available for the agent.
+        original_tools: Original tools available for the agent before being converted to BaseTool.
+        tools_description: Description of the tools available for the agent.
+        tools_names: Names of the tools available for the agent.
+        function_calling_llm: Language model to be used for the tool usage.
     """
 
     def __init__(
@@ -225,12 +226,15 @@
 
     def _select_tool(self, tool_name: str) -> BaseTool:
         for tool in self.tools:
-            if tool.name.lower().strip() == tool_name.lower().strip():
+            if (
+                tool.name.lower().strip() == tool_name.lower().strip()
+                or SequenceMatcher(None, tool.name.lower().strip(), tool_name.lower().strip()).ratio() > 0.9
+            ):
                 return tool
         self.task.increment_tools_errors()
         if tool_name and tool_name != "":
             raise Exception(
-                f"Action '{tool_name}' don't exist, these are the only available Actions: {self.tools_description}"
+                f"Action '{tool_name}' don't exist, these are the only available Actions:\n {self.tools_description}"
             )
         else:
             raise Exception(
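A quick standalone demonstration of the fuzzy tool lookup added above: `difflib.SequenceMatcher` lets a near-miss tool name (typo, stray space) still resolve, as long as the similarity ratio clears 0.9. Tool names here are illustrative.

```python
# Standalone demo of the fuzzy-match condition used in _select_tool.
from difflib import SequenceMatcher

def matches(tool_name: str, requested: str) -> bool:
    a, b = tool_name.lower().strip(), requested.lower().strip()
    return a == b or SequenceMatcher(None, a, b).ratio() > 0.9

print(matches("Search the internet", "search the internet "))  # True (exact after normalizing)
print(matches("Search the internet", "search the internets"))  # True (ratio is about 0.97)
print(matches("Search the internet", "calculator"))            # False
```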
@@ -270,17 +274,17 @@
             else ToolCalling
         )
         converter = Converter(
-            text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid ouput schema:\n\n{tool_string}```",
+            text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n{tool_string}```",
             llm=self.function_calling_llm,
             model=model,
             instructions=dedent(
                 """\
-                The schema should have the following structure, only two keys:
-                - tool_name: str
-                - arguments: dict (with all arguments being passed)
+                The schema should have the following structure, only two keys:
+                - tool_name: str
+                - arguments: dict (with all arguments being passed)
 
-                Example:
-                {"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
+                Example:
+                {"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
             ),
             max_attemps=1,
         )
@@ -292,7 +296,8 @@
         tool_name = self.action.tool
         tool = self._select_tool(tool_name)
         try:
-            arguments = ast.literal_eval(self.action.tool_input)
+            tool_input = self._validate_tool_input(self.action.tool_input)
+            arguments = ast.literal_eval(tool_input)
         except Exception:
             return ToolUsageErrorException(
                 f'{self._i18n.errors("tool_arguments_error")}'
@@ -318,3 +323,50 @@
             return self._tool_calling(tool_string)
 
         return calling
+
+    def _validate_tool_input(self, tool_input: str) -> dict:
+        try:
+            ast.literal_eval(tool_input)
+            return tool_input
+        except Exception:
+            # Clean and ensure the string is properly enclosed in braces
+            tool_input = tool_input.strip()
+            if not tool_input.startswith("{"):
+                tool_input = "{" + tool_input
+            if not tool_input.endswith("}"):
+                tool_input += "}"
+
+            # Manually split the input into key-value pairs
+            entries = tool_input.strip("{} ").split(",")
+            formatted_entries = []
+
+            for entry in entries:
+                if ":" not in entry:
+                    continue  # Skip malformed entries
+                key, value = entry.split(":", 1)
+
+                key = key.strip().strip(
+                    '"'
+                )  # Remove extraneous white spaces and quotes
+                value = value.strip()
+
+                # Check and format the value based on its type
+                if value.isdigit():  # Check if value is a digit, hence integer
+                    formatted_value = value
+                elif value.lower() in [
+                    "true",
+                    "false",
+                    "null",
+                ]:  # Check for boolean and null values
+                    formatted_value = value.lower()
+                else:
+                    # Assume the value is a string and needs quotes
+                    formatted_value = '"' + value.strip('"').replace('"', '\\"') + '"'
+
+                # Rebuild the entry with proper quoting
+                formatted_entry = f'"{key}": {formatted_value}'
+                formatted_entries.append(formatted_entry)
+
+            # Reconstruct the JSON string
+            new_json_string = "{" + ", ".join(formatted_entries) + "}"
+            return new_json_string
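What the new `_validate_tool_input` buys, shown standalone: a sloppy, non-literal Action Input is coerced into parseable text before `ast.literal_eval` sees it. This is a condensed copy of the repair pass above, covering strings and ints only; the input string is illustrative.

```python
# Condensed, standalone version of the repair pass in _validate_tool_input.
import ast

def validate_tool_input(tool_input: str) -> str:
    try:
        ast.literal_eval(tool_input)
        return tool_input  # already a valid literal, pass through
    except Exception:
        entries = tool_input.strip("{} ").split(",")
        formatted = []
        for entry in entries:
            if ":" not in entry:
                continue  # skip malformed entries
            key, value = entry.split(":", 1)
            key, value = key.strip().strip('"'), value.strip()
            if value.isdigit():
                formatted.append(f'"{key}": {value}')
            else:
                formatted.append(f'"{key}": "' + value.strip('"').replace('"', '\\"') + '"')
        return "{" + ", ".join(formatted) + "}"

print(validate_tool_input('query: AI news, limit: 5'))
# -> {"query": "AI news", "limit": 5}  (now accepted by ast.literal_eval)
```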
@@ -6,12 +6,12 @@
   },
   "slices": {
     "observation": "\nObservation",
-    "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought: ",
+    "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
     "memory": "\n\n# Useful context: \n{memory}",
     "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
-    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
+    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
     "no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
     "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
     "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
@@ -20,7 +20,6 @@
     "getting_input": "This is the agent final answer: {final_answer}\nPlease provide a feedback: "
   },
   "errors": {
     "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
-    "force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
     "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
@@ -30,7 +29,7 @@
     "tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}"
   },
   "tools": {
-    "delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
-    "ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
+    "delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
+    "ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
   }
 }
@@ -2,44 +2,36 @@ import json
 import os
 from typing import Dict, Optional
 
-from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_validator
+from pydantic import BaseModel, Field, PrivateAttr, model_validator
 
 
 class I18N(BaseModel):
-    _translations: Dict[str, Dict[str, str]] = PrivateAttr()
-    language_file: Optional[str] = Field(
+    _prompts: Dict[str, Dict[str, str]] = PrivateAttr()
+    prompt_file: Optional[str] = Field(
         default=None,
-        description="Path to the translation file to load",
-    )
-    language: Optional[str] = Field(
-        default="en",
-        description="Language used to load translations",
+        description="Path to the prompt_file file to load",
     )
 
     @model_validator(mode="after")
-    def load_translation(self) -> "I18N":
-        """Load translations from a JSON file based on the specified language."""
+    def load_prompts(self) -> "I18N":
+        """Load prompts from a JSON file."""
         try:
-            if self.language_file:
-                with open(self.language_file, "r") as f:
-                    self._translations = json.load(f)
+            if self.prompt_file:
+                with open(self.prompt_file, "r") as f:
+                    self._prompts = json.load(f)
             else:
                 dir_path = os.path.dirname(os.path.realpath(__file__))
-                prompts_path = os.path.join(
-                    dir_path, f"../translations/{self.language}.json"
-                )
+                prompts_path = os.path.join(dir_path, f"../translations/en.json")
 
                 with open(prompts_path, "r") as f:
-                    self._translations = json.load(f)
+                    self._prompts = json.load(f)
         except FileNotFoundError:
-            raise ValidationError(
-                f"Translation file for language '{self.language}' not found."
-            )
+            raise Exception(f"Prompt file '{self.prompt_file}' not found.")
         except json.JSONDecodeError:
-            raise ValidationError(f"Error decoding JSON from the prompts file.")
+            raise Exception(f"Error decoding JSON from the prompts file.")
 
-        if not self._translations:
-            self._translations = {}
+        if not self._prompts:
+            self._prompts = {}
 
         return self
@@ -54,6 +46,6 @@
 
     def retrieve(self, kind, key) -> str:
         try:
-            return self._translations[kind][key]
+            return self._prompts[kind][key]
         except:
-            raise ValidationError(f"Translation for '{kind}':'{key}' not found.")
+            raise Exception(f"Prompt for '{kind}':'{key}' not found.")
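A sketch (not part of the diff) of what a custom prompt file might contain and how `I18N` consumes it. The JSON mirrors the structure of `translations/en.json` (top-level `slices`, `errors`, `tools` keys); the path is illustrative, and a real file should keep every key the agents rely on.

```python
# Hypothetical sketch: writing and loading a custom prompt file.
import json
from crewai.utilities import I18N

custom = {
    "slices": {
        "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}"
    },
    "errors": {},
    "tools": {},
}
with open("custom_prompts.json", "w") as f:
    json.dump(custom, f)

i18n = I18N(prompt_file="custom_prompts.json")
print(i18n.slice("role_playing"))  # the overridden slice text
```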
@@ -1,3 +1,4 @@
+import os
 from pathlib import Path
 
 import appdirs
@@ -13,6 +14,11 @@ def db_storage_path():
 
 
 def get_project_directory_name():
-    cwd = Path.cwd()
-    project_directory_name = cwd.name
-    return project_directory_name
+    project_directory_name = os.environ.get("CREWAI_STORAGE_DIR")
+
+    if project_directory_name:
+        return project_directory_name
+    else:
+        cwd = Path.cwd()
+        project_directory_name = cwd.name
+        return project_directory_name
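The env-var override added above, exercised directly in a condensed standalone copy of the new logic; the directory name is illustrative.

```python
# Condensed copy of the new lookup: CREWAI_STORAGE_DIR wins over the cwd name.
import os
from pathlib import Path

def get_project_directory_name() -> str:
    return os.environ.get("CREWAI_STORAGE_DIR") or Path.cwd().name

os.environ["CREWAI_STORAGE_DIR"] = "my_project"
print(get_project_directory_name())  # my_project

del os.environ["CREWAI_STORAGE_DIR"]
print(get_project_directory_name())  # falls back to the cwd folder name
```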
@@ -1,4 +1,4 @@
-from typing import Any, ClassVar
+from typing import Any, ClassVar, Optional
 
 from langchain.prompts import BasePromptTemplate, PromptTemplate
 from pydantic import BaseModel, Field
@@ -7,16 +7,15 @@ from crewai.utilities import I18N
 
 
 class Prompts(BaseModel):
-    """Manages and generates prompts for a generic agent with support for different languages."""
+    """Manages and generates prompts for a generic agent."""
 
     i18n: I18N = Field(default=I18N())
     tools: list[Any] = Field(default=[])
+    system_template: Optional[str] = None
+    prompt_template: Optional[str] = None
+    response_template: Optional[str] = None
     SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"
 
-    def task_execution_without_tools(self) -> BasePromptTemplate:
-        """Generate a prompt for task execution without tools components."""
-        return self._build_prompt(["role_playing", "task"])
-
     def task_execution(self) -> BasePromptTemplate:
         """Generate a standard prompt for task execution."""
         slices = ["role_playing"]
@@ -24,12 +23,42 @@ class Prompts(BaseModel):
             slices.append("tools")
         else:
             slices.append("no_tools")
-        slices.append("task")
-        return self._build_prompt(slices)
 
-    def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
+        slices.append("task")
+
+        if not self.system_template and not self.prompt_template:
+            return self._build_prompt(slices)
+        else:
+            return self._build_prompt(
+                slices,
+                self.system_template,
+                self.prompt_template,
+                self.response_template,
+            )
+
+    def _build_prompt(
+        self,
+        components: list[str],
+        system_template=None,
+        prompt_template=None,
+        response_template=None,
+    ) -> BasePromptTemplate:
         """Constructs a prompt string from specified components."""
-        prompt_parts = [self.i18n.slice(component) for component in components]
-        prompt_parts.append(self.SCRATCHPAD_SLICE)
-        prompt = PromptTemplate.from_template("".join(prompt_parts))
+        if not system_template and not prompt_template:
+            prompt_parts = [self.i18n.slice(component) for component in components]
+            prompt_parts.append(self.SCRATCHPAD_SLICE)
+            prompt = PromptTemplate.from_template("".join(prompt_parts))
+        else:
+            prompt_parts = [
+                self.i18n.slice(component)
+                for component in components
+                if component != "task"
+            ]
+            system = system_template.replace("{{ .System }}", "".join(prompt_parts))
+            prompt = prompt_template.replace(
+                "{{ .Prompt }}",
+                "".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]),
+            )
+            response = response_template.split("{{ .Response }}")[0]
+            prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}")
         return prompt
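A worked example (illustrative template strings, not part of the diff) of the string assembly in the new `_build_prompt` template branch: the system slices replace `{{ .System }}`, the task slice plus scratchpad replace `{{ .Prompt }}`, and everything before `{{ .Response }}` becomes the response prefix.

```python
# Worked example of the template path through _build_prompt.
system_template = "<|system|>\n{{ .System }}</s>"
prompt_template = "<|user|>\n{{ .Prompt }}</s>"
response_template = "<|assistant|>\n{{ .Response }}</s>"

system = system_template.replace("{{ .System }}", "You are {role}. {backstory}...")
prompt = prompt_template.replace("{{ .Prompt }}", "\nCurrent Task: {input}...\n{agent_scratchpad}")
response = response_template.split("{{ .Response }}")[0]  # "<|assistant|>\n"

final = f"{system}\n{prompt}\n{response}"
# final is the string handed to PromptTemplate.from_template(...); the text
# after "{{ .Response }}" ("</s>") is what Agent registers as a stop word.
print(final)
```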