Mirror of https://github.com/crewAIInc/crewAI.git
Adding support for system, prompt and answer templates
@@ -121,6 +121,15 @@ class Agent(BaseModel):
     callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
         default=None, description="Callback to be executed"
     )
+    system_template: Optional[str] = Field(
+        default=None, description="System format for the agent."
+    )
+    prompt_template: Optional[str] = Field(
+        default=None, description="Prompt format for the agent."
+    )
+    response_template: Optional[str] = Field(
+        default=None, description="Response format for the agent."
+    )

     _original_role: str | None = None
     _original_goal: str | None = None
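
Note: the three new fields let a caller wrap crewAI's stock prompt slices in a model-specific chat format. A minimal usage sketch (the role/goal/backstory values are placeholders; the Llama-3-style header tokens mirror the test added at the end of this commit):

    from crewai import Agent

    agent = Agent(
        role="research specialist",
        goal="summarize the findings",
        backstory="I have been researching this field for years",
        # {{ .System }}, {{ .Prompt }} and {{ .Response }} are the placeholders
        # the new Prompts._build_prompt() substitutes into.
        system_template="<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>",
        prompt_template="<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>",
        response_template="<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>",
    )
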
@@ -167,7 +176,9 @@ class Agent(BaseModel):
                 self.llm.callbacks = []

             # Check if an instance of TokenCalcHandler already exists in the list
-            if not any(isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks):
+            if not any(
+                isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
+            ):
                 self.llm.callbacks.append(token_handler)

         if not self.agent_executor:
@@ -296,7 +307,13 @@ class Agent(BaseModel):
                 "request_within_rpm_limit"
             ] = self._rpm_controller.check_or_wait

-        prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
+        prompt = Prompts(
+            i18n=self.i18n,
+            tools=tools,
+            system_template=self.system_template,
+            prompt_template=self.prompt_template,
+            response_template=self.response_template,
+        ).task_execution()

         execution_prompt = prompt.partial(
             goal=self.goal,
@@ -304,7 +321,13 @@ class Agent(BaseModel):
             backstory=self.backstory,
         )

-        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
+        stop_words = [self.i18n.slice("observation")]
+        if self.response_template:
+            stop_words.append(
+                self.response_template.split("{{ .Response }}")[1].strip()
+            )
+
+        bind = self.llm.bind(stop=stop_words)
         inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
         self.agent_executor = CrewAgentExecutor(
             agent=RunnableAgent(runnable=inner_agent), **executor_args
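
A quick worked example of the stop-word derivation above, as a standalone sketch using the Llama-3-style response template from the test at the bottom of this commit:

    response_template = "<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>"

    # Everything after the placeholder becomes an extra stop word, so generation
    # halts at the template's closing token instead of running past it.
    stop_word = response_template.split("{{ .Response }}")[1].strip()
    print(stop_word)  # -> <|eot_id|>

Note the [1] index assumes the placeholder is present: a response_template without "{{ .Response }}" would raise an IndexError here.
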

@@ -40,6 +40,9 @@ class CrewAgentExecutor(AgentExecutor):
     have_forced_answer: bool = False
     force_answer_max_iterations: Optional[int] = None
     step_callback: Optional[Any] = None
+    system_template: Optional[str] = None
+    prompt_template: Optional[str] = None
+    response_template: Optional[str] = None

     @root_validator()
     def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -113,6 +116,7 @@ class CrewAgentExecutor(AgentExecutor):
         # Allowing human input given task setting
         if self.task.human_input:
             self.should_ask_for_human_input = True
+
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -128,8 +132,10 @@ class CrewAgentExecutor(AgentExecutor):
                 intermediate_steps,
                 run_manager=run_manager,
             )
+
             if self.step_callback:
                 self.step_callback(next_step_output)
+
             if isinstance(next_step_output, AgentFinish):
                 # Creating long term memory
                 create_long_term_memory = threading.Thread(
@@ -292,7 +298,6 @@ class CrewAgentExecutor(AgentExecutor):
                     tool=tool_calling.tool_name,
                     tools=", ".join([tool.name.casefold() for tool in self.tools]),
                 )
-
             yield AgentStep(action=agent_action, observation=observation)

     def _ask_human_input(self, final_answer: dict) -> str:

@@ -1,4 +1,4 @@
-from typing import Any, ClassVar
+from typing import Any, ClassVar, Optional

 from langchain.prompts import BasePromptTemplate, PromptTemplate
 from pydantic import BaseModel, Field
@@ -11,12 +11,11 @@ class Prompts(BaseModel):

     i18n: I18N = Field(default=I18N())
     tools: list[Any] = Field(default=[])
+    system_template: Optional[str] = None
+    prompt_template: Optional[str] = None
+    response_template: Optional[str] = None
     SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

-    def task_execution_without_tools(self) -> BasePromptTemplate:
-        """Generate a prompt for task execution without tools components."""
-        return self._build_prompt(["role_playing", "task"])
-
     def task_execution(self) -> BasePromptTemplate:
         """Generate a standard prompt for task execution."""
         slices = ["role_playing"]
@@ -24,12 +23,42 @@ class Prompts(BaseModel):
             slices.append("tools")
         else:
             slices.append("no_tools")
-        slices.append("task")
-        return self._build_prompt(slices)
-
-    def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
+        slices.append("task")
+
+        if not self.system_template and not self.prompt_template:
+            return self._build_prompt(slices)
+        else:
+            return self._build_prompt(
+                slices,
+                self.system_template,
+                self.prompt_template,
+                self.response_template,
+            )
+
+    def _build_prompt(
+        self,
+        components: list[str],
+        system_template=None,
+        prompt_template=None,
+        response_template=None,
+    ) -> BasePromptTemplate:
         """Constructs a prompt string from specified components."""
-        prompt_parts = [self.i18n.slice(component) for component in components]
-        prompt_parts.append(self.SCRATCHPAD_SLICE)
-        prompt = PromptTemplate.from_template("".join(prompt_parts))
+        if not system_template and not prompt_template:
+            prompt_parts = [self.i18n.slice(component) for component in components]
+            prompt_parts.append(self.SCRATCHPAD_SLICE)
+            prompt = PromptTemplate.from_template("".join(prompt_parts))
+        else:
+            prompt_parts = [
+                self.i18n.slice(component)
+                for component in components
+                if component != "task"
+            ]
+            system = system_template.replace("{{ .System }}", "".join(prompt_parts))
+            prompt = prompt_template.replace(
+                "{{ .Prompt }}",
+                "".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]),
+            )
+            response = response_template.split("{{ .Response }}")[0]
+            prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}")
         return prompt
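
To make the new else branch concrete, here is a standalone sketch of the stitching with toy strings standing in for the i18n slices and templates (the real slice text lives in crewAI's translation files; the <sys>/<usr>/<bot> wrappers are invented for illustration):

    SCRATCHPAD_SLICE = "\n{agent_scratchpad}"

    # Toy stand-ins for i18n.slice("role_playing") / ("no_tools") / ("task").
    role_playing = "You are {role}. "
    no_tools = "Answer directly."
    task = "\nCurrent Task: {input}"

    system_template = "<sys>{{ .System }}</sys>"
    prompt_template = "<usr>{{ .Prompt }}</usr>"
    response_template = "<bot>{{ .Response }}</bot>"

    # Non-"task" slices fill the system block ...
    system = system_template.replace("{{ .System }}", role_playing + no_tools)
    # ... the task slice plus the scratchpad fill the prompt block ...
    prompt = prompt_template.replace("{{ .Prompt }}", task + SCRATCHPAD_SLICE)
    # ... and the response template is cut at its placeholder so the model
    # continues from the assistant header.
    response = response_template.split("{{ .Response }}")[0]

    print(f"{system}\n{prompt}\n{response}")
    # <sys>You are {role}. Answer directly.</sys>
    # <usr>
    # Current Task: {input}
    # {agent_scratchpad}</usr>
    # <bot>

The surviving curly-brace names ({role}, {input}, {agent_scratchpad}) are exactly the variables PromptTemplate.from_template() picks up in the real code.
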

@@ -754,6 +754,7 @@ def test_agent_definition_based_on_dict():
     assert agent.verbose == True
     assert agent.tools == []

+
 # test for human input
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_agent_human_input():
@@ -780,6 +781,7 @@ def test_agent_human_input():
     mock_human_input.assert_called_once()
     assert output == "Hello"

+
 def test_interpolate_inputs():
     agent = Agent(
         role="{topic} specialist",
@@ -797,3 +799,46 @@ def test_interpolate_inputs():
     assert agent.goal == "Figure stuff out"
     assert agent.backstory == "I am the master of nothing"

+
+def test_system_and_prompt_template():
+    agent = Agent(
+        role="{topic} specialist",
+        goal="Figure {goal} out",
+        backstory="I am the master of {role}",
+        system_template="""<|start_header_id|>system<|end_header_id|>
+
+{{ .System }}<|eot_id|>""",
+        prompt_template="""<|start_header_id|>user<|end_header_id|>
+
+{{ .Prompt }}<|eot_id|>""",
+        response_template="""<|start_header_id|>assistant<|end_header_id|>
+
+{{ .Response }}<|eot_id|>""",
+    )
+
+    template = agent.agent_executor.agent.dict()["runnable"]["middle"][0]["template"]
+    assert (
+        template
+        == """<|start_header_id|>system<|end_header_id|>
+
+You are {role}. {backstory}
+Your personal goal is: {goal}To give my best complete final answer to the task use the exact following format:
+
+Thought: I now can give a great answer
+Final Answer: my best complete final answer to the task.
+Your final answer must be the great and the most complete as possible, it must be outcome described.
+
+I MUST use these formats, my job depends on it!<|eot_id|>
+<|start_header_id|>user<|end_header_id|>
+
+
+Current Task: {input}
+
+Begin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!
+
+Thought:
+{agent_scratchpad}<|eot_id|>
+<|start_header_id|>assistant<|end_header_id|>
+
+"""
+    )
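
A note on the ["runnable"]["middle"][0] access in this test: the inner agent assembled in the Agent changes above is the LCEL pipeline agent_args | execution_prompt | bind | CrewAgentParser(agent=self), and LangChain serializes a RunnableSequence as first/middle/last. A rough sketch of the resulting shape (an interpretation based only on what this diff shows):

    # first  -> agent_args
    # middle -> [execution_prompt, bind]
    # last   -> CrewAgentParser(agent=self)
    #
    # so .dict()["runnable"]["middle"][0] is the execution prompt, and its
    # ["template"] key holds the fully stitched string the assertion checks.
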