Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 23:58:34 +00:00)
@@ -23,6 +23,7 @@ from crewai.agents import (
     CrewAgentOutputParser,
     ToolsHandler,
 )
+from crewai.i18n import I18N
 from crewai.prompts import Prompts

@@ -54,13 +55,6 @@ class Agent(BaseModel):
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Objective of the agent")
     backstory: str = Field(description="Backstory of the agent")
-    llm: Optional[Any] = Field(
-        default_factory=lambda: ChatOpenAI(
-            temperature=0.7,
-            model_name="gpt-4",
-        ),
-        description="Language model that will run the agent.",
-    )
     memory: bool = Field(
         default=True, description="Whether the agent should have memory or not"
     )

@@ -85,6 +79,16 @@ class Agent(BaseModel):
     cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
         default=CacheHandler(), description="An instance of the CacheHandler class."
     )
+    i18n: Optional[I18N] = Field(
+        default=I18N(), description="Internationalization settings."
+    )
+    llm: Optional[Any] = Field(
+        default_factory=lambda: ChatOpenAI(
+            temperature=0.7,
+            model_name="gpt-4",
+        ),
+        description="Language model that will run the agent.",
+    )

     @field_validator("id", mode="before")
     @classmethod

@@ -114,8 +118,8 @@ class Agent(BaseModel):
             Output of the agent
         """
         if context:
-            task = "\n".join(
-                [task, "\nThis is the context you are working with:", context]
+            task = self.i18n.slice("task_with_context").format(
+                task=task, context=context
             )

         tools = tools or self.tools

@@ -148,6 +152,7 @@ class Agent(BaseModel):
             "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
         }
         executor_args = {
+            "i18n": self.i18n,
             "tools": self.tools,
             "verbose": self.verbose,
             "handle_parsing_errors": True,

@@ -160,9 +165,9 @@ class Agent(BaseModel):
             )
             executor_args["memory"] = summary_memory
             agent_args["chat_history"] = lambda x: x["chat_history"]
-            prompt = Prompts().task_execution_with_memory()
+            prompt = Prompts(i18n=self.i18n).task_execution_with_memory()
         else:
-            prompt = Prompts().task_execution()
+            prompt = Prompts(i18n=self.i18n).task_execution()

         execution_prompt = prompt.partial(
             goal=self.goal,

@@ -170,13 +175,15 @@ class Agent(BaseModel):
             backstory=self.backstory,
         )

-        bind = self.llm.bind(stop=["\nObservation"])
+        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
         inner_agent = (
             agent_args
             | execution_prompt
             | bind
             | CrewAgentOutputParser(
-                tools_handler=self.tools_handler, cache=self.cache_handler
+                tools_handler=self.tools_handler,
+                cache=self.cache_handler,
+                i18n=self.i18n,
             )
         )
         self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)
@@ -1,17 +1,23 @@
 from langchain_core.exceptions import OutputParserException

+from crewai.i18n import I18N
+

 class TaskRepeatedUsageException(OutputParserException):
     """Exception raised when a task is used twice in a roll."""

+    i18n: I18N = I18N()
     error: str = "TaskRepeatedUsageException"
-    message: str = "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
+    message: str

-    def __init__(self, tool: str, tool_input: str, text: str):
+    def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
+        self.i18n = i18n
         self.text = text
         self.tool = tool
         self.tool_input = tool_input
-        self.message = self.message.format(tool=tool, tool_input=tool_input)
+        self.message = self.i18n.errors("task_repeated_usage").format(
+            tool=tool, tool_input=tool_input
+        )

         super().__init__(
             error=self.error,
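The exception now receives its I18N from the caller and pulls its message from the "task_repeated_usage" translation instead of a hard-coded template. A minimal sketch of raising and reading it, with illustrative tool values (the parser hunk below passes the same keyword arguments):

from crewai.agents.exceptions import TaskRepeatedUsageException
from crewai.i18n import I18N

try:
    raise TaskRepeatedUsageException(
        i18n=I18N(language="en"),    # loads translations/en.json
        tool="search",               # illustrative values, not from this diff
        tool_input="latest AI news",
        text="Thought: I should search again...",
    )
except TaskRepeatedUsageException as exc:
    # exc.message is errors["task_repeated_usage"] from en.json with {tool}
    # and {tool_input} filled in.
    print(exc.message)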
@@ -1,5 +1,4 @@
 import time
-from textwrap import dedent
 from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

 from langchain.agents import AgentExecutor

@@ -12,11 +11,13 @@ from langchain_core.pydantic_v1 import root_validator
 from langchain_core.tools import BaseTool
 from langchain_core.utils.input import get_color_mapping

-from ..tools.cache_tools import CacheTools
-from .cache.cache_hit import CacheHit
+from crewai.agents.cache.cache_hit import CacheHit
+from crewai.i18n import I18N
+from crewai.tools.cache_tools import CacheTools


 class CrewAgentExecutor(AgentExecutor):
+    i18n: I18N = I18N()
     iterations: int = 0
     max_iterations: Optional[int] = 15
     force_answer_max_iterations: Optional[int] = None

@@ -31,13 +32,7 @@ class CrewAgentExecutor(AgentExecutor):

     def _force_answer(self, output: AgentAction):
         return AgentStep(
-            action=output,
-            observation=dedent(
-                """\
-                I've used too many tools for this task.
-                I'm going to give you my absolute BEST Final answer now and
-                not use any more tools."""
-            ),
+            action=output, observation=self.i18n.errors("used_too_many_tools")
         )

     def _call(
@@ -4,9 +4,10 @@ from typing import Union
 from langchain.agents.output_parsers import ReActSingleInputOutputParser
 from langchain_core.agents import AgentAction, AgentFinish

-from .cache import CacheHandler, CacheHit
-from .exceptions import TaskRepeatedUsageException
-from .tools_handler import ToolsHandler
+from crewai.agents.cache import CacheHandler, CacheHit
+from crewai.agents.exceptions import TaskRepeatedUsageException
+from crewai.agents.tools_handler import ToolsHandler
+from crewai.i18n import I18N

 FINAL_ANSWER_ACTION = "Final Answer:"
 FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (

@@ -46,6 +47,7 @@ class CrewAgentOutputParser(ReActSingleInputOutputParser):

     tools_handler: ToolsHandler
     cache: CacheHandler
+    i18n: I18N

     def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
         FINAL_ANSWER_ACTION in text

@@ -65,10 +67,13 @@ class CrewAgentOutputParser(ReActSingleInputOutputParser):
             }
             if usage == last_tool_usage:
                 raise TaskRepeatedUsageException(
-                    tool=action, tool_input=tool_input, text=text
+                    text=text,
+                    tool=action,
+                    tool_input=tool_input,
+                    i18n=self.i18n,
                 )

-        if result := self.cache.read(action, tool_input):
+        if self.cache.read(action, tool_input):
             action = AgentAction(action, tool_input, text)
             return CacheHit(action=action, cache=self.cache)
@@ -16,6 +16,7 @@ from pydantic_core import PydanticCustomError

 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
+from crewai.i18n import I18N
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tools.agent_tools import AgentTools

@@ -44,6 +45,10 @@ class Crew(BaseModel):
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
     cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=CacheHandler())
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+    language: str = Field(
+        default="en",
+        description="Language used for the crew, defaults to English.",
+    )

     @field_validator("id", mode="before")
     @classmethod

@@ -99,6 +104,7 @@ class Crew(BaseModel):
         """Starts the crew to work on its assigned tasks."""
         for agent in self.agents:
             agent.cache_handler = self.cache_handler
+            agent.i18n = I18N(language=self.language)

         if self.process == Process.sequential:
             return self._sequential_loop()
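With the new language field, Crew pushes an I18N(language=...) into every agent when it starts working, alongside the shared cache handler. A rough caller-side sketch; the agents/tasks fields, the package-level exports, and Task's constructor are assumed from the wider codebase rather than shown in this hunk:

from crewai import Agent, Crew, Task  # assumed package-level exports

researcher = Agent(
    role="Researcher",
    goal="Summarize the latest AI news",
    backstory="You scan sources and distill them into short briefs.",
)
task = Task(description="Write a three-bullet AI news brief.", agent=researcher)

# language="en" resolves to src/crewai/translations/en.json; any other value
# needs a matching translations/<language>.json file next to it.
crew = Crew(agents=[researcher], tasks=[task], language="en")
crew.kickoff()  # sets agent.i18n = I18N(language="en") on each agent before running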
src/crewai/i18n.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+import json
+import os
+from typing import Dict, Optional
+
+from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_validator
+
+
+class I18N(BaseModel):
+    _translations: Optional[Dict[str, str]] = PrivateAttr()
+    language: Optional[str] = Field(
+        default="en",
+        description="Language used to load translations",
+    )
+
+    @model_validator(mode="after")
+    def load_translation(self) -> "I18N":
+        """Load translations from a JSON file based on the specified language."""
+        try:
+            dir_path = os.path.dirname(os.path.realpath(__file__))
+            prompts_path = os.path.join(dir_path, f"translations/{self.language}.json")
+
+            with open(prompts_path, "r") as f:
+                self._translations = json.load(f)
+        except FileNotFoundError:
+            raise ValidationError(
+                f"Trasnlation file for language '{self.language}' not found."
+            )
+        except json.JSONDecodeError:
+            raise ValidationError(f"Error decoding JSON from the prompts file.")
+        return self
+
+    def slice(self, slice: str) -> str:
+        return self.retrieve("slices", slice)
+
+    def errors(self, error: str) -> str:
+        return self.retrieve("errors", error)
+
+    def tools(self, error: str) -> str:
+        return self.retrieve("tools", error)
+
+    def retrieve(self, kind, key):
+        try:
+            return self._translations[kind].get(key)
+        except:
+            raise ValidationError(f"Translation for '{kind}':'{key}' not found.")
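I18N loads translations/<language>.json once at validation time and then only does dictionary lookups. A minimal sketch of the lookups the rest of this diff relies on (keys and placeholder names are the ones defined in translations/en.json below):

from crewai.i18n import I18N

i18n = I18N(language="en")  # reads src/crewai/translations/en.json

# Prompt slices consumed by Agent and Prompts:
stop_token = i18n.slice("observation")   # "\nObservation", used as the LLM stop word
role_slice = i18n.slice("role_playing")  # "You are {role}.\n{backstory}..."

# Error strings consumed by the executor, parser, exception, and agent tools:
too_many = i18n.errors("used_too_many_tools")
repeated = i18n.errors("task_repeated_usage").format(tool="search", tool_input="query")

# Tool descriptions consumed by AgentTools:
delegate_desc = i18n.tools("delegate_work").format(coworkers="Researcher, Writer")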
@@ -1,36 +1,15 @@
-import json
-import os
-from typing import ClassVar, Dict, Optional
+from typing import ClassVar

 from langchain.prompts import PromptTemplate
-from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_validator
+from pydantic import BaseModel, Field
+
+from .i18n import I18N


 class Prompts(BaseModel):
     """Manages and generates prompts for a generic agent with support for different languages."""

-    _prompts: Optional[Dict[str, str]] = PrivateAttr()
-    language: Optional[str] = Field(
-        default="en",
-        description="Language of the prompts.",
-    )
-
-    @model_validator(mode="after")
-    def load_prompts(self) -> "Prompts":
-        """Load prompts from a JSON file based on the specified language."""
-        try:
-            dir_path = os.path.dirname(os.path.realpath(__file__))
-            prompts_path = os.path.join(dir_path, f"prompts/{self.language}.json")
-
-            with open(prompts_path, "r") as f:
-                self._prompts = json.load(f)["slices"]
-        except FileNotFoundError:
-            raise ValidationError(
-                f"Prompt file for language '{self.language}' not found."
-            )
-        except json.JSONDecodeError:
-            raise ValidationError(f"Error decoding JSON from the prompts file.")
-        return self
+    i18n: I18N = Field(default=I18N())

     SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

@@ -48,10 +27,6 @@ class Prompts(BaseModel):

     def _build_prompt(self, components: [str]) -> str:
         """Constructs a prompt string from specified components."""
-        prompt_parts = [
-            self._prompts[component]
-            for component in components
-            if component in self._prompts
-        ]
+        prompt_parts = [self.i18n.slice(component) for component in components]
         prompt_parts.append(self.SCRATCHPAD_SLICE)
         return PromptTemplate.from_template("".join(prompt_parts))
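Prompts no longer parses prompts/<language>.json itself; _build_prompt simply concatenates whatever slices the injected I18N returns and appends the scratchpad slice. A rough sketch of that composition; the component list is an assumption for illustration (task_execution() is called from the agent.py hunk above, but its body is not part of this diff):

from crewai.i18n import I18N
from crewai.prompts import Prompts

prompts = Prompts(i18n=I18N(language="en"))

# Assumed component list, chosen from the slices defined in translations/en.json.
template = prompts._build_prompt(["role_playing", "tools", "task"])

# The result is a langchain PromptTemplate built from the concatenated slices
# plus "\n{agent_scratchpad}"; its input variables come from the placeholders.
print(sorted(template.input_variables))
# e.g. ['agent_scratchpad', 'backstory', 'goal', 'input', 'role', 'tool_names', 'tools']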
@@ -1,8 +0,0 @@
-{
-    "slices": {
-        "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
-        "memory": "This is the summary of your work so far:\n{chat_history}",
-        "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
-        "tools": "TOOLS:\n------\nYou have access to the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]"
-    }
-}
@@ -1,45 +1,34 @@
-from textwrap import dedent
-from typing import List
+from typing import List, Optional

 from langchain.tools import Tool
 from pydantic import BaseModel, Field

 from crewai.agent import Agent
+from crewai.i18n import I18N


 class AgentTools(BaseModel):
     """Default tools around agent delegation"""

     agents: List[Agent] = Field(description="List of agents in this crew.")
+    i18n: Optional[I18N] = Field(
+        default=I18N(), description="Internationalization settings."
+    )

     def tools(self):
         return [
             Tool.from_function(
                 func=self.delegate_work,
                 name="Delegate work to co-worker",
-                description=dedent(
-                    f"""\
-                    Useful to delegate a specific task to one of the
-                    following co-workers: [{', '.join([agent.role for agent in self.agents])}].
-                    The input to this tool should be a pipe (|) separated text of length
-                    three, representing the co-worker you want to ask it to (one of the options),
-                    the task and all actual context you have for the task.
-                    For example, `coworker|task|context`.
-                    """
+                description=self.i18n.tools("delegate_work").format(
+                    coworkers=", ".join([agent.role for agent in self.agents])
                 ),
             ),
             Tool.from_function(
                 func=self.ask_question,
                 name="Ask question to co-worker",
-                description=dedent(
-                    f"""\
-                    Useful to ask a question, opinion or take from on
-                    of the following co-workers: [{', '.join([agent.role for agent in self.agents])}].
-                    The input to this tool should be a pipe (|) separated text of length
-                    three, representing the co-worker you want to ask it to (one of the options),
-                    the question and all actual context you have for the question.
-                    For example, `coworker|question|context`.
-                    """
+                description=self.i18n.tools("ask_question").format(
+                    coworkers=", ".join([agent.role for agent in self.agents])
                 ),
             ),
         ]

@@ -57,10 +46,10 @@ class AgentTools(BaseModel):
         try:
             agent, task, context = command.split("|")
         except ValueError:
-            return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context\n"
+            return self.i18n.errors("agent_tool_missing_param")

         if not agent or not task or not context:
-            return "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n"
+            return self.i18n.errors("agent_tool_missing_param")

         agent = [
             available_agent

@@ -69,7 +58,9 @@ class AgentTools(BaseModel):
         ]

         if not agent:
-            return f"\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {', '.join([agent.role for agent in self.agents])}.\n"
+            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
+                coworkers=", ".join([agent.role for agent in self.agents])
+            )

         agent = agent[0]
         return agent.execute_task(task, context)
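The delegation tools now read their descriptions and error strings from I18N instead of inline dedent blocks. A small sketch of what tools() yields; the agent roles are illustrative, and the default Agent llm (ChatOpenAI) means an OPENAI_API_KEY is assumed to be available in the environment:

from crewai import Agent
from crewai.i18n import I18N
from crewai.tools.agent_tools import AgentTools

researcher = Agent(role="Researcher", goal="Find sources", backstory="Curious and thorough.")
writer = Agent(role="Writer", goal="Draft copy", backstory="Concise and clear.")

agent_tools = AgentTools(agents=[researcher, writer], i18n=I18N(language="en"))
delegate_tool, ask_tool = agent_tools.tools()

# Both descriptions come from the "tools" section of translations/en.json,
# with {coworkers} filled in as "Researcher, Writer".
print(delegate_tool.name)                    # "Delegate work to co-worker"
print(ask_tool.description.splitlines()[0])  # first line of the "ask_question" string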
src/crewai/translations/en.json (new file, 20 lines)
@@ -0,0 +1,20 @@
+{
+    "slices": {
+        "observation": "\nObservation",
+        "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
+        "memory": "This is the summary of your work so far:\n{chat_history}",
+        "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
+        "tools": "TOOLS:\n------\nYou have access to the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]",
+        "task_with_context": "{task}\nThis is the context you're working with:\n{context}"
+    },
+    "errors": {
+        "used_too_many_tools": "I've used too many tools for this task. I'm going to give you my absolute BEST Final answer now and not use any more tools.",
+        "agent_tool_missing_param": "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n",
+        "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {coworkers}.\n",
+        "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
+    },
+    "tools": {
+        "delegate_work": "Useful to delegate a specific task to one of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the task and all actual context you have for the task.\nFor example, `coworker|task|context`.",
+        "ask_question": "Useful to ask a question, opinion or take from on of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the question and all actual context you have for the question.\n For example, `coworker|question|context`."
+    }
+}
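en.json is the only bundled translation, so any other language passed to Crew or I18N needs a translations/<language>.json file with the same sections and placeholder names. A hypothetical helper for checking such a file before shipping it; the file path and the helper itself are illustrative, and only the required keys come from en.json above:

import json

REQUIRED = {
    "slices": ["observation", "task", "memory", "role_playing", "tools", "task_with_context"],
    "errors": ["used_too_many_tools", "agent_tool_missing_param",
               "agent_tool_unexsiting_coworker", "task_repeated_usage"],
    "tools": ["delegate_work", "ask_question"],
}

def missing_translation_keys(path):
    """Return the section.key entries absent from the translation file at path."""
    with open(path, "r") as f:
        data = json.load(f)
    return [
        f"{section}.{key}"
        for section, keys in REQUIRED.items()
        for key in keys
        if key not in data.get(section, {})
    ]

# missing_translation_keys("src/crewai/translations/pt.json") -> [] when complete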
@@ -240,10 +240,15 @@ def test_agent_moved_on_after_max_iterations():
         allow_delegation=False,
     )

-    output = agent.execute_task(
-        task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
-        tools=[get_final_answer],
-    )
-    assert (
-        output == "I have used the tool multiple times and the final answer remains 42."
-    )
+    with patch.object(
+        CrewAgentExecutor, "_force_answer", wraps=agent.agent_executor._force_answer
+    ) as private_mock:
+        output = agent.execute_task(
+            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+            tools=[get_final_answer],
+        )
+        assert (
+            output
+            == "I have used the tool multiple times and the final answer remains 42."
+        )
+        private_mock.assert_called_once()
@@ -48,7 +48,7 @@ def test_delegate_work_with_wrong_input():

     assert (
         result
-        == "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context\n"
+        == "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n"
     )