Merge branch 'main' into brandon/cre-19-workflows
@@ -1,3 +1,4 @@
import warnings
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.pipeline import Pipeline
@@ -5,4 +6,12 @@ from crewai.process import Process
from crewai.routers import Router
from crewai.task import Task

warnings.filterwarnings(
    "ignore",
    message="Pydantic serializer warnings:",
    category=UserWarning,
    module="pydantic.main",
)

__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router"]
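
Note: a minimal sketch of what the expanded __all__ now allows from the package root (the import line itself is illustrative, not part of this diff):

    from crewai import Agent, Crew, Pipeline, Process, Router, Task
    # Pipeline and Router join the previously exported Agent, Crew, Process, and Task.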

@@ -1,23 +1,17 @@
import os
from inspect import signature
from typing import Any, List, Optional, Tuple

from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import BaseTool
from langchain.agents.tools import tool as LangChainTool
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from typing import Any, List, Optional
from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser
from crewai.agents import CacheHandler
from crewai.utilities import Converter, Prompts
from crewai.tools.agent_tools import AgentTools
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.token_counter_callback import TokenCalcHandler


def mock_agent_ops_provider():
@@ -34,7 +28,6 @@ agentops = None

if os.environ.get("AGENTOPS_API_KEY"):
    try:
        import agentops  # type: ignore # Name "agentops" already defined on line 21
        from agentops import track_agent
    except ImportError:
        track_agent = mock_agent_ops_provider()
@@ -64,7 +57,6 @@ class Agent(BaseAgent):
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at the agent's disposal.
        step_callback: Callback to be executed after each step of the agent execution.
        callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """

    _times_executed: int = PrivateAttr(default=0)
@@ -81,18 +73,20 @@ class Agent(BaseAgent):
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    use_stop_words: bool = Field(
        default=True,
        description="Use stop words for the agent.",
    )
    use_system_prompt: Optional[bool] = Field(
        default=True,
        description="Use system prompt for the agent.",
    )
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4o")
        ),
        description="Language model that will run the agent.",
        description="Language model that will run the agent.", default="gpt-4o"
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None, description="Callback to be executed"
    )
    system_template: Optional[str] = Field(
        default=None, description="System format for the agent."
    )
@@ -108,6 +102,14 @@ class Agent(BaseAgent):
    allow_code_execution: Optional[bool] = Field(
        default=False, description="Enable code execution for the agent."
    )
    respect_context_window: bool = Field(
        default=True,
        description="Keep messages under the context window size by summarizing content.",
    )
    max_iter: int = Field(
        default=15,
description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
|
||||
    )
    max_retry_limit: int = Field(
        default=2,
        description="Maximum number of retries for an agent to execute a task when an error occurs.",
@@ -116,38 +118,17 @@ class Agent(BaseAgent):
    @model_validator(mode="after")
    def post_init_setup(self):
        self.agent_ops_agent_name = self.role

        # Different llms store the model name in different attributes
        model_name = getattr(self.llm, "model_name", None) or getattr(
            self.llm, "deployment_name", None
        self.llm = self.llm.model_name if hasattr(self.llm, "model_name") else self.llm
        self.function_calling_llm = (
            self.function_calling_llm.model_name
            if hasattr(self.function_calling_llm, "model_name")
            else self.function_calling_llm
        )

        if model_name:
            self._setup_llm_callbacks(model_name)

        if not self.agent_executor:
            self._setup_agent_executor()

        return self

    def _setup_llm_callbacks(self, model_name: str):
        token_handler = TokenCalcHandler(model_name, self._token_process)

        if not isinstance(self.llm.callbacks, list):
            self.llm.callbacks = []

        if not any(
            isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
        ):
            self.llm.callbacks.append(token_handler)

        if agentops and not any(
            isinstance(handler, agentops.LangchainCallbackHandler)
            for handler in self.llm.callbacks
        ):
            agentops.stop_instrumenting()
            self.llm.callbacks.append(agentops.LangchainCallbackHandler())

    def _setup_agent_executor(self):
        if not self.cache_handler:
            self.cache_handler = CacheHandler()
@@ -190,15 +171,7 @@ class Agent(BaseAgent):
            task_prompt += self.i18n.slice("memory").format(memory=memory)

        tools = tools or self.tools or []
        parsed_tools = self._parse_tools(tools)
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = parsed_tools
        self.agent_executor.task = task

        self.agent_executor.tools_description = self._render_text_description_and_args(
            parsed_tools
        )
        self.agent_executor.tools_names = self.__tools_names(parsed_tools)
        self.create_agent_executor(tools=tools, task=task)

        if self.crew and self.crew._train:
            task_prompt = self._training_handler(task_prompt=task_prompt)
@@ -211,6 +184,7 @@ class Agent(BaseAgent):
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
                "ask_for_human_input": task.human_input,
            }
        )["output"]
        except Exception as e:
@@ -231,73 +205,25 @@ class Agent(BaseAgent):

        return result

    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts

    def create_agent_executor(self, tools=None) -> None:
    def create_agent_executor(self, tools=None, task=None) -> None:
        """Create an agent executor for the agent.

        Returns:
            An instance of the CrewAgentExecutor class.
        """
        tools = tools or self.tools or []

        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: self.format_log_to_str(
                x["intermediate_steps"]
            ),
        }

        executor_args = {
            "llm": self.llm,
            "i18n": self.i18n,
            "crew": self.crew,
            "crew_agent": self,
            "tools": self._parse_tools(tools),
            "verbose": self.verbose,
            "original_tools": tools,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "max_execution_time": self.max_execution_time,
            "step_callback": self.step_callback,
            "tools_handler": self.tools_handler,
            "function_calling_llm": self.function_calling_llm,
            "callbacks": self.callbacks,
            "max_tokens": self.max_tokens,
        }

        if self._rpm_controller:
            executor_args["request_within_rpm_limit"] = (
                self._rpm_controller.check_or_wait
            )
        parsed_tools = self._parse_tools(tools)

        prompt = Prompts(
            i18n=self.i18n,
            agent=self,
            tools=tools,
            i18n=self.i18n,
            use_system_prompt=self.use_system_prompt,
            system_template=self.system_template,
            prompt_template=self.prompt_template,
            response_template=self.response_template,
        ).task_execution()

        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )

        stop_words = [self.i18n.slice("observation")]

        if self.response_template:
@@ -305,11 +231,27 @@ class Agent(BaseAgent):
            self.response_template.split("{{ .Response }}")[1].strip()
        )

        bind = self.llm.bind(stop=stop_words)

        inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args
            llm=self.llm,
            task=task,
            agent=self,
            crew=self.crew,
            tools=parsed_tools,
            prompt=prompt,
            original_tools=tools,
            stop_words=stop_words,
            max_iter=self.max_iter,
            tools_handler=self.tools_handler,
            use_stop_words=self.use_stop_words,
            tools_names=self.__tools_names(parsed_tools),
            tools_description=self._render_text_description_and_args(parsed_tools),
            step_callback=self.step_callback,
            function_calling_llm=self.function_calling_llm,
            respect_context_window=self.respect_context_window,
            request_within_rpm_limit=self._rpm_controller.check_or_wait
            if self._rpm_controller
            else None,
            callbacks=[TokenCalcHandler(self._token_process)],
        )

    def get_delegation_tools(self, agents: List[BaseAgent]):
@@ -330,7 +272,7 @@ class Agent(BaseAgent):
    def get_output_converter(self, llm, text, model, instructions):
        return Converter(llm=llm, text=text, model=model, instructions=instructions)

    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:  # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
    def _parse_tools(self, tools: List[Any]) -> List[Any]:  # type: ignore
        """Parse tools to be used for the task."""
        tools_list = []
        try:
@@ -373,7 +315,7 @@ class Agent(BaseAgent):
        )
        return task_prompt

    def _render_text_description(self, tools: List[BaseTool]) -> str:
    def _render_text_description(self, tools: List[Any]) -> str:
        """Render the tool name and description in plain text.

        Output will be in the format of:
@@ -392,7 +334,7 @@ class Agent(BaseAgent):

        return description

    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
    def _render_text_description_and_args(self, tools: List[Any]) -> str:
        """Render the tool name, description, and args in plain text.

        Output will be in the format of:

@@ -1,6 +1,5 @@
from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .parser import CrewAgentParser
from .tools_handler import ToolsHandler

__all__ = ["CacheHandler", "CrewAgentExecutor", "CrewAgentParser", "ToolsHandler"]
__all__ = ["CacheHandler", "CrewAgentParser", "ToolsHandler"]

@@ -102,7 +102,8 @@ class BaseAgent(ABC, BaseModel):
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents' disposal"
@@ -224,10 +225,8 @@ class BaseAgent(ABC, BaseModel):

        # Copy llm and clear callbacks
        existing_llm = shallow_copy(self.llm)
        existing_llm.callbacks = []
        copied_data = self.model_dump(exclude=exclude)
        copied_data = {k: v for k, v in copied_data.items() if v is not None}

        copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)

        return copied_agent

@@ -19,15 +19,13 @@ class CrewAgentExecutorMixin:
    crew_agent: Optional["BaseAgent"]
    task: Optional["Task"]
    iterations: int
    force_answer_max_iterations: int
    have_forced_answer: bool
    max_iter: int
    _i18n: I18N

    def _should_force_answer(self) -> bool:
        """Determine if a forced answer is required based on iteration count."""
        return (
            self.iterations == self.force_answer_max_iterations
        ) and not self.have_forced_answer
        return (self.iterations >= self.max_iter) and not self.have_forced_answer
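
Note: the forcing condition now compares against max_iter with >= instead of an exact match on force_answer_max_iterations; a quick illustrative check (values are hypothetical):

    # iterations=15, max_iter=15, have_forced_answer=False  ->  True  (force the final answer now)
    # iterations=16, max_iter=15, have_forced_answer=False  ->  True  (>= also catches overshoot, unlike ==)
    # iterations=15, max_iter=15, have_forced_answer=True   ->  False (only force once)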

    def _create_short_term_memory(self, output) -> None:
        """Create and save a short-term memory item if conditions are met."""

352
src/crewai/agents/crew_agent_executor.py
Normal file
@@ -0,0 +1,352 @@
import json
import re
from typing import Any, Dict, List, Union

from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.parser import CrewAgentParser
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N, Printer
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
from crewai.utilities.logger import Logger
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.llm import LLM
from crewai.agents.parser import (
    AgentAction,
    AgentFinish,
    OutputParserException,
    FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
)


class CrewAgentExecutor(CrewAgentExecutorMixin):
    _logger: Logger = Logger()

    def __init__(
        self,
        llm: Any,
        task: Any,
        crew: Any,
        agent: Any,
        prompt: dict[str, str],
        max_iter: int,
        tools: List[Any],
        tools_names: str,
        use_stop_words: bool,
        stop_words: List[str],
        tools_description: str,
        tools_handler: ToolsHandler,
        step_callback: Any = None,
        original_tools: List[Any] = [],
        function_calling_llm: Any = None,
        respect_context_window: bool = False,
        request_within_rpm_limit: Any = None,
        callbacks: List[Any] = [],
    ):
        self._i18n: I18N = I18N()
        self.llm = llm
        self.task = task
        self.agent = agent
        self.crew = crew
        self.prompt = prompt
        self.tools = tools
        self.tools_names = tools_names
        self.stop = stop_words
        self.max_iter = max_iter
        self.callbacks = callbacks
        self._printer: Printer = Printer()
        self.tools_handler = tools_handler
        self.original_tools = original_tools
        self.step_callback = step_callback
        self.use_stop_words = use_stop_words
        self.tools_description = tools_description
        self.function_calling_llm = function_calling_llm
        self.respect_context_window = respect_context_window
        self.request_within_rpm_limit = request_within_rpm_limit
        self.ask_for_human_input = False
        self.messages: List[Dict[str, str]] = []
        self.iterations = 0
        self.have_forced_answer = False
        self.name_to_tool_map = {tool.name: tool for tool in self.tools}

    def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        if "system" in self.prompt:
            system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
            user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)

            self.messages.append(self._format_msg(system_prompt, role="system"))
            self.messages.append(self._format_msg(user_prompt))
        else:
            user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
            self.messages.append(self._format_msg(user_prompt))

        self._show_start_logs()

        self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
        formatted_answer = self._invoke_loop()

        if self.ask_for_human_input:
            human_feedback = self._ask_human_input(formatted_answer.output)
            if self.crew and self.crew._train:
                self._handle_crew_training_output(formatted_answer, human_feedback)

            # Making sure we only ask for it once, so disabling for the next thought loop
            self.ask_for_human_input = False
            self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
            formatted_answer = self._invoke_loop()

        return {"output": formatted_answer.output}

    def _invoke_loop(self, formatted_answer=None):
        try:
            while not isinstance(formatted_answer, AgentFinish):
                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                    answer = LLM(
                        self.llm,
                        stop=self.stop if self.use_stop_words else None,
                        callbacks=self.callbacks,
                    ).call(self.messages)

                    if not self.use_stop_words:
                        try:
                            self._format_answer(answer)
                        except OutputParserException as e:
                            if (
                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
                                in e.error
                            ):
                                answer = answer.split("Observation:")[0].strip()

                    self.iterations += 1
                    formatted_answer = self._format_answer(answer)

                    if isinstance(formatted_answer, AgentAction):
                        action_result = self._use_tool(formatted_answer)
                        formatted_answer.text += f"\nObservation: {action_result}"
                        formatted_answer.result = action_result
                        self._show_logs(formatted_answer)

                        if self.step_callback:
                            self.step_callback(formatted_answer)

                        if self._should_force_answer():
                            if self.have_forced_answer:
                                return AgentFinish(
                                    output=self._i18n.errors(
                                        "force_final_answer_error"
                                    ).format(formatted_answer.text),
                                    text=formatted_answer.text,
                                )
                            else:
                                formatted_answer.text += (
                                    f'\n{self._i18n.errors("force_final_answer")}'
                                )
                                self.have_forced_answer = True
                    self.messages.append(
                        self._format_msg(formatted_answer.text, role="assistant")
                    )
        except OutputParserException as e:
            self.messages.append({"role": "assistant", "content": e.error})
            return self._invoke_loop(formatted_answer)

        except Exception as e:
            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
                self._handle_context_length()
                return self._invoke_loop(formatted_answer)
            else:
                raise e

        self._show_logs(formatted_answer)
        return formatted_answer

    def _show_start_logs(self):
        if self.agent.verbose or (
            hasattr(self, "crew") and getattr(self.crew, "verbose", False)
        ):
            self._printer.print(
                content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{self.agent.role}\033[00m"
            )
            self._printer.print(
                content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
            )

    def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
        if self.agent.verbose or (
            hasattr(self, "crew") and getattr(self.crew, "verbose", False)
        ):
            if isinstance(formatted_answer, AgentAction):
                thought = re.sub(r"\n+", "\n", formatted_answer.thought)
                formatted_json = json.dumps(
                    json.loads(formatted_answer.tool_input),
                    indent=2,
                    ensure_ascii=False,
                )
                self._printer.print(
                    content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{self.agent.role}\033[00m"
                )
                if thought and thought != "":
                    self._printer.print(
                        content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
                    )
                self._printer.print(
                    content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
                )
                self._printer.print(
                    content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
                )
                self._printer.print(
                    content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
                )
            elif isinstance(formatted_answer, AgentFinish):
                self._printer.print(
                    content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{self.agent.role}\033[00m"
                )
                self._printer.print(
                    content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m"
                )

    def _use_tool(self, agent_action: AgentAction) -> Any:
        tool_usage = ToolUsage(
            tools_handler=self.tools_handler,
            tools=self.tools,
            original_tools=self.original_tools,
            tools_description=self.tools_description,
            tools_names=self.tools_names,
            function_calling_llm=self.function_calling_llm,
            task=self.task,  # type: ignore[arg-type]
            agent=self.agent,
            action=agent_action,
        )
        tool_calling = tool_usage.parse(agent_action.text)

        if isinstance(tool_calling, ToolUsageErrorException):
            tool_result = tool_calling.message
        else:
            if tool_calling.tool_name.casefold().strip() in [
                name.casefold().strip() for name in self.name_to_tool_map
            ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                name.casefold().strip() for name in self.name_to_tool_map
            ]:
                tool_result = tool_usage.use(tool_calling, agent_action.text)
            else:
                tool_result = self._i18n.errors("wrong_tool_name").format(
                    tool=tool_calling.tool_name,
                    tools=", ".join([tool.name.casefold() for tool in self.tools]),
                )
        return tool_result

    def _summarize_messages(self) -> None:
        llm = LLM(self.llm)
        messages_groups = []

        for message in self.messages:
            content = message["content"]
            for i in range(0, len(content), 5000):
                messages_groups.append(content[i : i + 5000])

        summarized_contents = []
        for group in messages_groups:
            summary = llm.call(
                [
                    self._format_msg(
                        self._i18n.slices("summarizer_system_message"), role="system"
                    ),
                    self._format_msg(
                        self._i18n.errors("sumamrize_instruction").format(group=group),
                    ),
                ]
            )
            summarized_contents.append(summary)

        merged_summary = " ".join(str(content) for content in summarized_contents)

        self.messages = [
            self._format_msg(
                self._i18n.errors("summary").format(merged_summary=merged_summary)
            )
        ]
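
Note: as a worked illustration of the chunking above (numbers hypothetical), a single 12,000-character message is split into groups of 5,000, 5,000, and 2,000 characters, each group is summarized by a separate LLM call, and the summaries are joined with spaces before replacing self.messages.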

    def _handle_context_length(self) -> None:
        if self.respect_context_window:
            self._logger.log(
                "debug",
                "Context length exceeded. Summarizing content to fit the model context window.",
                color="yellow",
            )
            self._summarize_messages()
        else:
            self._logger.log(
                "debug",
                "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
                color="red",
            )
            raise SystemExit(
                "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
            )

    def _handle_crew_training_output(
        self, result: AgentFinish, human_feedback: str | None = None
    ) -> None:
        """Function to handle the process of the training data."""
        agent_id = str(self.agent.id)

        if (
            CrewTrainingHandler(TRAINING_DATA_FILE).load()
            and not self.ask_for_human_input
        ):
            training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
            if training_data.get(agent_id):
                if self.crew is not None and hasattr(self.crew, "_train_iteration"):
                    training_data[agent_id][self.crew._train_iteration][
                        "improved_output"
                    ] = result.output
                    CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)
                else:
                    self._logger.log(
                        "error",
                        "Invalid crew or missing _train_iteration attribute.",
                        color="red",
                    )

        if self.ask_for_human_input and human_feedback is not None:
            training_data = {
                "initial_output": result.output,
                "human_feedback": human_feedback,
                "agent": agent_id,
                "agent_role": self.agent.role,
            }
            if self.crew is not None and hasattr(self.crew, "_train_iteration"):
                train_iteration = self.crew._train_iteration
                if isinstance(train_iteration, int):
                    CrewTrainingHandler(TRAINING_DATA_FILE).append(
                        train_iteration, agent_id, training_data
                    )
                else:
                    self._logger.log(
                        "error",
                        "Invalid train iteration type. Expected int.",
                        color="red",
                    )
            else:
                self._logger.log(
                    "error",
                    "Crew is None or does not have _train_iteration attribute.",
                    color="red",
                )

    def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
        prompt = prompt.replace("{input}", inputs["input"])
        prompt = prompt.replace("{tool_names}", inputs["tool_names"])
        prompt = prompt.replace("{tools}", inputs["tools"])
        return prompt

    def _format_answer(self, answer: str) -> Union[AgentAction, AgentFinish]:
        return CrewAgentParser(agent=self.agent).parse(answer)

    def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
        return {"role": role, "content": prompt}
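
Note: for orientation, a sketch of the message list invoke() builds before the first LLM call when the prompt dict carries "system" and "user" entries (the contents are hypothetical):

    messages = [
        {"role": "system", "content": "You are Researcher. Your goal is ..."},
        {"role": "user", "content": "Current Task: ... You have access to tools: search"},
    ]
    # Each subsequent thought/observation round appends an "assistant" message via _format_msg.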

@@ -1,397 +0,0 @@
import threading
import time
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union

import click
from langchain.agents import AgentExecutor
from langchain.agents.agent import ExceptionTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.exceptions import OutputParserException
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf

from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
from crewai.utilities.logger import Logger
from crewai.utilities.training_handler import CrewTrainingHandler


class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
    _i18n: I18N = I18N()
    should_ask_for_human_input: bool = False
    llm: Any = None
    iterations: int = 0
    task: Any = None
    tools_description: str = ""
    tools_names: str = ""
    original_tools: List[Any] = []
    crew_agent: Any = None
    crew: Any = None
    function_calling_llm: Any = None
    request_within_rpm_limit: Any = None
    tools_handler: Optional[InstanceOf[ToolsHandler]] = None
    max_iterations: Optional[int] = 15
    have_forced_answer: bool = False
    force_answer_max_iterations: Optional[int] = None  # type: ignore # Incompatible types in assignment (expression has type "int | None", base class "CrewAgentExecutorMixin" defined the type as "int")
    step_callback: Optional[Any] = None
    system_template: Optional[str] = None
    prompt_template: Optional[str] = None
    response_template: Optional[str] = None
    _logger: Logger = Logger()
    _fit_context_window_strategy: Optional[Literal["summarize"]] = "summarize"

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name.casefold() for tool in self.tools],
            excluded_colors=["green", "red"],
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Allowing human input given task setting
        if self.task and self.task.human_input:
            self.should_ask_for_human_input = True

        # Let's start tracking the number of iterations and time elapsed
        self.iterations = 0
        time_elapsed = 0.0
        start_time = time.time()

        # We now enter the agent loop (until it returns something).
        while self._should_continue(self.iterations, time_elapsed):
            if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                next_step_output = self._take_next_step(
                    name_to_tool_map,
                    color_mapping,
                    inputs,
                    intermediate_steps,
                    run_manager=run_manager,
                )

                if self.step_callback:
                    self.step_callback(next_step_output)

                if isinstance(next_step_output, AgentFinish):
                    # Creating long term memory
                    create_long_term_memory = threading.Thread(
                        target=self._create_long_term_memory, args=(next_step_output,)
                    )
                    create_long_term_memory.start()

                    return self._return(
                        next_step_output, intermediate_steps, run_manager=run_manager
                    )

                intermediate_steps.extend(next_step_output)

                if len(next_step_output) == 1:
                    next_step_action = next_step_output[0]
                    # See if tool should return directly
                    tool_return = self._get_tool_return(next_step_action)
                    if tool_return is not None:
                        return self._return(
                            tool_return, intermediate_steps, run_manager=run_manager
                        )

                self.iterations += 1
                time_elapsed = time.time() - start_time
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
        )

        return self._return(output, intermediate_steps, run_manager=run_manager)

    def _iter_next_step(
        self,
        name_to_tool_map: Dict[str, BaseTool],
        color_mapping: Dict[str, str],
        inputs: Dict[str, str],
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:
        """Take a single step in the thought-action-observation loop.

        Override this to take control of how the agent makes and acts on choices.
        """
        try:
            if self._should_force_answer():
                error = self._i18n.errors("force_final_answer")
                output = AgentAction("_Exception", error, error)
                self.have_forced_answer = True
                yield AgentStep(action=output, observation=error)
                return

            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)

            # Call the LLM to see what to do.
            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )

        except OutputParserException as e:
            if isinstance(self.handle_parsing_errors, bool):
                raise_error = not self.handle_parsing_errors
            else:
                raise_error = False
            if raise_error:
                raise ValueError(
                    "An output parsing error occurred. "
                    "In order to pass this error back to the agent and have it try "
                    "again, pass `handle_parsing_errors=True` to the AgentExecutor. "
                    f"This is the error: {str(e)}"
                )
            str(e)
            if isinstance(self.handle_parsing_errors, bool):
                if e.send_to_llm:
                    observation = f"\n{str(e.observation)}"
                    str(e.llm_output)
                else:
                    observation = ""
            elif isinstance(self.handle_parsing_errors, str):
                observation = f"\n{self.handle_parsing_errors}"
            elif callable(self.handle_parsing_errors):
                observation = f"\n{self.handle_parsing_errors(e)}"
            else:
                raise ValueError("Got unexpected type of `handle_parsing_errors`")
            output = AgentAction("_Exception", observation, "")

            if run_manager:
                run_manager.on_agent_action(output, color="green")

            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = ExceptionTool().run(
                output.tool_input,
                verbose=False,
                color=None,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs,
            )

            if self._should_force_answer():
                error = self._i18n.errors("force_final_answer")
                output = AgentAction("_Exception", error, error)
                yield AgentStep(action=output, observation=error)
                return

            yield AgentStep(action=output, observation=observation)
            return

        except Exception as e:
            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
                output = self._handle_context_length_error(
                    intermediate_steps, run_manager, inputs
                )

                if isinstance(output, AgentFinish):
                    yield output
                elif isinstance(output, list):
                    for step in output:
                        yield step
                return

            raise e

        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            if self.should_ask_for_human_input:
                human_feedback = self._ask_human_input(output.return_values["output"])

                if self.crew and self.crew._train:
                    self._handle_crew_training_output(output, human_feedback)

                # Making sure we only ask for it once, so disabling for the next thought loop
                self.should_ask_for_human_input = False
                action = AgentAction(
                    tool="Human Input", tool_input=human_feedback, log=output.log
                )

                yield AgentStep(
                    action=action,
                    observation=self._i18n.slice("human_feedback").format(
                        human_feedback=human_feedback
                    ),
                )
                return

            else:
                if self.crew and self.crew._train:
                    self._handle_crew_training_output(output)

                yield output
                return

        self._create_short_term_memory(output)

        actions: List[AgentAction]
        actions = [output] if isinstance(output, AgentAction) else output
        yield from actions

        for agent_action in actions:
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")

            tool_usage = ToolUsage(
                tools_handler=self.tools_handler,  # type: ignore # Argument "tools_handler" to "ToolUsage" has incompatible type "ToolsHandler | None"; expected "ToolsHandler"
                tools=self.tools,  # type: ignore # Argument "tools" to "ToolUsage" has incompatible type "Sequence[BaseTool]"; expected "list[BaseTool]"
                original_tools=self.original_tools,
                tools_description=self.tools_description,
                tools_names=self.tools_names,
                function_calling_llm=self.function_calling_llm,
                task=self.task,
                agent=self.crew_agent,
                action=agent_action,
            )

            tool_calling = tool_usage.parse(agent_action.log)

            if isinstance(tool_calling, ToolUsageErrorException):
                observation = tool_calling.message
            else:
                if tool_calling.tool_name.casefold().strip() in [
                    name.casefold().strip() for name in name_to_tool_map
                ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                    name.casefold().strip() for name in name_to_tool_map
                ]:
                    observation = tool_usage.use(tool_calling, agent_action.log)
                else:
                    observation = self._i18n.errors("wrong_tool_name").format(
                        tool=tool_calling.tool_name,
                        tools=", ".join([tool.name.casefold() for tool in self.tools]),
                    )
            yield AgentStep(action=agent_action, observation=observation)

    def _handle_crew_training_output(
        self, output: AgentFinish, human_feedback: str | None = None
    ) -> None:
        """Function to handle the process of the training data."""
        agent_id = str(self.crew_agent.id)

        if (
            CrewTrainingHandler(TRAINING_DATA_FILE).load()
            and not self.should_ask_for_human_input
        ):
            training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
            if training_data.get(agent_id):
                training_data[agent_id][self.crew._train_iteration][
                    "improved_output"
                ] = output.return_values["output"]
                CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)

        if self.should_ask_for_human_input and human_feedback is not None:
            training_data = {
                "initial_output": output.return_values["output"],
                "human_feedback": human_feedback,
                "agent": agent_id,
                "agent_role": self.crew_agent.role,
            }
            CrewTrainingHandler(TRAINING_DATA_FILE).append(
                self.crew._train_iteration, agent_id, training_data
            )

    def _handle_context_length(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> List[Tuple[AgentAction, str]]:
        text = intermediate_steps[0][1]
        original_action = intermediate_steps[0][0]

        text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n"],
            chunk_size=8000,
            chunk_overlap=500,
        )

        if self._fit_context_window_strategy == "summarize":
            docs = text_splitter.create_documents([text])
            self._logger.log(
                "debug",
                "Summarizing Content, it is recommended to use a RAG tool",
                color="bold_blue",
            )
            summarize_chain = load_summarize_chain(
                self.llm, chain_type="map_reduce", verbose=True
            )
            summarized_docs = []
            for doc in docs:
                summary = summarize_chain.invoke(
                    {"input_documents": [doc]}, return_only_outputs=True
                )

                summarized_docs.append(summary["output_text"])

            formatted_results = "\n\n".join(summarized_docs)
            summary_step = AgentStep(
                action=AgentAction(
                    tool=original_action.tool,
                    tool_input=original_action.tool_input,
                    log=original_action.log,
                ),
                observation=formatted_results,
            )
            summary_tuple = (summary_step.action, summary_step.observation)
            return [summary_tuple]

        return intermediate_steps

    def _handle_context_length_error(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        run_manager: Optional[CallbackManagerForChainRun],
        inputs: Dict[str, str],
    ) -> Union[AgentFinish, List[AgentStep]]:
        self._logger.log(
            "debug",
            "Context length exceeded. Asking user if they want to use summarize prompt to fit, this will reduce context length.",
            color="yellow",
        )
        user_choice = click.confirm(
            "Context length exceeded. Do you want to summarize the text to fit models context window?"
        )
        if user_choice:
            self._logger.log(
                "debug",
                "Context length exceeded. Using summarize prompt to fit, this will reduce context length.",
                color="bold_blue",
            )
            intermediate_steps = self._handle_context_length(intermediate_steps)

            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )

            if isinstance(output, AgentFinish):
                return output
            elif isinstance(output, AgentAction):
                return [AgentStep(action=output, observation=None)]
            else:
                return [AgentStep(action=action, observation=None) for action in output]
        else:
            self._logger.log(
                "debug",
                "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
                color="red",
            )
            raise SystemExit(
                "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
            )
@@ -1,10 +1,6 @@
import re
from typing import Any, Union

from json_repair import repair_json
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException

from crewai.utilities import I18N

@@ -14,7 +10,39 @@ MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Forma
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"


class CrewAgentParser(ReActSingleInputOutputParser):
class AgentAction:
    thought: str
    tool: str
    tool_input: str
    text: str
    result: str

    def __init__(self, thought: str, tool: str, tool_input: str, text: str):
        self.thought = thought
        self.tool = tool
        self.tool_input = tool_input
        self.text = text


class AgentFinish:
    thought: str
    output: str
    text: str

    def __init__(self, thought: str, output: str, text: str):
        self.thought = thought
        self.output = output
        self.text = text


class OutputParserException(Exception):
    error: str

    def __init__(self, error: str):
        self.error = error


class CrewAgentParser:
    """Parses ReAct-style LLM calls that have a single tool input.

    Expects output to be in one of two formats.
@@ -38,7 +66,11 @@ class CrewAgentParser(ReActSingleInputOutputParser):
    _i18n: I18N = I18N()
    agent: Any = None

    def __init__(self, agent: Any):
        self.agent = agent

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        thought = self._extract_thought(text)
        includes_answer = FINAL_ANSWER_ACTION in text
        regex = (
            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
@@ -47,7 +79,7 @@ class CrewAgentParser(ReActSingleInputOutputParser):
        if action_match:
            if includes_answer:
                raise OutputParserException(
                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
                    f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}"
                )
            action = action_match.group(1)
            clean_action = self._clean_action(action)
@@ -57,30 +89,23 @@ class CrewAgentParser(ReActSingleInputOutputParser):
            tool_input = action_input.strip(" ").strip('"')
            safe_tool_input = self._safe_repair_json(tool_input)

            return AgentAction(clean_action, safe_tool_input, text)
            return AgentAction(thought, clean_action, safe_tool_input, text)

        elif includes_answer:
            return AgentFinish(
                {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
            )
            final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish(thought, final_answer, text)

        if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
            self.agent.increment_formatting_errors()
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
                llm_output=text,
                send_to_llm=True,
                f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
            )
        elif not re.search(
            r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
        ):
            self.agent.increment_formatting_errors()
            raise OutputParserException(
                f"Could not parse LLM output: `{text}`",
                observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
                llm_output=text,
                send_to_llm=True,
                MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
            )
        else:
            format = self._i18n.slice("format_without_tools")
@@ -88,11 +113,15 @@ class CrewAgentParser(ReActSingleInputOutputParser):
            self.agent.increment_formatting_errors()
            raise OutputParserException(
                error,
                observation=error,
                llm_output=text,
                send_to_llm=True,
            )

    def _extract_thought(self, text: str) -> str:
        regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
        thought_match = re.search(regex, text, re.DOTALL)
        if thought_match:
            return thought_match.group(1).strip()
        return ""

    def _clean_action(self, text: str) -> str:
        """Clean action string by removing non-essential formatting characters."""
        return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
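
Note: a hedged sketch of the rewritten parser on a well-formed ReAct completion (the agent argument is only consulted on formatting errors, so None stands in here):

    parser = CrewAgentParser(agent=None)
    result = parser.parse(
        'Thought: I should look this up\n\nAction: search\nAction Input: {"query": "crewai"}'
    )
    # result is an AgentAction whose thought, tool ("search"), and repaired JSON tool_input are populated.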

@@ -79,7 +79,7 @@ class TokenManager:
        """
        encrypted_data = self.read_secure_file(self.file_path)

        decrypted_data = self.fernet.decrypt(encrypted_data)
        decrypted_data = self.fernet.decrypt(encrypted_data)  # type: ignore
        data = json.loads(decrypted_data)

        expiration = datetime.fromisoformat(data["expiration"])

@@ -117,7 +117,7 @@ class DeployCommand:
        else:
            self._handle_error(json_response)

    def create_crew(self, confirm: bool) -> None:
    def create_crew(self, confirm: bool = False) -> None:
        """
        Create a new crew deployment.
        """

@@ -10,8 +10,6 @@ from crewai.project import CrewBase, agent, crew, task
@CrewBase
class {{crew_name}}Crew():
    """{{crew_name}} crew"""
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @agent
    def researcher(self) -> Agent:

@@ -6,7 +6,6 @@ from concurrent.futures import Future
from hashlib import md5
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from langchain_core.callbacks import BaseCallbackHandler
from pydantic import (
    UUID4,
    BaseModel,
@@ -68,7 +67,6 @@ class Crew(BaseModel):
        manager_llm: The language model that will run manager agent.
        manager_agent: Custom agent that will be used as manager.
        memory: Whether the crew should use memory to store memories of its execution.
        manager_callbacks: The callback handlers to be executed by the manager agent when hierarchical process is used
        cache: Whether the crew should use a cache to store the results of the tools execution.
        function_calling_llm: The language model that will run the tool calling for all the agents.
        process: The process flow that the crew will follow (e.g., sequential, hierarchical).
@@ -126,10 +124,6 @@ class Crew(BaseModel):
    manager_agent: Optional[BaseAgent] = Field(
        description="Custom agent that will be used as manager.", default=None
    )
    manager_callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None,
        description="A list of callback handlers to be executed by the manager agent when hierarchical process is used",
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
@@ -205,6 +199,12 @@ class Crew(BaseModel):
        if self.output_log_file:
            self._file_handler = FileHandler(self.output_log_file)
        self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
        self.function_calling_llm = (
            self.function_calling_llm.model_name
            if self.function_calling_llm is not None
            and hasattr(self.function_calling_llm, "model_name")
            else self.function_calling_llm
        )
        self._telemetry = Telemetry()
        self._telemetry.set_tracer()
        return self
@@ -588,8 +588,14 @@ class Crew(BaseModel):
                    "warning", "Manager agent should not have tools", color="orange"
                )
                manager.tools = []
                raise Exception("Manager agent should not have tools")
            manager.tools = self.manager_agent.get_delegation_tools(self.agents)
        else:
            self.manager_llm = (
                self.manager_llm.model_name
                if hasattr(self.manager_llm, "model_name")
                else self.manager_llm
            )
            manager = Agent(
                role=i18n.retrieve("hierarchical_manager_agent", "role"),
                goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -743,9 +749,6 @@ class Crew(BaseModel):
                    task.tools.append(new_tool)

    def _log_task_start(self, task: Task, role: str = "None"):
        color = self._logging_color
        self._logger.log("debug", f"== Working Agent: {role}", color=color)
        self._logger.log("info", f"== Starting Task: {task.description}", color=color)
        if self.output_log_file:
            self._file_handler.log(agent=role, task=task.description, status="started")

@@ -768,7 +771,6 @@ class Crew(BaseModel):

    def _process_task_result(self, task: Task, output: TaskOutput) -> None:
        role = task.agent.role if task.agent is not None else "None"
        self._logger.log("debug", f"== [{role}] Task output: {output}\n\n")
        if self.output_log_file:
            self._file_handler.log(agent=role, task=output, status="completed")
@@ -921,16 +923,14 @@ class Crew(BaseModel):
    def calculate_usage_metrics(self) -> UsageMetrics:
        """Calculates and returns the usage metrics."""
        total_usage_metrics = UsageMetrics()

        for agent in self.agents:
            if hasattr(agent, "_token_process"):
                token_sum = agent._token_process.get_summary()
                total_usage_metrics.add_usage_metrics(token_sum)

        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
            token_sum = self.manager_agent._token_process.get_summary()
            total_usage_metrics.add_usage_metrics(token_sum)

        self.usage_metrics = total_usage_metrics
        return total_usage_metrics

    def test(

20
src/crewai/llm.py
Normal file
@@ -0,0 +1,20 @@
from typing import Any, Dict, List
from litellm import completion
import litellm


class LLM:
    def __init__(self, model: str, stop: List[str] = [], callbacks: List[Any] = []):
        self.stop = stop
        self.model = model
        litellm.callbacks = callbacks

    def call(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        response = completion(
            stop=self.stop, model=self.model, messages=messages, num_retries=5
        )
        return response["choices"][0]["message"]["content"]

    def _call_callbacks(self, formatted_answer):
        for callback in self.callbacks:
            callback(formatted_answer)
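
Note: a minimal usage sketch of the new litellm wrapper (model name and stop sequence are illustrative); despite the Dict return annotation, call() returns the assistant message content as a string, which is how the executor earlier in this diff consumes it:

    llm = LLM("gpt-4o", stop=["\nObservation:"])
    reply = llm.call([{"role": "user", "content": "Say hello in one word."}])
    print(reply)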

@@ -89,7 +89,10 @@ def CrewBase(cls):
            callbacks: Dict[str, Callable],
        ) -> None:
            if llm := agent_info.get("llm"):
                self.agents_config[agent_name]["llm"] = llms[llm]()
                try:
                    self.agents_config[agent_name]["llm"] = llms[llm]()
                except KeyError:
                    self.agents_config[agent_name]["llm"] = llm

            if tools := agent_info.get("tools"):
                self.agents_config[agent_name]["tools"] = [
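
Note: the new try/except means an agent's llm key may name either a registered @llm method or a plain model string; an illustrative trace (names hypothetical):

    # llms == {"custom_llm": <bound @llm method>}
    # agents_config["researcher"]["llm"] = "custom_llm" -> resolved to llms["custom_llm"]()
    # agents_config["researcher"]["llm"] = "gpt-4o"     -> KeyError, so the raw string "gpt-4o" is kept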

@@ -276,7 +276,9 @@ class Task(BaseModel):
            content = (
                json_output
                if json_output
                else pydantic_output.model_dump_json() if pydantic_output else result
                else pydantic_output.model_dump_json()
                if pydantic_output
                else result
            )
            self._save_file(content)

@@ -35,7 +35,7 @@ class TaskOutput(BaseModel):
        return self

    @property
    def json(self) -> Optional[str]:
    def json(self) -> str:
        if self.output_format != OutputFormat.JSON:
            raise ValueError(
                """

@@ -4,15 +4,28 @@ import asyncio
import json
import os
import platform
import warnings
from typing import TYPE_CHECKING, Any, Optional
from contextlib import contextmanager

import pkg_resources
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Span, Status, StatusCode


@contextmanager
def suppress_warnings():
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        yield


with suppress_warnings():
    import pkg_resources


from opentelemetry import trace  # noqa: E402
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter  # noqa: E402
from opentelemetry.sdk.resources import SERVICE_NAME, Resource  # noqa: E402
from opentelemetry.sdk.trace import TracerProvider  # noqa: E402
from opentelemetry.sdk.trace.export import BatchSpanProcessor  # noqa: E402
from opentelemetry.trace import Span, Status, StatusCode  # noqa: E402

if TYPE_CHECKING:
    from crewai.crew import Crew
@@ -62,8 +75,9 @@ class Telemetry:
    def set_tracer(self):
        if self.ready and not self.trace_set:
            try:
                trace.set_tracer_provider(self.provider)
                self.trace_set = True
                with suppress_warnings():
                    trace.set_tracer_provider(self.provider)
                    self.trace_set = True
            except Exception:
                self.ready = False
                self.trace_set = False
@@ -102,14 +116,8 @@ class Telemetry:
                "max_iter": agent.max_iter,
                "max_rpm": agent.max_rpm,
                "i18n": agent.i18n.prompt_file,
                "function_calling_llm": json.dumps(
                    self._safe_llm_attributes(
                        agent.function_calling_llm
                    )
                ),
                "llm": json.dumps(
                    self._safe_llm_attributes(agent.llm)
                ),
                "function_calling_llm": agent.function_calling_llm,
                "llm": agent.llm,
                "delegation_enabled?": agent.allow_delegation,
                "allow_code_execution?": agent.allow_code_execution,
                "max_retry_limit": agent.max_retry_limit,
@@ -173,14 +181,8 @@ class Telemetry:
                "verbose?": agent.verbose,
                "max_iter": agent.max_iter,
                "max_rpm": agent.max_rpm,
                "function_calling_llm": json.dumps(
                    self._safe_llm_attributes(
                        agent.function_calling_llm
                    )
                ),
                "llm": json.dumps(
                    self._safe_llm_attributes(agent.llm)
                ),
                "function_calling_llm": agent.function_calling_llm,
                "llm": agent.llm,
                "delegation_enabled?": agent.allow_delegation,
                "allow_code_execution?": agent.allow_code_execution,
                "max_retry_limit": agent.max_retry_limit,
@@ -294,9 +296,7 @@ class Telemetry:
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                if llm:
                    self._add_attribute(
                        span, "llm", json.dumps(self._safe_llm_attributes(llm))
                    )
                    self._add_attribute(span, "llm", llm)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
@@ -316,9 +316,7 @@ class Telemetry:
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                if llm:
                    self._add_attribute(
                        span, "llm", json.dumps(self._safe_llm_attributes(llm))
                    )
                    self._add_attribute(span, "llm", llm)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
@@ -336,9 +334,7 @@ class Telemetry:
                    pkg_resources.get_distribution("crewai").version,
                )
                if llm:
                    self._add_attribute(
                        span, "llm", json.dumps(self._safe_llm_attributes(llm))
                    )
                    self._add_attribute(span, "llm", llm)
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
@@ -491,7 +487,7 @@ class Telemetry:
                        "max_iter": agent.max_iter,
                        "max_rpm": agent.max_rpm,
                        "i18n": agent.i18n.prompt_file,
                        "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                        "llm": agent.llm,
                        "delegation_enabled?": agent.allow_delegation,
                        "tools_names": [
                            tool.name.casefold() for tool in agent.tools or []
@@ -567,11 +563,3 @@ class Telemetry:
                return span.set_attribute(key, value)
            except Exception:
                pass

    def _safe_llm_attributes(self, llm):
        attributes = ["name", "model_name", "model", "top_k", "temperature"]
        if llm:
            safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
            safe_attributes["class"] = llm.__class__.__name__
            return safe_attributes
        return {}
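The removed `_safe_llm_attributes` helper allow-listed a handful of fields from `vars(llm)` before attaching them to a span; now that `agent.llm` is a plain model-name string it can be recorded directly. A sketch of what the old filter did (the `FakeLLM` class is invented for illustration):

class FakeLLM:
    def __init__(self):
        self.model_name = "gpt-4o"
        self.temperature = 0.7
        self.api_key = "sk-secret"  # must never reach telemetry

def safe_llm_attributes(llm):
    allowed = ["name", "model_name", "model", "top_k", "temperature"]
    if llm:
        # Keep only allow-listed attributes, plus the class name.
        safe = {k: v for k, v in vars(llm).items() if k in allowed}
        safe["class"] = llm.__class__.__name__
        return safe
    return {}

print(safe_llm_attributes(FakeLLM()))
# {'model_name': 'gpt-4o', 'temperature': 0.7, 'class': 'FakeLLM'}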
@@ -1,5 +1,4 @@
from langchain.tools import StructuredTool

from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools

@@ -1,39 +0,0 @@
import json
from typing import Any, List

import regex
from langchain.output_parsers import PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from pydantic import ValidationError


class ToolOutputParser(PydanticOutputParser):
    """Parses the function calling of a tool usage and its arguments."""

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        result[0].text = self._transform_in_valid_json(result[0].text)
        json_object = super().parse_result(result)
        try:
            return self.pydantic_object.parse_obj(json_object)
        except ValidationError as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
            raise OutputParserException(msg, llm_output=json_object)

    def _transform_in_valid_json(self, text) -> str:
        text = text.replace("```", "").replace("json", "")
        json_pattern = r"\{(?:[^{}]|(?R))*\}"
        matches = regex.finditer(json_pattern, text)

        for match in matches:
            try:
                # Attempt to parse the matched string as JSON
                json_obj = json.loads(match.group())
                # Return the first successfully parsed JSON object
                json_obj = json.dumps(json_obj)
                return str(json_obj)
            except json.JSONDecodeError:
                # If parsing fails, skip to the next match
                continue
        return text
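The deleted parser leaned on the third-party `regex` module, whose `(?R)` construct matches balanced braces recursively, to pull the first valid JSON object out of surrounding prose. A minimal sketch of that extraction trick:

import json
import regex  # third-party module; supports the recursive (?R) construct

def first_json_object(text: str):
    # Scan for balanced {...} spans and return the first one that parses.
    for match in regex.finditer(r"\{(?:[^{}]|(?R))*\}", text):
        try:
            return json.loads(match.group())
        except json.JSONDecodeError:
            continue
    return None

print(first_json_object('Thought: done\nAction Input: {"query": {"q": "cats"}}'))
# {'query': {'q': 'cats'}}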
@@ -4,9 +4,6 @@ from difflib import SequenceMatcher
from textwrap import dedent
from typing import Any, List, Union

from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI

from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task
from crewai.telemetry import Telemetry
@@ -20,7 +17,7 @@ if os.environ.get("AGENTOPS_API_KEY"):
    except ImportError:
        pass

OPENAI_BIGGER_MODELS = ["gpt-4o"]
OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o"]


class ToolUsageErrorException(Exception):
@@ -48,7 +45,7 @@ class ToolUsage:
    def __init__(
        self,
        tools_handler: ToolsHandler,
        tools: List[BaseTool],
        tools: List[Any],
        original_tools: List[Any],
        tools_description: str,
        tools_names: str,
@@ -73,18 +70,9 @@ class ToolUsage:
        self.action = action
        self.function_calling_llm = function_calling_llm

        # Handling bug (see https://github.com/langchain-ai/langchain/pull/16395): raise an error if tools_names have space for ChatOpenAI
        if isinstance(self.function_calling_llm, ChatOpenAI):
            if " " in self.tools_names:
                raise Exception(
                    "Tools names should not have spaces for ChatOpenAI models."
                )

        # Set the maximum parsing attempts for bigger models
        if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
            self.function_calling_llm.openai_api_base is None
        ):
            if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
        if self._is_gpt(self.function_calling_llm) and "4" in self.function_calling_llm:
            if self.function_calling_llm in OPENAI_BIGGER_MODELS:
                self._max_parsing_attempts = 2
                self._remember_format_after_usages = 4
@@ -116,7 +104,7 @@ class ToolUsage:
    def _use(
        self,
        tool_string: str,
        tool: BaseTool,
        tool: Any,
        calling: Union[ToolCalling, InstructorToolCalling],
    ) -> str:  # TODO: Fix this return type
        tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None  # type: ignore
@@ -125,8 +113,6 @@ class ToolUsage:
            result = self._i18n.errors("task_repeated_usage").format(
                tool_names=self.tools_names
            )
            if self.agent.verbose:
                self._printer.print(content=f"\n\n{result}\n", color="purple")
            self._telemetry.tool_repeated_usage(
                llm=self.function_calling_llm,
                tool_name=tool.name,
@@ -212,8 +198,6 @@ class ToolUsage:
                calling=calling, output=result, should_cache=should_cache
            )

            if self.agent.verbose:
                self._printer.print(content=f"\n\n{result}\n", color="purple")
            if agentops:
                agentops.record(tool_event)
            self._telemetry.tool_usage(
@@ -265,7 +249,7 @@ class ToolUsage:
            calling.arguments == last_tool_usage.arguments
        )

    def _select_tool(self, tool_name: str) -> BaseTool:
    def _select_tool(self, tool_name: str) -> Any:
        order_tools = sorted(
            self.tools,
            key=lambda tool: SequenceMatcher(
@@ -285,7 +269,7 @@ class ToolUsage:
            self.task.increment_tools_errors()
            if tool_name and tool_name != "":
                raise Exception(
                    f"Action '{tool_name}' don't exist, these are the only available Actions:\n {self.tools_description}"
                    f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
                )
            else:
                raise Exception(
@@ -312,7 +296,11 @@ class ToolUsage:
        return "\n--\n".join(descriptions)

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
        return (
            "gpt" in str(llm).lower()
            or "o1-preview" in str(llm).lower()
            or "o1-mini" in str(llm).lower()
        )
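`_is_gpt` drops the `isinstance(llm, ChatOpenAI)` check in favor of a substring test on the model name, which fits the new convention of passing LLMs around as strings. A compact restatement of the heuristic (function name changed for clarity; logic as in the hunk above):

def is_openai_model(llm) -> bool:
    # Name-based detection: matches "gpt-4o", "o1-preview", "o1-mini", etc.
    name = str(llm).lower()
    return "gpt" in name or "o1-preview" in name or "o1-mini" in name

assert is_openai_model("gpt-4o")
assert is_openai_model("o1-mini")
assert not is_openai_model("claude-3-opus")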
    def _tool_calling(
        self, tool_string: str
@@ -325,7 +313,7 @@ class ToolUsage:
            else ToolCalling
        )
        converter = Converter(
            text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n{tool_string}```",
            text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal one of the options, use this text to inform the valid output schema:\n\n### TEXT \n{tool_string}",
            llm=self.function_calling_llm,
            model=model,
            instructions=dedent(
@@ -5,22 +5,26 @@
    "backstory": "You are a seasoned manager with a knack for getting the best out of your team.\nYou are also known for your ability to delegate work to the right people, and to ask the right questions to get the best out of your team.\nEven though you don't perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members."
  },
  "slices": {
    "observation": "\nObservation",
    "observation": "\nObservation:",
    "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
    "memory": "\n\n# Useful context: \n{memory}",
    "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
    "no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
    "format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
    "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
    "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
    "getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: "
    "getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: ",
    "summarizer_system_message": "You are a helpful assistant that summarizes text.",
    "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
    "summary": "This is a summary of our conversation so far:\n{merged_summary}"
  },
  "errors": {
    "force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
    "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
    "agent_tool_unexsiting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
    "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
    "tool_usage_error": "I encountered an error: {error}",
@@ -1,7 +1,7 @@
from .converter import Converter, ConverterError
from .file_handler import FileHandler
from .i18n import I18N
from .instructor import Instructor
from .internal_instructor import InternalInstructor
from .logger import Logger
from .parser import YamlParser
from .printer import Printer
@@ -16,7 +16,7 @@ __all__ = [
    "ConverterError",
    "FileHandler",
    "I18N",
    "Instructor",
    "InternalInstructor",
    "Logger",
    "Printer",
    "Prompts",
@@ -2,8 +2,7 @@ import json
import re
from typing import Any, Optional, Type, Union

from langchain.schema import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from crewai.llm import LLM
from pydantic import BaseModel, ValidationError

from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
@@ -28,7 +27,12 @@ class Converter(OutputConverter):
            if self.is_gpt:
                return self._create_instructor().to_pydantic()
            else:
                return self._create_chain().invoke({})
                return LLM(model=self.llm).call(
                    [
                        {"role": "system", "content": self.instructions},
                        {"role": "user", "content": self.text},
                    ]
                )
        except Exception as e:
            if current_attempt < self.max_attempts:
                return self.to_pydantic(current_attempt + 1)
@@ -42,7 +46,14 @@ class Converter(OutputConverter):
            if self.is_gpt:
                return self._create_instructor().to_json()
            else:
                return json.dumps(self._create_chain().invoke({}).model_dump())
                return json.dumps(
                    LLM(model=self.llm).call(
                        [
                            {"role": "system", "content": self.instructions},
                            {"role": "user", "content": self.text},
                        ]
                    )
                )
        except Exception as e:
            if current_attempt < self.max_attempts:
                return self.to_json(current_attempt + 1)
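Both converter paths now funnel through a single `LLM(model=...).call(messages)` entry point instead of a LangChain chain. A rough sketch of that calling convention, assuming crewAI's litellm-backed `LLM` wrapper as shown in the hunks above:

from crewai.llm import LLM  # assumes crewAI's litellm-backed wrapper

def convert(instructions: str, text: str, model: str = "gpt-4o-mini") -> str:
    # One system message carrying the conversion instructions,
    # one user message carrying the raw text to convert.
    return LLM(model=model).call(
        [
            {"role": "system", "content": instructions},
            {"role": "user", "content": text},
        ]
    )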
@@ -50,33 +61,39 @@ class Converter(OutputConverter):

    def _create_instructor(self):
        """Create an instructor."""
        from crewai.utilities import Instructor
        from crewai.utilities import InternalInstructor

        inst = Instructor(
        inst = InternalInstructor(
            llm=self.llm,
            max_attempts=self.max_attempts,
            model=self.model,
            content=self.text,
            instructions=self.instructions,
        )
        return inst

    def _create_chain(self):
    def _convert_with_instructions(self):
        """Create a chain."""
        from crewai.utilities.crew_pydantic_output_parser import (
            CrewPydanticOutputParser,
        )

        parser = CrewPydanticOutputParser(pydantic_object=self.model)
        new_prompt = SystemMessage(content=self.instructions) + HumanMessage(
            content=self.text
        result = LLM(model=self.llm).call(
            [
                {"role": "system", "content": self.instructions},
                {"role": "user", "content": self.text},
            ]
        )
        return new_prompt | self.llm | parser
        return parser.parse_result(result)

    @property
    def is_gpt(self) -> bool:
        """Return if llm provided is of gpt from openai."""
        return isinstance(self.llm, ChatOpenAI) and self.llm.openai_api_base is None
        return (
            "gpt" in str(self.llm).lower()
            or "o1-preview" in str(self.llm).lower()
            or "o1-mini" in str(self.llm).lower()
        )

def convert_to_model(
@@ -89,23 +106,14 @@ def convert_to_model(
    model = output_pydantic or output_json
    if model is None:
        return result

    try:
        escaped_result = json.dumps(json.loads(result, strict=False))
        return validate_model(escaped_result, model, bool(output_json))
    except json.JSONDecodeError as e:
        Printer().print(
            content=f"Error parsing JSON: {e}. Attempting to handle partial JSON.",
            color="yellow",
        )
    except json.JSONDecodeError:
        return handle_partial_json(
            result, model, bool(output_json), agent, converter_cls
        )
    except ValidationError as e:
        Printer().print(
            content=f"Pydantic validation error: {e}. Attempting to handle partial JSON.",
            color="yellow",
        )
    except ValidationError:
        return handle_partial_json(
            result, model, bool(output_json), agent, converter_cls
        )
@@ -140,16 +148,10 @@ def handle_partial_json(
            if is_json_output:
                return exported_result.model_dump()
            return exported_result
        except json.JSONDecodeError as e:
            Printer().print(
                content=f"Error parsing JSON: {e}. The extracted JSON-like string is not valid JSON. Attempting alternative conversion method.",
                color="yellow",
            )
        except ValidationError as e:
            Printer().print(
                content=f"Pydantic validation error: {e}. The JSON structure doesn't match the expected model. Attempting alternative conversion method.",
                color="yellow",
            )
        except json.JSONDecodeError:
            pass
        except ValidationError:
            pass
        except Exception as e:
            Printer().print(
                content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
@@ -170,7 +172,6 @@ def convert_with_instructions(
) -> Union[dict, BaseModel, str]:
    llm = agent.function_calling_llm or agent.llm
    instructions = get_conversion_instructions(model, llm)

    converter = create_converter(
        agent=agent,
        converter_cls=converter_cls,
@@ -179,6 +180,7 @@ def convert_with_instructions(
        model=model,
        instructions=instructions,
    )

    exported_result = (
        converter.to_pydantic() if not is_json_output else converter.to_json()
    )
@@ -202,9 +204,12 @@ def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:


def is_gpt(llm: Any) -> bool:
    from langchain_openai import ChatOpenAI

    return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
    """Return if llm provided is of gpt from openai."""
    return (
        "gpt" in str(llm).lower()
        or "o1-preview" in str(llm).lower()
        or "o1-mini" in str(llm).lower()
    )


def create_converter(
@@ -1,33 +1,31 @@
import json
from typing import Any, List, Type

import regex
from langchain.output_parsers import PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from typing import Any, Type

from crewai.agents.parser import OutputParserException
from pydantic import BaseModel, ValidationError


class CrewPydanticOutputParser(PydanticOutputParser):
class CrewPydanticOutputParser:
    """Parses the text into pydantic models"""

    pydantic_object: Type[BaseModel]

    def parse_result(self, result: List[Generation]) -> Any:
        result[0].text = self._transform_in_valid_json(result[0].text)
    def parse_result(self, result: str) -> Any:
        result = self._transform_in_valid_json(result)

        # Treating edge case of function calling llm returning the name instead of tool_name
        json_object = json.loads(result[0].text)
        json_object = json.loads(result)
        if "tool_name" not in json_object:
            json_object["tool_name"] = json_object.get("name", "")
            result[0].text = json.dumps(json_object)
            result = json.dumps(json_object)

        try:
            return self.pydantic_object.model_validate(json_object)
        except ValidationError as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
            raise OutputParserException(msg, llm_output=json_object)
            raise OutputParserException(error=msg)

    def _transform_in_valid_json(self, text) -> str:
        text = text.replace("```", "").replace("json", "")
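The rewritten parser accepts a raw string and, before validating, normalizes the `name` key that some function-calling models emit instead of `tool_name`. A minimal sketch of that normalization step (the `ToolCallSketch` model is illustrative):

import json
from pydantic import BaseModel

class ToolCallSketch(BaseModel):
    tool_name: str
    arguments: dict

def parse_tool_call(raw: str) -> ToolCallSketch:
    obj = json.loads(raw)
    # Some function-calling models emit "name" instead of "tool_name".
    if "tool_name" not in obj:
        obj["tool_name"] = obj.get("name", "")
    return ToolCallSketch.model_validate(obj)

print(parse_tool_call('{"name": "search", "arguments": {"q": "crewai"}}'))
# tool_name='search' arguments={'q': 'crewai'}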
@@ -4,7 +4,6 @@ from crewai.agent import Agent
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from rich.box import HEAVY_EDGE
from rich.console import Console
@@ -51,7 +50,7 @@ class CrewEvaluator:
            ),
            backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
            verbose=False,
            llm=ChatOpenAI(model=self.openai_model_name),
            llm=self.openai_model_name,
        )

    def _evaluation_task(
@@ -1,7 +1,6 @@
import os
from typing import List

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

from crewai.utilities import Converter
@@ -93,7 +92,11 @@ class TaskEvaluator:
        return converter.to_pydantic()

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
        return (
            "gpt" in str(self.llm).lower()
            or "o1-preview" in str(self.llm).lower()
            or "o1-mini" in str(self.llm).lower()
        )

    def evaluate_training_data(
        self, training_data: dict, agent_id: str
@@ -1,50 +0,0 @@
from typing import Any, Optional, Type

import instructor
from pydantic import BaseModel, Field, PrivateAttr, model_validator


class Instructor(BaseModel):
    """Class that wraps an agent llm with instructor."""

    _client: Any = PrivateAttr()
    content: str = Field(description="Content to be sent to the instructor.")
    agent: Optional[Any] = Field(
        description="The agent that needs to use instructor.", default=None
    )
    llm: Optional[Any] = Field(
        description="The agent that needs to use instructor.", default=None
    )
    instructions: Optional[str] = Field(
        description="Instructions to be sent to the instructor.",
        default=None,
    )
    model: Type[BaseModel] = Field(
        description="Pydantic model to be used to create an output."
    )

    @model_validator(mode="after")
    def set_instructor(self):
        """Set instructor."""
        if self.agent and not self.llm:
            self.llm = self.agent.function_calling_llm or self.agent.llm

        self._client = instructor.patch(
            self.llm.client._client,
            mode=instructor.Mode.TOOLS,
        )
        return self

    def to_json(self):
        model = self.to_pydantic()
        return model.model_dump_json(indent=2)

    def to_pydantic(self):
        messages = [{"role": "user", "content": self.content}]
        if self.instructions:
            messages.append({"role": "system", "content": self.instructions})

        model = self._client.chat.completions.create(
            model=self.llm.model_name, response_model=self.model, messages=messages
        )
        return model
src/crewai/utilities/internal_instructor.py (new file, 47 lines)
@@ -0,0 +1,47 @@
from typing import Any, Optional, Type

import instructor
from litellm import completion


class InternalInstructor:
    """Class that wraps an agent llm with instructor."""

    def __init__(
        self,
        content: str,
        model: Type,
        agent: Optional[Any] = None,
        llm: Optional[str] = None,
        instructions: Optional[str] = None,
    ):
        self.content = content
        self.agent = agent
        self.llm = llm
        self.instructions = instructions
        self.model = model
        self._client = None
        self.set_instructor()

    def set_instructor(self):
        """Set instructor."""
        if self.agent and not self.llm:
            self.llm = self.agent.function_calling_llm or self.agent.llm

        self._client = instructor.from_litellm(
            completion,
            mode=instructor.Mode.TOOLS,
        )

    def to_json(self):
        model = self.to_pydantic()
        return model.model_dump_json(indent=2)

    def to_pydantic(self):
        messages = [{"role": "user", "content": self.content}]
        if self.instructions:
            messages.append({"role": "system", "content": self.instructions})
        model = self._client.chat.completions.create(
            model=self.llm, response_model=self.model, messages=messages
        )
        return model
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.agent import Agent
|
||||
@@ -27,7 +25,7 @@ class CrewPlanner:
|
||||
self.tasks = tasks
|
||||
|
||||
if planning_agent_llm is None:
|
||||
self.planning_agent_llm = ChatOpenAI(model="gpt-4o-mini")
|
||||
self.planning_agent_llm = "gpt-4o-mini"
|
||||
else:
|
||||
self.planning_agent_llm = planning_agent_llm
|
||||
|
||||
|
||||
@@ -1,5 +1,8 @@
from typing import Optional


class Printer:
    def print(self, content: str, color: str):
    def print(self, content: str, color: Optional[str] = None):
        if color == "purple":
            self._print_purple(content)
        elif color == "red":
@@ -1,8 +1,5 @@
from typing import Any, ClassVar, Optional

from langchain.prompts import BasePromptTemplate, PromptTemplate
from pydantic import BaseModel, Field

from typing import Any, Optional
from crewai.utilities import I18N


@@ -14,27 +11,38 @@ class Prompts(BaseModel):
    system_template: Optional[str] = None
    prompt_template: Optional[str] = None
    response_template: Optional[str] = None
    SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"
    use_system_prompt: Optional[bool] = False
    agent: Any

    def task_execution(self) -> BasePromptTemplate:
    def task_execution(self) -> dict[str, str]:
        """Generate a standard prompt for task execution."""
        slices = ["role_playing"]
        if len(self.tools) > 0:
            slices.append("tools")
        else:
            slices.append("no_tools")

        system = self._build_prompt(slices)
        slices.append("task")

        if not self.system_template and not self.prompt_template:
            return self._build_prompt(slices)
        if (
            not self.system_template
            and not self.prompt_template
            and self.use_system_prompt
        ):
            return {
                "system": system,
                "user": self._build_prompt(["task"]),
                "prompt": self._build_prompt(slices),
            }
        else:
            return self._build_prompt(
                slices,
                self.system_template,
                self.prompt_template,
                self.response_template,
            )
            return {
                "prompt": self._build_prompt(
                    slices,
                    self.system_template,
                    self.prompt_template,
                    self.response_template,
                )
            }

    def _build_prompt(
        self,
@@ -42,12 +50,11 @@ class Prompts(BaseModel):
        system_template=None,
        prompt_template=None,
        response_template=None,
    ) -> BasePromptTemplate:
    ) -> str:
        """Constructs a prompt string from specified components."""
        if not system_template and not prompt_template:
            prompt_parts = [self.i18n.slice(component) for component in components]
            prompt_parts.append(self.SCRATCHPAD_SLICE)
            prompt = PromptTemplate.from_template("".join(prompt_parts))
            prompt = "".join(prompt_parts)
        else:
            prompt_parts = [
                self.i18n.slice(component)
@@ -56,9 +63,14 @@ class Prompts(BaseModel):
            ]
            system = system_template.replace("{{ .System }}", "".join(prompt_parts))
            prompt = prompt_template.replace(
                "{{ .Prompt }}",
                "".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]),
                "{{ .Prompt }}", "".join(self.i18n.slice("task"))
            )
            response = response_template.split("{{ .Response }}")[0]
            prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}")
            prompt = f"{system}\n{prompt}\n{response}"

        prompt = (
            prompt.replace("{goal}", self.agent.goal)
            .replace("{role}", self.agent.role)
            .replace("{backstory}", self.agent.backstory)
        )
        return prompt
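`task_execution()` now returns plain strings keyed `system`/`user`/`prompt`, which a caller can map directly onto a chat-completion message list. A minimal sketch of consuming that dict (the helper name is hypothetical):

def to_messages(prompts: dict[str, str]) -> list[dict[str, str]]:
    # If a system prompt was built, split system/user roles;
    # otherwise send the single combined prompt as the user message.
    if "system" in prompts:
        return [
            {"role": "system", "content": prompts["system"]},
            {"role": "user", "content": prompts["user"]},
        ]
    return [{"role": "user", "content": prompts["prompt"]}]

print(to_messages({"prompt": "You are a researcher. Current Task: ..."}))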
@@ -52,7 +52,7 @@ class RPMController(BaseModel):
        self._timer = None

    def _wait_for_next_minute(self):
        time.sleep(60)
        time.sleep(1)
        self._current_rpm = 0

    def _reset_request_count(self):
@@ -1,36 +1,17 @@
from typing import Any, Dict, List

import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult

from litellm.integrations.custom_logger import CustomLogger
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess


class TokenCalcHandler(BaseCallbackHandler):
    model_name: str = ""
    token_cost_process: TokenProcess
    encoding: tiktoken.Encoding

    def __init__(self, model_name, token_cost_process):
        self.model_name = model_name
class TokenCalcHandler(CustomLogger):
    def __init__(self, token_cost_process: TokenProcess):
        self.token_cost_process = token_cost_process
        try:
            self.encoding = tiktoken.encoding_for_model(self.model_name)
        except KeyError:
            self.encoding = tiktoken.get_encoding("cl100k_base")

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        if self.token_cost_process is None:
            return

        for prompt in prompts:
            self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.token_cost_process.sum_completion_tokens(1)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.token_cost_process.sum_successful_requests(1)
        self.token_cost_process.sum_prompt_tokens(response_obj["usage"].prompt_tokens)
        self.token_cost_process.sum_completion_tokens(
            response_obj["usage"].completion_tokens
        )
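Token counting moves from tiktoken estimates in a LangChain callback to the provider-reported usage block, delivered through a litellm `CustomLogger`. A hedged sketch of wiring such a logger into litellm via its documented `litellm.callbacks` list:

import litellm
from litellm.integrations.custom_logger import CustomLogger

class UsageLogger(CustomLogger):
    def __init__(self):
        self.prompt_tokens = 0
        self.completion_tokens = 0

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # Accumulate the provider-reported token counts.
        usage = response_obj["usage"]
        self.prompt_tokens += usage.prompt_tokens
        self.completion_tokens += usage.completion_tokens

usage_logger = UsageLogger()
litellm.callbacks = [usage_logger]  # invoked after each successful completion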