rebuilding executor

This commit is contained in:
João Moura
2024-09-11 16:58:56 -07:00
parent 322780a5f3
commit 3cd464497b
134 changed files with 35040 additions and 1620079 deletions

View File

@@ -1,23 +1,17 @@
import os
from inspect import signature
from typing import Any, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import BaseTool
from langchain.agents.tools import tool as LangChainTool
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from typing import Any, List, Optional
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser
from crewai.agents import CacheHandler
from crewai.utilities import Converter, Prompts
from crewai.tools.agent_tools import AgentTools
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.token_counter_callback import TokenCalcHandler
def mock_agent_ops_provider():
@@ -34,7 +28,6 @@ agentops = None
if os.environ.get("AGENTOPS_API_KEY"):
try:
import agentops # type: ignore # Name "agentops" already defined on line 21
from agentops import track_agent
except ImportError:
track_agent = mock_agent_ops_provider()
@@ -64,7 +57,6 @@ class Agent(BaseAgent):
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
tools: Tools at the agent's disposal.
step_callback: Callback to be executed after each step of the agent execution.
callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
"""
_times_executed: int = PrivateAttr(default=0)
@@ -82,17 +74,11 @@ class Agent(BaseAgent):
description="Callback to be executed after each step of the agent execution.",
)
llm: Any = Field(
default_factory=lambda: ChatOpenAI(
model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4o")
),
description="Language model that will run the agent.",
description="Language model that will run the agent.", default="gpt-4o"
)
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
default=None, description="Callback to be executed"
)
system_template: Optional[str] = Field(
default=None, description="System format for the agent."
)
@@ -108,6 +94,9 @@ class Agent(BaseAgent):
allow_code_execution: Optional[bool] = Field(
default=False, description="Enable code execution for the agent."
)
sliding_context_window: Optional[bool] = Field(
default=False, description="Enable sliding context window for the agent."
)
max_retry_limit: int = Field(
default=2,
description="Maximum number of retries for an agent to execute a task when an error occurs.",
@@ -116,38 +105,17 @@ class Agent(BaseAgent):
@model_validator(mode="after")
def post_init_setup(self):
self.agent_ops_agent_name = self.role
# Different llms store the model name in different attributes
model_name = getattr(self.llm, "model_name", None) or getattr(
self.llm, "deployment_name", None
self.llm = self.llm.model_name if hasattr(self.llm, "model_name") else self.llm
self.function_calling_llm = (
self.function_calling_llm.model_name
if hasattr(self.function_calling_llm, "model_name")
else self.function_calling_llm
)
if model_name:
self._setup_llm_callbacks(model_name)
if not self.agent_executor:
self._setup_agent_executor()
return self
def _setup_llm_callbacks(self, model_name: str):
token_handler = TokenCalcHandler(model_name, self._token_process)
if not isinstance(self.llm.callbacks, list):
self.llm.callbacks = []
if not any(
isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
):
self.llm.callbacks.append(token_handler)
if agentops and not any(
isinstance(handler, agentops.LangchainCallbackHandler)
for handler in self.llm.callbacks
):
agentops.stop_instrumenting()
self.llm.callbacks.append(agentops.LangchainCallbackHandler())
def _setup_agent_executor(self):
if not self.cache_handler:
self.cache_handler = CacheHandler()
@@ -190,15 +158,7 @@ class Agent(BaseAgent):
task_prompt += self.i18n.slice("memory").format(memory=memory)
tools = tools or self.tools or []
parsed_tools = self._parse_tools(tools)
self.create_agent_executor(tools=tools)
self.agent_executor.tools = parsed_tools
self.agent_executor.task = task
self.agent_executor.tools_description = self._render_text_description_and_args(
parsed_tools
)
self.agent_executor.tools_names = self.__tools_names(parsed_tools)
self.create_agent_executor(tools=tools, task=task)
if self.crew and self.crew._train:
task_prompt = self._training_handler(task_prompt=task_prompt)
@@ -211,6 +171,7 @@ class Agent(BaseAgent):
"input": task_prompt,
"tool_names": self.agent_executor.tools_names,
"tools": self.agent_executor.tools_description,
"should_ask_for_human_input": task.human_input,
}
)["output"]
except Exception as e:
@@ -231,73 +192,24 @@ class Agent(BaseAgent):
return result
def format_log_to_str(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
def create_agent_executor(self, tools=None) -> None:
def create_agent_executor(self, tools=None, task=None) -> None:
"""Create an agent executor for the agent.
Returns:
An instance of the CrewAgentExecutor class.
"""
tools = tools or self.tools or []
agent_args = {
"input": lambda x: x["input"],
"tools": lambda x: x["tools"],
"tool_names": lambda x: x["tool_names"],
"agent_scratchpad": lambda x: self.format_log_to_str(
x["intermediate_steps"]
),
}
executor_args = {
"llm": self.llm,
"i18n": self.i18n,
"crew": self.crew,
"crew_agent": self,
"tools": self._parse_tools(tools),
"verbose": self.verbose,
"original_tools": tools,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"max_execution_time": self.max_execution_time,
"step_callback": self.step_callback,
"tools_handler": self.tools_handler,
"function_calling_llm": self.function_calling_llm,
"callbacks": self.callbacks,
"max_tokens": self.max_tokens,
}
if self._rpm_controller:
executor_args["request_within_rpm_limit"] = (
self._rpm_controller.check_or_wait
)
parsed_tools = self._parse_tools(tools)
prompt = Prompts(
i18n=self.i18n,
agent=self,
tools=tools,
i18n=self.i18n,
system_template=self.system_template,
prompt_template=self.prompt_template,
response_template=self.response_template,
).task_execution()
execution_prompt = prompt.partial(
goal=self.goal,
role=self.role,
backstory=self.backstory,
)
stop_words = [self.i18n.slice("observation")]
if self.response_template:
@@ -305,11 +217,26 @@ class Agent(BaseAgent):
self.response_template.split("{{ .Response }}")[1].strip()
)
bind = self.llm.bind(stop=stop_words)
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
self.agent_executor = CrewAgentExecutor(
agent=RunnableAgent(runnable=inner_agent), **executor_args
llm=self.llm,
task=task,
agent=self,
crew=self.crew,
tools=parsed_tools,
prompt=prompt,
original_tools=tools,
stop_words=stop_words,
max_iter=self.max_iter,
tools_handler=self.tools_handler,
tools_names=self.__tools_names(parsed_tools),
tools_description=self._render_text_description_and_args(parsed_tools),
step_callback=self.step_callback,
function_calling_llm=self.function_calling_llm,
sliding_context_window=self.sliding_context_window,
request_within_rpm_limit=self._rpm_controller.check_or_wait
if self._rpm_controller
else None,
callbacks=[TokenCalcHandler(self._token_process)],
)
def get_delegation_tools(self, agents: List[BaseAgent]):
@@ -330,7 +257,7 @@ class Agent(BaseAgent):
def get_output_converter(self, llm, text, model, instructions):
return Converter(llm=llm, text=text, model=model, instructions=instructions)
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]: # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
def _parse_tools(self, tools: List[Any]) -> List[Any]: # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
"""Parse tools to be used for the task."""
tools_list = []
try:
@@ -373,7 +300,7 @@ class Agent(BaseAgent):
)
return task_prompt
def _render_text_description(self, tools: List[BaseTool]) -> str:
def _render_text_description(self, tools: List[Any]) -> str:
"""Render the tool name and description in plain text.
Output will be in the format of:
@@ -392,7 +319,7 @@ class Agent(BaseAgent):
return description
def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
def _render_text_description_and_args(self, tools: List[Any]) -> str:
"""Render the tool name, description, and args in plain text.
Output will be in the format of:
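
The practical upshot of the llm field change above is that an agent can now be configured with a plain model-name string instead of a LangChain chat-model object. A minimal sketch of the new surface, assuming only the fields shown in the hunks (role, goal and backstory are the usual required BaseAgent fields):

from crewai import Agent  # import path assumed from the package layout above

researcher = Agent(
    role="Researcher",
    goal="Find relevant facts",
    backstory="An analyst who digs through sources.",
    llm="gpt-4o",                  # plain model name; litellm resolves it at call time
    sliding_context_window=True,   # summarize history instead of failing on context overflow
    max_retry_limit=2,
)
# Passing an object that has .model_name still works: post_init_setup() unwraps it
# into the plain string form shown above.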

View File

@@ -1,6 +1,5 @@
from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .parser import CrewAgentParser
from .tools_handler import ToolsHandler
__all__ = ["CacheHandler", "CrewAgentExecutor", "CrewAgentParser", "ToolsHandler"]
__all__ = ["CacheHandler", "CrewAgentParser", "ToolsHandler"]

View File

@@ -224,10 +224,8 @@ class BaseAgent(ABC, BaseModel):
# Copy llm and clear callbacks
existing_llm = shallow_copy(self.llm)
existing_llm.callbacks = []
copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None}
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
return copied_agent

View File

@@ -19,15 +19,15 @@ class CrewAgentExecutorMixin:
crew_agent: Optional["BaseAgent"]
task: Optional["Task"]
iterations: int
force_answer_max_iterations: int
have_forced_answer: bool
_i18n: I18N
def _should_force_answer(self) -> bool:
"""Determine if a forced answer is required based on iteration count."""
return (
self.iterations == self.force_answer_max_iterations
) and not self.have_forced_answer
print("*** self.iterations", self.iterations)
print("*** self.max_iter", self.max_iter)
print("*** self.have_forced_answer", self.have_forced_answer)
return (self.iterations >= self.max_iter) and not self.have_forced_answer
def _create_short_term_memory(self, output) -> None:
"""Create and save a short-term memory item if conditions are met."""

View File

@@ -0,0 +1,267 @@
from typing import Any, Dict, List
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.parser import CrewAgentParser
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
from crewai.utilities.logger import Logger
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.llm import LLM
from crewai.agents.parser import AgentAction, AgentFinish, OutputParserException
class CrewAgentExecutor(CrewAgentExecutorMixin):
_logger: Logger = Logger()
def __init__(
self,
llm: Any,
task: Any,
crew: Any,
agent: Any,
prompt: str,
max_iter: int,
tools: List[Any],
tools_names: str,
stop_words: List[str],
tools_description: str,
tools_handler: ToolsHandler,
step_callback: Any = None,
original_tools: List[Any] = [],
function_calling_llm: Any = None,
sliding_context_window: bool = False,
request_within_rpm_limit: Any = None,
callbacks: List[Any] = [],
):
self._i18n: I18N = I18N()
self.llm = llm
self.task = task
self.agent = agent
self.crew = crew
self.prompt = prompt
self.tools = tools
self.tools_names = tools_names
self.stop = stop_words
self.max_iter = max_iter
self.callbacks = callbacks
self.tools_handler = tools_handler
self.original_tools = original_tools
self.step_callback = step_callback
self.tools_description = tools_description
self.function_calling_llm = function_calling_llm
self.sliding_context_window = sliding_context_window
self.request_within_rpm_limit = request_within_rpm_limit
self.should_ask_for_human_input = False
self.messages = []
self.iterations = 0
self.have_forced_answer = False
self.name_to_tool_map = {tool.name: tool for tool in self.tools}
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
formatted_answer = None
formatted_prompt = self._format_prompt(self.prompt, inputs)
self.should_ask_for_human_input = inputs.get(
"should_ask_for_human_input", False
)
self.messages = self._messages(formatted_prompt)
formatted_answer = self._invoke_loop(formatted_answer)
if self.should_ask_for_human_input:
human_feedback = self._ask_human_input(formatted_answer.output)
if self.crew and self.crew._train:
self._handle_crew_training_output(formatted_answer, human_feedback)
# Making sure we only ask for it once, so disabling for the next thought loop
self.should_ask_for_human_input = False
self.messages.append(
{"role": "user", "content": f"Feedback: {human_feedback}"}
)
formatted_answer = self._invoke_loop(None)
return {"output": formatted_answer.output}
def _invoke_loop(self, formatted_answer):
try:
while not isinstance(formatted_answer, AgentFinish):
# print('2222222')
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
# print('3333333')
answer = LLM(
self.llm, stop=self.stop, callbacks=self.callbacks
).call(self.messages)
self.iterations += 1
print("*** self.iterations", self.iterations)
# if self.iterations > 11:
# sadasd
formatted_answer = self._format_answer(answer)
if isinstance(formatted_answer, AgentAction):
# print('4444444')
action_result = self._use_tool(formatted_answer)
formatted_answer.text += f"\nObservation: {action_result}"
# print(formatted_answer)
if self.step_callback:
formatted_answer.result = action_result
self.step_callback(formatted_answer)
if self._should_force_answer():
if self.have_forced_answer:
return {
"output": self._i18n.errors(
"force_final_answer_error"
).format(formatted_answer.text)
}
else:
formatted_answer.text += (
f'\n{self._i18n.errors("force_final_answer")}'
)
self.have_forced_answer = True
self.messages.append(
{"role": "assistant", "content": formatted_answer.text}
)
except OutputParserException as e:
# print('5555555')
self.messages.append({"role": "assistant", "content": e.error})
self._invoke_loop(formatted_answer)
except Exception as e:
# print('6666666')
print("*** e", e)
if LLMContextLengthExceededException(str(e))._is_context_limit_error(
str(e)
):
self._handle_context_length()
self._invoke_loop(formatted_answer)
# print('7777777')
return formatted_answer
def _use_tool(self, agent_action: AgentAction) -> None:
tool_usage = ToolUsage(
tools_handler=self.tools_handler,
tools=self.tools,
original_tools=self.original_tools,
tools_description=self.tools_description,
tools_names=self.tools_names,
function_calling_llm=self.function_calling_llm,
task=self.task,
agent=self.agent,
action=agent_action,
)
tool_calling = tool_usage.parse(agent_action.text)
if isinstance(tool_calling, ToolUsageErrorException):
tool_result = tool_calling.message
else:
if tool_calling.tool_name.casefold().strip() in [
name.casefold().strip() for name in self.name_to_tool_map
] or tool_calling.tool_name.casefold().replace("_", " ") in [
name.casefold().strip() for name in self.name_to_tool_map
]:
tool_result = tool_usage.use(tool_calling, agent_action.text)
else:
tool_result = self._i18n.errors("wrong_tool_name").format(
tool=tool_calling.tool_name,
tools=", ".join([tool.name.casefold() for tool in self.tools]),
)
return tool_result
def _summarize_messages(self) -> None:
llm = LLM(self.llm)
grouped_messages = []
for message in self.messages:
content = message["content"]
for i in range(0, len(content), 5000):
grouped_messages.append(content[i : i + 5000])
summarized_contents = []
for group in grouped_messages:
summary = llm.call(
[
{
"role": "system",
"content": "You are a helpful assistant that summarizes text.",
},
{
"role": "user",
"content": f"Summarize the following text, make sure to include all the important information: {group}",
},
]
)
summarized_contents.append(summary)
merged_summary = " ".join(summarized_contents)
self.messages = [
{
"role": "user",
"content": f"This is a summary of our conversation so far:\n{merged_summary}",
}
]
def _handle_context_length(self) -> None:
if self.sliding_context_window:
self._logger.log(
"debug",
"Context length exceeded. Summarizing content to fit the model context window.",
color="yellow",
)
self._summarize_messages()
else:
self._logger.log(
"debug",
"Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
def _handle_crew_training_output(
self, result: AgentFinish, human_feedback: str | None = None
) -> None:
"""Function to handle the process of the training data."""
agent_id = str(self.agent.id)
if (
CrewTrainingHandler(TRAINING_DATA_FILE).load()
and not self.should_ask_for_human_input
):
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
if training_data.get(agent_id):
training_data[agent_id][self.crew._train_iteration][
"improved_output"
] = result.output
CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)
if self.should_ask_for_human_input and human_feedback is not None:
training_data = {
"initial_output": result.output,
"human_feedback": human_feedback,
"agent": agent_id,
"agent_role": self.agent.role,
}
CrewTrainingHandler(TRAINING_DATA_FILE).append(
self.crew._train_iteration, agent_id, training_data
)
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
prompt = prompt.replace("{input}", inputs["input"])
prompt = prompt.replace("{tool_names}", inputs["tool_names"])
prompt = prompt.replace("{tools}", inputs["tools"])
return prompt
def _format_answer(self, answer: str) -> str:
return CrewAgentParser(agent=self).parse(answer)
def _messages(self, prompt: str) -> List[Dict[str, str]]:
return [{"role": "user", "content": prompt}]

View File

@@ -1,397 +0,0 @@
import threading
import time
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
import click
from langchain.agents import AgentExecutor
from langchain.agents.agent import ExceptionTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.exceptions import OutputParserException
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
from crewai.utilities.logger import Logger
from crewai.utilities.training_handler import CrewTrainingHandler
class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
_i18n: I18N = I18N()
should_ask_for_human_input: bool = False
llm: Any = None
iterations: int = 0
task: Any = None
tools_description: str = ""
tools_names: str = ""
original_tools: List[Any] = []
crew_agent: Any = None
crew: Any = None
function_calling_llm: Any = None
request_within_rpm_limit: Any = None
tools_handler: Optional[InstanceOf[ToolsHandler]] = None
max_iterations: Optional[int] = 15
have_forced_answer: bool = False
force_answer_max_iterations: Optional[int] = None # type: ignore # Incompatible types in assignment (expression has type "int | None", base class "CrewAgentExecutorMixin" defined the type as "int")
step_callback: Optional[Any] = None
system_template: Optional[str] = None
prompt_template: Optional[str] = None
response_template: Optional[str] = None
_logger: Logger = Logger()
_fit_context_window_strategy: Optional[Literal["summarize"]] = "summarize"
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run text through and get agent response."""
# Construct a mapping of tool name to tool for easy lookup
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name.casefold() for tool in self.tools],
excluded_colors=["green", "red"],
)
intermediate_steps: List[Tuple[AgentAction, str]] = []
# Allowing human input given task setting
if self.task and self.task.human_input:
self.should_ask_for_human_input = True
# Let's start tracking the number of iterations and time elapsed
self.iterations = 0
time_elapsed = 0.0
start_time = time.time()
# We now enter the agent loop (until it returns something).
while self._should_continue(self.iterations, time_elapsed):
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
next_step_output = self._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager=run_manager,
)
if self.step_callback:
self.step_callback(next_step_output)
if isinstance(next_step_output, AgentFinish):
# Creating long term memory
create_long_term_memory = threading.Thread(
target=self._create_long_term_memory, args=(next_step_output,)
)
create_long_term_memory.start()
return self._return(
next_step_output, intermediate_steps, run_manager=run_manager
)
intermediate_steps.extend(next_step_output)
if len(next_step_output) == 1:
next_step_action = next_step_output[0]
# See if tool should return directly
tool_return = self._get_tool_return(next_step_action)
if tool_return is not None:
return self._return(
tool_return, intermediate_steps, run_manager=run_manager
)
self.iterations += 1
time_elapsed = time.time() - start_time
output = self.agent.return_stopped_response(
self.early_stopping_method, intermediate_steps, **inputs
)
return self._return(output, intermediate_steps, run_manager=run_manager)
def _iter_next_step(
self,
name_to_tool_map: Dict[str, BaseTool],
color_mapping: Dict[str, str],
inputs: Dict[str, str],
intermediate_steps: List[Tuple[AgentAction, str]],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
try:
if self._should_force_answer():
error = self._i18n.errors("force_final_answer")
output = AgentAction("_Exception", error, error)
self.have_forced_answer = True
yield AgentStep(action=output, observation=error)
return
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
# Call the LLM to see what to do.
output = self.agent.plan(
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
else:
raise_error = False
if raise_error:
raise ValueError(
"An output parsing error occurred. "
"In order to pass this error back to the agent and have it try "
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
f"This is the error: {str(e)}"
)
str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = f"\n{str(e.observation)}"
str(e.llm_output)
else:
observation = ""
elif isinstance(self.handle_parsing_errors, str):
observation = f"\n{self.handle_parsing_errors}"
elif callable(self.handle_parsing_errors):
observation = f"\n{self.handle_parsing_errors(e)}"
else:
raise ValueError("Got unexpected type of `handle_parsing_errors`")
output = AgentAction("_Exception", observation, "")
if run_manager:
run_manager.on_agent_action(output, color="green")
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = ExceptionTool().run(
output.tool_input,
verbose=False,
color=None,
callbacks=run_manager.get_child() if run_manager else None,
**tool_run_kwargs,
)
if self._should_force_answer():
error = self._i18n.errors("force_final_answer")
output = AgentAction("_Exception", error, error)
yield AgentStep(action=output, observation=error)
return
yield AgentStep(action=output, observation=observation)
return
except Exception as e:
if LLMContextLengthExceededException(str(e))._is_context_limit_error(
str(e)
):
output = self._handle_context_length_error(
intermediate_steps, run_manager, inputs
)
if isinstance(output, AgentFinish):
yield output
elif isinstance(output, list):
for step in output:
yield step
return
raise e
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
if self.should_ask_for_human_input:
human_feedback = self._ask_human_input(output.return_values["output"])
if self.crew and self.crew._train:
self._handle_crew_training_output(output, human_feedback)
# Making sure we only ask for it once, so disabling for the next thought loop
self.should_ask_for_human_input = False
action = AgentAction(
tool="Human Input", tool_input=human_feedback, log=output.log
)
yield AgentStep(
action=action,
observation=self._i18n.slice("human_feedback").format(
human_feedback=human_feedback
),
)
return
else:
if self.crew and self.crew._train:
self._handle_crew_training_output(output)
yield output
return
self._create_short_term_memory(output)
actions: List[AgentAction]
actions = [output] if isinstance(output, AgentAction) else output
yield from actions
for agent_action in actions:
if run_manager:
run_manager.on_agent_action(agent_action, color="green")
tool_usage = ToolUsage(
tools_handler=self.tools_handler, # type: ignore # Argument "tools_handler" to "ToolUsage" has incompatible type "ToolsHandler | None"; expected "ToolsHandler"
tools=self.tools, # type: ignore # Argument "tools" to "ToolUsage" has incompatible type "Sequence[BaseTool]"; expected "list[BaseTool]"
original_tools=self.original_tools,
tools_description=self.tools_description,
tools_names=self.tools_names,
function_calling_llm=self.function_calling_llm,
task=self.task,
agent=self.crew_agent,
action=agent_action,
)
tool_calling = tool_usage.parse(agent_action.log)
if isinstance(tool_calling, ToolUsageErrorException):
observation = tool_calling.message
else:
if tool_calling.tool_name.casefold().strip() in [
name.casefold().strip() for name in name_to_tool_map
] or tool_calling.tool_name.casefold().replace("_", " ") in [
name.casefold().strip() for name in name_to_tool_map
]:
observation = tool_usage.use(tool_calling, agent_action.log)
else:
observation = self._i18n.errors("wrong_tool_name").format(
tool=tool_calling.tool_name,
tools=", ".join([tool.name.casefold() for tool in self.tools]),
)
yield AgentStep(action=agent_action, observation=observation)
def _handle_crew_training_output(
self, output: AgentFinish, human_feedback: str | None = None
) -> None:
"""Function to handle the process of the training data."""
agent_id = str(self.crew_agent.id)
if (
CrewTrainingHandler(TRAINING_DATA_FILE).load()
and not self.should_ask_for_human_input
):
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
if training_data.get(agent_id):
training_data[agent_id][self.crew._train_iteration][
"improved_output"
] = output.return_values["output"]
CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)
if self.should_ask_for_human_input and human_feedback is not None:
training_data = {
"initial_output": output.return_values["output"],
"human_feedback": human_feedback,
"agent": agent_id,
"agent_role": self.crew_agent.role,
}
CrewTrainingHandler(TRAINING_DATA_FILE).append(
self.crew._train_iteration, agent_id, training_data
)
def _handle_context_length(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> List[Tuple[AgentAction, str]]:
text = intermediate_steps[0][1]
original_action = intermediate_steps[0][0]
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n"],
chunk_size=8000,
chunk_overlap=500,
)
if self._fit_context_window_strategy == "summarize":
docs = text_splitter.create_documents([text])
self._logger.log(
"debug",
"Summarizing Content, it is recommended to use a RAG tool",
color="bold_blue",
)
summarize_chain = load_summarize_chain(
self.llm, chain_type="map_reduce", verbose=True
)
summarized_docs = []
for doc in docs:
summary = summarize_chain.invoke(
{"input_documents": [doc]}, return_only_outputs=True
)
summarized_docs.append(summary["output_text"])
formatted_results = "\n\n".join(summarized_docs)
summary_step = AgentStep(
action=AgentAction(
tool=original_action.tool,
tool_input=original_action.tool_input,
log=original_action.log,
),
observation=formatted_results,
)
summary_tuple = (summary_step.action, summary_step.observation)
return [summary_tuple]
return intermediate_steps
def _handle_context_length_error(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
run_manager: Optional[CallbackManagerForChainRun],
inputs: Dict[str, str],
) -> Union[AgentFinish, List[AgentStep]]:
self._logger.log(
"debug",
"Context length exceeded. Asking user if they want to use summarize prompt to fit, this will reduce context length.",
color="yellow",
)
user_choice = click.confirm(
"Context length exceeded. Do you want to summarize the text to fit models context window?"
)
if user_choice:
self._logger.log(
"debug",
"Context length exceeded. Using summarize prompt to fit, this will reduce context length.",
color="bold_blue",
)
intermediate_steps = self._handle_context_length(intermediate_steps)
output = self.agent.plan(
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
)
if isinstance(output, AgentFinish):
return output
elif isinstance(output, AgentAction):
return [AgentStep(action=output, observation=None)]
else:
return [AgentStep(action=action, observation=None) for action in output]
else:
self._logger.log(
"debug",
"Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)

View File

@@ -1,10 +1,6 @@
import re
from typing import Any, Union
from json_repair import repair_json
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from crewai.utilities import I18N
@@ -14,7 +10,35 @@ MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Forma
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"
class CrewAgentParser(ReActSingleInputOutputParser):
class AgentAction:
tool: str
tool_input: str
text: str
result: str
def __init__(self, tool: str, tool_input: str, text: str):
self.tool = tool
self.tool_input = tool_input
self.text = text
class AgentFinish:
output: str
text: str
def __init__(self, output: str, text: str):
self.output = output
self.text = text
class OutputParserException(Exception):
error: str
def __init__(self, error: str):
self.error = error
class CrewAgentParser:
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
@@ -38,6 +62,9 @@ class CrewAgentParser(ReActSingleInputOutputParser):
_i18n: I18N = I18N()
agent: Any = None
def __init__(self, agent: Any):
self.agent = agent
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
@@ -60,27 +87,19 @@ class CrewAgentParser(ReActSingleInputOutputParser):
return AgentAction(clean_action, safe_tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
return AgentFinish(text.split(FINAL_ANSWER_ACTION)[-1].strip(), text)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
self.agent.increment_formatting_errors()
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
llm_output=text,
send_to_llm=True,
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
self.agent.increment_formatting_errors()
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
)
else:
format = self._i18n.slice("format_without_tools")
@@ -88,9 +107,6 @@ class CrewAgentParser(ReActSingleInputOutputParser):
self.agent.increment_formatting_errors()
raise OutputParserException(
error,
observation=error,
llm_output=text,
send_to_llm=True,
)
def _clean_action(self, text: str) -> str:
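
A hedged usage sketch of the slimmed-down parser: since it no longer subclasses LangChain's ReActSingleInputOutputParser, it can be driven directly on raw LLM text (the agent argument only needs increment_formatting_errors() for the error paths; FINAL_ANSWER_ACTION is the Final Answer marker defined near the top of this module):

# Sketch only; outputs are approximate.
parser = CrewAgentParser(agent=my_agent)  # my_agent: anything with increment_formatting_errors()

action = parser.parse(
    "Thought: I need data\n"
    "Action: search\n"
    'Action Input: {"q": "crewai"}'
)
# roughly AgentAction(tool="search", tool_input='{"q": "crewai"}', text=<the raw text>)

finish = parser.parse(
    "Thought: I now know the final answer\n"
    "Final Answer: crewai is an agent framework"
)
# AgentFinish(output="crewai is an agent framework", text=<the raw text>)
# Malformed output raises the local OutputParserException; the executor feeds its
# .error back to the model as an assistant message and loops again.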

View File

@@ -117,7 +117,7 @@ class DeployCommand:
else:
self._handle_error(json_response)
def create_crew(self, confirm: bool) -> None:
def create_crew(self, confirm: bool = False) -> None:
"""
Create a new crew deployment.
"""

View File

@@ -205,6 +205,11 @@ class Crew(BaseModel):
if self.output_log_file:
self._file_handler = FileHandler(self.output_log_file)
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
self.function_calling_llm = (
self.function_calling_llm.model_name
if hasattr(self.function_calling_llm, "model_name")
else self.function_calling_llm
)
self._telemetry = Telemetry()
self._telemetry.set_tracer()
return self
@@ -588,8 +593,14 @@ class Crew(BaseModel):
"warning", "Manager agent should not have tools", color="orange"
)
manager.tools = []
raise Exception("Manager agent should not have tools")
manager.tools = self.manager_agent.get_delegation_tools(self.agents)
else:
self.manager_llm = (
self.manager_llm.model_name
if hasattr(self.manager_llm, "model_name")
else self.manager_llm
)
manager = Agent(
role=i18n.retrieve("hierarchical_manager_agent", "role"),
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
@@ -921,16 +932,14 @@ class Crew(BaseModel):
def calculate_usage_metrics(self) -> UsageMetrics:
"""Calculates and returns the usage metrics."""
total_usage_metrics = UsageMetrics()
for agent in self.agents:
if hasattr(agent, "_token_process"):
token_sum = agent._token_process.get_summary()
total_usage_metrics.add_usage_metrics(token_sum)
if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
token_sum = self.manager_agent._token_process.get_summary()
total_usage_metrics.add_usage_metrics(token_sum)
self.usage_metrics = total_usage_metrics
return total_usage_metrics
def test(

src/crewai/llm.py (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
from typing import Any, Dict, List
from litellm import completion
import litellm
class LLM:
def __init__(self, model: str, stop: List[str] = [], callbacks: List[Any] = []):
self.stop = stop
self.model = model
litellm.callbacks = callbacks
litellm.set_verbose = True
def call(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
response = completion(
stop=self.stop, model=self.model, messages=messages, num_retries=5
)
return response["choices"][0]["message"]["content"]
def _call_callbacks(self, formatted_answer):
for callback in self.callbacks:
callback(formatted_answer)
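
A minimal usage sketch of the wrapper (model string and stop words are examples). Note that callbacks and verbosity are set on the litellm module itself, not on the instance, so the last constructed LLM wins for every call in the process:

# Sketch only: call() sends an OpenAI-style message list through litellm.completion
# and returns the assistant message content as a plain string.
llm = LLM(model="gpt-4o", stop=["\nObservation:"])
answer = llm.call(
    [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Say hi in one word."},
    ]
)
print(answer)  # e.g. "Hi"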

src/crewai/lol.py (new file, 95 lines)
View File

@@ -0,0 +1,95 @@
from typing import Callable, List, Dict, Any
from functools import wraps
class Flow:
def __init__(self):
self._start_method = None
self._listeners: Dict[str, List[str]] = {}
self._methods: Dict[str, Callable] = {}
def run(self):
if not self._start_method:
raise ValueError("No start method defined")
result = self._methods[self._start_method](self)
self._execute_listeners(self._start_method, result)
def _execute_listeners(self, trigger_method: str, result: Any):
if trigger_method in self._listeners:
for listener in self._listeners[trigger_method]:
listener_result = self._methods[listener](self, result)
self._execute_listeners(listener, listener_result)
def start():
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self._start_method:
self._start_method = func.__name__
return func(self, *args, **kwargs)
return wrapper
return decorator
def listen(*trigger_methods):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
for trigger in trigger_methods:
trigger_name = trigger.__name__ if callable(trigger) else trigger
if trigger_name not in self._listeners:
self._listeners[trigger_name] = []
self._listeners[trigger_name].append(func.__name__)
return func(self, *args, **kwargs)
return wrapper
return decorator
class FlowMeta(type):
def __new__(mcs, name, bases, attrs):
new_cls = super().__new__(mcs, name, bases, attrs)
for name, method in attrs.items():
if hasattr(method, "_is_start"):
new_cls._start_method = name
if hasattr(method, "_listeners"):
for trigger in method._listeners:
if trigger not in new_cls._listeners:
new_cls._listeners[trigger] = []
new_cls._listeners[trigger].append(name)
new_cls._methods[name] = method
return new_cls
class BaseFlow(Flow, metaclass=FlowMeta):
_start_method = None
_listeners = {}
_methods = {}
# Example usage:
class ExampleFlow(BaseFlow):
@start()
def start_method(self):
print("Starting the flow")
return "Start result"
@listen(start_method)
def second_method(self, result):
print(f"Second method, received: {result}")
return "Second result"
@listen(second_method)
def third_method(self, result):
print(f"Third method, received: {result}")
return "Third result"
# Run the flow
flow = ExampleFlow()
flow.run()

View File

@@ -102,14 +102,8 @@ class Telemetry:
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"function_calling_llm": json.dumps(
self._safe_llm_attributes(
agent.function_calling_llm
)
),
"llm": json.dumps(
self._safe_llm_attributes(agent.llm)
),
"function_calling_llm": agent.function_calling_llm,
"llm": agent.llm,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
@@ -173,14 +167,8 @@ class Telemetry:
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"function_calling_llm": json.dumps(
self._safe_llm_attributes(
agent.function_calling_llm
)
),
"llm": json.dumps(
self._safe_llm_attributes(agent.llm)
),
"function_calling_llm": agent.function_calling_llm,
"llm": agent.llm,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
@@ -294,9 +282,7 @@ class Telemetry:
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
self._add_attribute(span, "llm", llm)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -316,9 +302,7 @@ class Telemetry:
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
self._add_attribute(span, "llm", llm)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -336,9 +320,7 @@ class Telemetry:
pkg_resources.get_distribution("crewai").version,
)
if llm:
self._add_attribute(
span, "llm", json.dumps(self._safe_llm_attributes(llm))
)
self._add_attribute(span, "llm", llm)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
@@ -491,7 +473,7 @@ class Telemetry:
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
"llm": agent.llm,
"delegation_enabled?": agent.allow_delegation,
"tools_names": [
tool.name.casefold() for tool in agent.tools or []
@@ -567,11 +549,3 @@ class Telemetry:
return span.set_attribute(key, value)
except Exception:
pass
def _safe_llm_attributes(self, llm):
attributes = ["name", "model_name", "model", "top_k", "temperature"]
if llm:
safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
safe_attributes["class"] = llm.__class__.__name__
return safe_attributes
return {}

View File

@@ -4,9 +4,6 @@ from difflib import SequenceMatcher
from textwrap import dedent
from typing import Any, List, Union
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task
from crewai.telemetry import Telemetry
@@ -20,7 +17,7 @@ if os.environ.get("AGENTOPS_API_KEY"):
except ImportError:
pass
OPENAI_BIGGER_MODELS = ["gpt-4o"]
OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o"]
class ToolUsageErrorException(Exception):
@@ -48,7 +45,7 @@ class ToolUsage:
def __init__(
self,
tools_handler: ToolsHandler,
tools: List[BaseTool],
tools: List[Any],
original_tools: List[Any],
tools_description: str,
tools_names: str,
@@ -73,18 +70,9 @@ class ToolUsage:
self.action = action
self.function_calling_llm = function_calling_llm
# Handling bug (see https://github.com/langchain-ai/langchain/pull/16395): raise an error if tools_names have space for ChatOpenAI
if isinstance(self.function_calling_llm, ChatOpenAI):
if " " in self.tools_names:
raise Exception(
"Tools names should not have spaces for ChatOpenAI models."
)
# Set the maximum parsing attempts for bigger models
if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
self.function_calling_llm.openai_api_base is None
):
if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
if self._is_gpt(self.function_calling_llm) and "4" in self.function_calling_llm:
if self.function_calling_llm in OPENAI_BIGGER_MODELS:
self._max_parsing_attempts = 2
self._remember_format_after_usages = 4
@@ -116,7 +104,7 @@ class ToolUsage:
def _use(
self,
tool_string: str,
tool: BaseTool,
tool: Any,
calling: Union[ToolCalling, InstructorToolCalling],
) -> str: # TODO: Fix this return type
tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None # type: ignore
@@ -265,7 +253,7 @@ class ToolUsage:
calling.arguments == last_tool_usage.arguments
)
def _select_tool(self, tool_name: str) -> BaseTool:
def _select_tool(self, tool_name: str) -> Any:
order_tools = sorted(
self.tools,
key=lambda tool: SequenceMatcher(
@@ -285,7 +273,7 @@ class ToolUsage:
self.task.increment_tools_errors()
if tool_name and tool_name != "":
raise Exception(
f"Action '{tool_name}' don't exist, these are the only available Actions:\n {self.tools_description}"
f"Action '{tool_name}' don't exist, these are the only available Actions:\n{self.tools_description}"
)
else:
raise Exception(
@@ -312,7 +300,7 @@ class ToolUsage:
return "\n--\n".join(descriptions)
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
return False if not llm else "gpt" in llm.lower()
def _tool_calling(
self, tool_string: str

View File

@@ -10,17 +10,18 @@
"memory": "\n\n# Useful context: \n{memory}",
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
"expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
"human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
"getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: "
},
"errors": {
"force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
"force_final_answer_error": "I can't keep going, this was the best I could do.\n {formatted_answer.text}",
"force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
"agent_tool_unexsiting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
"task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
"tool_usage_error": "I encountered an error: {error}",

View File

@@ -2,8 +2,7 @@ import json
import re
from typing import Any, Optional, Type, Union
from langchain.schema import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from crewai.llm import LLM
from pydantic import BaseModel, ValidationError
from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
@@ -28,7 +27,12 @@ class Converter(OutputConverter):
if self.is_gpt:
return self._create_instructor().to_pydantic()
else:
return self._create_chain().invoke({})
return LLM(model=self.llm).call(
[
{"role": "system", "content": self.instructions},
{"role": "user", "content": self.text},
]
)
except Exception as e:
if current_attempt < self.max_attempts:
return self.to_pydantic(current_attempt + 1)
@@ -42,7 +46,14 @@ class Converter(OutputConverter):
if self.is_gpt:
return self._create_instructor().to_json()
else:
return json.dumps(self._create_chain().invoke({}).model_dump())
return json.dumps(
LLM(model=self.llm).call(
[
{"role": "system", "content": self.instructions},
{"role": "user", "content": self.text},
]
)
)
except Exception as e:
if current_attempt < self.max_attempts:
return self.to_json(current_attempt + 1)
@@ -61,22 +72,25 @@ class Converter(OutputConverter):
)
return inst
def _create_chain(self):
def _convert_with_instructions(self):
"""Create a chain."""
from crewai.utilities.crew_pydantic_output_parser import (
CrewPydanticOutputParser,
)
parser = CrewPydanticOutputParser(pydantic_object=self.model)
new_prompt = SystemMessage(content=self.instructions) + HumanMessage(
content=self.text
result = LLM(model=self.llm).call(
[
{"role": "system", "content": self.instructions},
{"role": "user", "content": self.text},
]
)
return new_prompt | self.llm | parser
return parser.parse_result(result)
@property
def is_gpt(self) -> bool:
"""Return if llm provided is of gpt from openai."""
return isinstance(self.llm, ChatOpenAI) and self.llm.openai_api_base is None
return "gpt" in str(self.llm).lower()
def convert_to_model(
@@ -87,9 +101,9 @@ def convert_to_model(
converter_cls: Optional[Type[Converter]] = None,
) -> Union[dict, BaseModel, str]:
model = output_pydantic or output_json
if model is None:
return result
try:
escaped_result = json.dumps(json.loads(result, strict=False))
return validate_model(escaped_result, model, bool(output_json))
@@ -140,11 +154,8 @@ def handle_partial_json(
if is_json_output:
return exported_result.model_dump()
return exported_result
except json.JSONDecodeError as e:
Printer().print(
content=f"Error parsing JSON: {e}. The extracted JSON-like string is not valid JSON. Attempting alternative conversion method.",
color="yellow",
)
except json.JSONDecodeError:
pass
except ValidationError as e:
Printer().print(
content=f"Pydantic validation error: {e}. The JSON structure doesn't match the expected model. Attempting alternative conversion method.",
@@ -170,7 +181,6 @@ def convert_with_instructions(
) -> Union[dict, BaseModel, str]:
llm = agent.function_calling_llm or agent.llm
instructions = get_conversion_instructions(model, llm)
converter = create_converter(
agent=agent,
converter_cls=converter_cls,
@@ -179,6 +189,7 @@ def convert_with_instructions(
model=model,
instructions=instructions,
)
exported_result = (
converter.to_pydantic() if not is_json_output else converter.to_json()
)

View File

@@ -1,33 +1,31 @@
import json
from typing import Any, List, Type
import regex
from langchain.output_parsers import PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from typing import Any, Type
from crewai.agents.parser import OutputParserException
from pydantic import BaseModel, ValidationError
class CrewPydanticOutputParser(PydanticOutputParser):
class CrewPydanticOutputParser:
"""Parses the text into pydantic models"""
pydantic_object: Type[BaseModel]
def parse_result(self, result: List[Generation]) -> Any:
result[0].text = self._transform_in_valid_json(result[0].text)
def parse_result(self, result: str) -> Any:
result = self._transform_in_valid_json(result)
# Treating edge case of function calling llm returning the name instead of tool_name
json_object = json.loads(result[0].text)
json_object = json.loads(result)
if "tool_name" not in json_object:
json_object["tool_name"] = json_object.get("name", "")
result[0].text = json.dumps(json_object)
result = json.dumps(json_object)
try:
return self.pydantic_object.model_validate(json_object)
except ValidationError as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
raise OutputParserException(msg, llm_output=json_object)
raise OutputParserException(error=msg)
def _transform_in_valid_json(self, text) -> str:
text = text.replace("```", "").replace("json", "")

View File

@@ -1,7 +1,6 @@
import os
from typing import List
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from crewai.utilities import Converter
@@ -84,7 +83,7 @@ class TaskEvaluator:
instructions = f"{instructions}\n\nReturn only valid JSON with the following schema:\n```json\n{model_schema}\n```"
converter = Converter(
llm=self.llm,
llm=self.function_calling_llm,
text=evaluation_query,
model=TaskEvaluation,
instructions=instructions,
@@ -93,7 +92,7 @@ class TaskEvaluator:
return converter.to_pydantic()
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
return "gpt" in str(self.llm).lower()
def evaluate_training_data(
self, training_data: dict, agent_id: str

View File

@@ -1,6 +1,7 @@
from typing import Any, Optional, Type
import instructor
from litellm import completion
from pydantic import BaseModel, Field, PrivateAttr, model_validator
@@ -12,7 +13,7 @@ class Instructor(BaseModel):
agent: Optional[Any] = Field(
description="The agent that needs to use instructor.", default=None
)
llm: Optional[Any] = Field(
llm: Optional[str] = Field(
description="The agent that needs to use instructor.", default=None
)
instructions: Optional[str] = Field(
@@ -29,8 +30,8 @@ class Instructor(BaseModel):
if self.agent and not self.llm:
self.llm = self.agent.function_calling_llm or self.agent.llm
self._client = instructor.patch(
self.llm.client._client,
self._client = instructor.from_litellm(
completion,
mode=instructor.Mode.TOOLS,
)
return self
@@ -45,6 +46,6 @@ class Instructor(BaseModel):
messages.append({"role": "system", "content": self.instructions})
model = self._client.chat.completions.create(
model=self.llm.model_name, response_model=self.model, messages=messages
model=self.llm, response_model=self.model, messages=messages
)
return model
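
For reference, a standalone sketch of the instructor-over-litellm pattern adopted above (Pet is a made-up response model; instructor.from_litellm wraps litellm's completion, so the model argument is the plain model-name string that self.llm now holds):

import instructor
from litellm import completion
from pydantic import BaseModel

class Pet(BaseModel):  # made-up model for the example
    name: str
    age: int

client = instructor.from_litellm(completion)
pet = client.chat.completions.create(
    model="gpt-4o",
    response_model=Pet,
    messages=[{"role": "user", "content": "Milo is a 3 year old cat."}],
)
# pet is a validated Pet instance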

View File

@@ -1,8 +1,5 @@
from typing import Any, ClassVar, Optional
from langchain.prompts import BasePromptTemplate, PromptTemplate
from pydantic import BaseModel, Field
from typing import Any, Optional
from crewai.utilities import I18N
@@ -14,9 +11,9 @@ class Prompts(BaseModel):
system_template: Optional[str] = None
prompt_template: Optional[str] = None
response_template: Optional[str] = None
SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"
agent: Any
def task_execution(self) -> BasePromptTemplate:
def task_execution(self) -> str:
"""Generate a standard prompt for task execution."""
slices = ["role_playing"]
if len(self.tools) > 0:
@@ -42,12 +39,11 @@ class Prompts(BaseModel):
system_template=None,
prompt_template=None,
response_template=None,
) -> BasePromptTemplate:
) -> str:
"""Constructs a prompt string from specified components."""
if not system_template and not prompt_template:
prompt_parts = [self.i18n.slice(component) for component in components]
prompt_parts.append(self.SCRATCHPAD_SLICE)
prompt = PromptTemplate.from_template("".join(prompt_parts))
prompt = "".join(prompt_parts)
else:
prompt_parts = [
self.i18n.slice(component)
@@ -56,9 +52,14 @@ class Prompts(BaseModel):
]
system = system_template.replace("{{ .System }}", "".join(prompt_parts))
prompt = prompt_template.replace(
"{{ .Prompt }}",
"".join([self.i18n.slice("task"), self.SCRATCHPAD_SLICE]),
"{{ .Prompt }}", "".join(self.i18n.slice("task"))
)
response = response_template.split("{{ .Response }}")[0]
prompt = PromptTemplate.from_template(f"{system}\n{prompt}\n{response}")
prompt = f"{system}\n{prompt}\n{response}"
prompt = (
prompt.replace("{goal}", self.agent.goal)
.replace("{role}", self.agent.role)
.replace("{backstory}", self.agent.backstory)
)
return prompt
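
Since task_execution() now returns a plain string rather than a LangChain PromptTemplate, the only templating left is literal replacement. A hedged sketch of how the executor consumes it (prompt text abbreviated and illustrative; only the {input}, {tools} and {tool_names} markers matter here):

# Sketch of CrewAgentExecutor._format_prompt() filling the remaining placeholders.
prompt = "You are Researcher. ...\n{tools}\n...\nTask: {input} ..."  # illustrative
filled = (
    prompt.replace("{input}", "Summarize the latest report")
          .replace("{tool_names}", "search")
          .replace("{tools}", "search: looks things up")
)
messages = [{"role": "user", "content": filled}]  # what _messages() builds from it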

View File

@@ -1,36 +1,20 @@
from typing import Any, Dict, List
import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
from litellm.integrations.custom_logger import CustomLogger
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
class TokenCalcHandler(BaseCallbackHandler):
model_name: str = ""
token_cost_process: TokenProcess
encoding: tiktoken.Encoding
def __init__(self, model_name, token_cost_process):
self.model_name = model_name
class TokenCalcHandler(CustomLogger):
def __init__(self, token_cost_process: TokenProcess):
self.token_cost_process = token_cost_process
try:
self.encoding = tiktoken.encoding_for_model(self.model_name)
except KeyError:
self.encoding = tiktoken.get_encoding("cl100k_base")
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
def log_success_event(self, kwargs, response_obj, start_time, end_time):
if self.token_cost_process is None:
return
for prompt in prompts:
self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))
async def on_llm_new_token(self, token: str, **kwargs) -> None:
self.token_cost_process.sum_completion_tokens(1)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.token_cost_process.sum_successful_requests(1)
print("*** response_obj", response_obj)
self.token_cost_process.sum_prompt_tokens(
response_obj["usage"]["prompt_tokens"]
)
self.token_cost_process.sum_completion_tokens(
response_obj["usage"]["completion_tokens"]
)
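
A hedged sketch of how the rewritten handler is driven: it is registered through litellm.callbacks (the new LLM class does this on construction), and litellm then calls log_success_event with the provider response, whose usage block supplies the token counts:

# Standalone wiring sketch, outside of crewai.
import litellm
from litellm import completion
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

token_process = TokenProcess()
litellm.callbacks = [TokenCalcHandler(token_process)]

completion(model="gpt-4o", messages=[{"role": "user", "content": "Hello"}])
# After the call, litellm invokes log_success_event(...) and the handler adds
# response_obj["usage"]["prompt_tokens"] / ["completion_tokens"] to token_process.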