Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-20 21:38:14 +00:00
Fix static typing errors (#187)
Co-authored-by: João Moura <joaomdmoura@gmail.com>
@@ -2,10 +2,12 @@ import uuid
 from typing import Any, List, Optional

 from langchain.agents.format_scratchpad import format_log_to_str
+from langchain.agents.agent import RunnableAgent
 from langchain.memory import ConversationSummaryMemory
 from langchain.tools.render import render_text_description
+from langchain_core.runnables.config import RunnableConfig
 from langchain_openai import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel
 from pydantic import (
     UUID4,
     BaseModel,
@@ -47,7 +49,7 @@ class Agent(BaseModel):
             tools: Tools at agents disposal
     """

-    __hash__ = object.__hash__
+    __hash__ = object.__hash__  # type: ignore
     _logger: Logger = PrivateAttr()
     _rpm_controller: RPMController = PrivateAttr(default=None)
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
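Note: the "# type: ignore" added here (and on the same pattern in Crew and Task below) addresses mypy's complaint about reassigning __hash__ on a pydantic model. A minimal sketch of the pattern; the Example class is invented for illustration and is not part of crewAI:

# Sketch only: "Example" is a made-up model.
from pydantic import BaseModel


class Example(BaseModel):
    name: str = "demo"

    # Non-frozen pydantic models define __eq__, which makes instances unhashable
    # by default; restoring object.__hash__ brings back identity-based hashing,
    # and the ignore comment silences the resulting mypy assignment error.
    __hash__ = object.__hash__  # type: ignore


print(hash(Example()))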
@@ -80,21 +82,19 @@ class Agent(BaseModel):
     max_iter: Optional[int] = Field(
         default=15, description="Maximum iterations for an agent to execute a task"
     )
-    agent_executor: Optional[InstanceOf[CrewAgentExecutor]] = Field(
+    agent_executor: InstanceOf[CrewAgentExecutor] = Field(
         default=None, description="An instance of the CrewAgentExecutor class."
     )
-    tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
+    tools_handler: InstanceOf[ToolsHandler] = Field(
         default=None, description="An instance of the ToolsHandler class."
     )
-    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
+    cache_handler: InstanceOf[CacheHandler] = Field(
         default=CacheHandler(), description="An instance of the CacheHandler class."
     )
-    i18n: Optional[I18N] = Field(
-        default=I18N(), description="Internationalization settings."
-    )
-    llm: Optional[Any] = Field(
+    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
+    llm: Any = Field(
         default_factory=lambda: ChatOpenAI(
-            model_name="gpt-4",
+            model="gpt-4",
         ),
         description="Language model that will run the agent.",
     )
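The pattern in this hunk repeats across the PR: fields that always receive a concrete default no longer need Optional, so downstream attribute access type-checks without None guards. A rough before/after sketch; Translations and Settings are invented stand-ins, not crewAI classes:

# Illustration only.
from typing import Optional

from pydantic import BaseModel, Field


class Translations(BaseModel):
    language: str = "en"


class Settings(BaseModel):
    # Before: Optional widens the type, so every access needs a None check.
    i18n_loose: Optional[Translations] = Field(default=Translations())
    # After: the default guarantees a value, so the narrower annotation is honest.
    i18n: Translations = Field(default=Translations())


print(Settings().i18n.language)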
@@ -140,6 +140,7 @@ class Agent(BaseModel):
         Returns:
             Output of the agent
         """
         if context:
             task = self.i18n.slice("task_with_context").format(
                 task=task, context=context
@@ -203,9 +204,9 @@ class Agent(BaseModel):
         }

         if self._rpm_controller:
-            executor_args[
-                "request_within_rpm_limit"
-            ] = self._rpm_controller.check_or_wait
+            executor_args["request_within_rpm_limit"] = (
+                self._rpm_controller.check_or_wait
+            )

         if self.memory:
             summary_memory = ConversationSummaryMemory(
@@ -234,7 +235,9 @@ class Agent(BaseModel):
                 i18n=self.i18n,
             )
         )
-        self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)
+        self.agent_executor = CrewAgentExecutor(
+            agent=RunnableAgent(runnable=inner_agent), **executor_args
+        )

     @staticmethod
     def __tools_names(tools) -> str:
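Wrapping the LCEL pipeline in RunnableAgent, instead of passing the raw runnable, gives CrewAgentExecutor an object of the agent type its langchain base class declares. A minimal, hedged sketch of the wrapper with a toy runnable; this is not how crewAI builds its real agent pipeline:

# Sketch under assumptions: langchain's RunnableAgent accepts any Runnable that
# maps executor inputs to AgentAction/AgentFinish.
from langchain.agents.agent import RunnableAgent
from langchain_core.agents import AgentFinish
from langchain_core.runnables import RunnableLambda

toy_runnable = RunnableLambda(
    lambda _inputs: AgentFinish(return_values={"output": "done"}, log="")
)

# The wrapper is what AgentExecutor-style classes accept as their agent field.
wrapped = RunnableAgent(runnable=toy_runnable)
print(type(wrapped).__name__)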
src/crewai/agents/cache/cache_handler.py
@@ -1,12 +1,10 @@
-from typing import Optional
-
-from pydantic import PrivateAttr

 class CacheHandler:
     """Callback handler for tool usage."""

-    _cache: PrivateAttr = {}
+    _cache: dict = {}

     def __init__(self):
         self._cache = {}
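pydantic's PrivateAttr is a helper for declaring private-attribute defaults on models, not a type, so annotating this plain class's attribute with it was a typing error; a plain dict annotation describes what the attribute actually holds. A hedged sketch of the cleaned-up shape (the add/read helpers and the key format are illustrative, not necessarily the repo's API):

# Sketch only; method names are hypothetical.
class SimpleCache:
    """Plain dict-backed cache keyed by tool name and input."""

    _cache: dict = {}

    def __init__(self) -> None:
        self._cache = {}

    def add(self, tool: str, tool_input: str, output: str) -> None:
        self._cache[f"{tool}-{tool_input}"] = output

    def read(self, tool: str, tool_input: str) -> str | None:
        return self._cache.get(f"{tool}-{tool_input}")


cache = SimpleCache()
cache.add("search", "crewai", "results...")
print(cache.read("search", "crewai"))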
@@ -108,8 +108,12 @@ class CrewAgentExecutor(AgentExecutor):
             if self._should_force_answer():
                 if isinstance(output, AgentAction):
                     output = output
-                else:
+                elif isinstance(output, CacheHit):
                     output = output.action
+                else:
+                    raise ValueError(
+                        f"Unexpected output type from agent: {type(output)}"
+                    )
                 yield self._force_answer(output)
                 return
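The explicit elif/else is what lets a type checker narrow the union instead of assuming the second branch handles "everything else". A standalone sketch of the same narrowing pattern with simplified stand-in classes (the real AgentAction and CacheHit live in langchain and crewAI):

from typing import Union


class FakeAction:
    pass


class FakeCacheHit:
    def __init__(self, action: FakeAction) -> None:
        self.action = action


def normalize(output: Union[FakeAction, FakeCacheHit]) -> FakeAction:
    if isinstance(output, FakeAction):
        return output
    elif isinstance(output, FakeCacheHit):
        # The checker now knows output has an .action attribute.
        return output.action
    else:
        # Defensive runtime guard for values outside the declared union.
        raise ValueError(f"Unexpected output type: {type(output)}")


print(normalize(FakeCacheHit(FakeAction())))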
@@ -50,7 +50,6 @@ class CrewAgentOutputParser(ReActSingleInputOutputParser):
     i18n: I18N

     def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
-        FINAL_ANSWER_ACTION in text
         regex = (
             r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
         )
@@ -10,9 +10,9 @@ class ToolsHandler(BaseCallbackHandler):
     """Callback handler for tool usage."""

     last_used_tool: Dict[str, Any] = {}
-    cache: CacheHandler = None
+    cache: CacheHandler

-    def __init__(self, cache: CacheHandler = None, **kwargs: Any):
+    def __init__(self, cache: CacheHandler, **kwargs: Any):
         """Initialize the callback handler."""
         self.cache = cache
         super().__init__(**kwargs)
@@ -38,12 +38,10 @@ class Crew(BaseModel):
         id: A unique identifier for the crew instance.
     """

-    __hash__ = object.__hash__
+    __hash__ = object.__hash__  # type: ignore
     _rpm_controller: RPMController = PrivateAttr()
     _logger: Logger = PrivateAttr()
-    _cache_handler: Optional[InstanceOf[CacheHandler]] = PrivateAttr(
-        default=CacheHandler()
-    )
+    _cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
     model_config = ConfigDict(arbitrary_types_allowed=True)
     tasks: List[Task] = Field(default_factory=list)
     agents: List[Agent] = Field(default_factory=list)
@@ -69,20 +67,20 @@ class Crew(BaseModel):
             "may_not_set_field", "The 'id' field cannot be set by the user.", {}
         )

-    @classmethod
     @field_validator("config", mode="before")
+    @classmethod
     def check_config_type(
         cls, v: Union[Json, Dict[str, Any]]
     ) -> Union[Json, Dict[str, Any]]:
         """Validates that the config is a valid type.

         Args:
             v: The config to be validated.

         Returns:
             The config if it is valid.
         """
-        return json.loads(v) if isinstance(v, Json) else v
+        # TODO: Improve typing
+        return json.loads(v) if isinstance(v, Json) else v  # type: ignore

     @model_validator(mode="after")
     def set_private_attrs(self) -> "Crew":
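Swapping the decorators puts field_validator on the outside, the ordering the pydantic v2 docs show for classmethod-based validators. A small sketch; CrewConfig is an invented model, not a crewAI class:

from pydantic import BaseModel, field_validator


class CrewConfig(BaseModel):
    name: str

    @field_validator("name", mode="before")
    @classmethod
    def strip_name(cls, value: str) -> str:
        # field_validator sits above classmethod, matching pydantic v2's examples.
        return value.strip()


print(CrewConfig(name="  crew  ").name)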
@@ -112,6 +110,8 @@ class Crew(BaseModel):
         return self

     def _setup_from_config(self):
+        assert self.config is not None, "Config should not be None."
+
         """Initializes agents and tasks from the provided config."""
         if not self.config.get("agents") or not self.config.get("tasks"):
             raise PydanticCustomError(
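The added assert does double duty: it fails fast when config is missing and it narrows the Optional field for the type checker, so the .get() calls below it type-check. A standalone sketch of the idiom:

from typing import Any, Dict, Optional


def setup_from_config(config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    # After the assert, mypy treats config as Dict[str, Any] rather than Optional.
    assert config is not None, "Config should not be None."
    return config


print(setup_from_config({"agents": [], "tasks": []}))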
@@ -143,19 +143,24 @@ class Crew(BaseModel):
         if self.process == Process.sequential:
             return self._sequential_loop()
         else:
             raise NotImplementedError(
                 f"The process '{self.process}' is not implemented yet."
             )

     def _sequential_loop(self) -> str:
         """Executes tasks sequentially and returns the final output."""
-        task_output = None
+        task_output = ""
         for task in self.tasks:
             self._prepare_and_execute_task(task)
             task_output = task.execute(task_output)
-            self._logger.log(
-                "debug", f"[{task.agent.role}] Task output: {task_output}\n\n"
-            )
+
+            role = task.agent.role if task.agent is not None else "None"
+            self._logger.log("debug", f"[{role}] Task output: {task_output}\n\n")

         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()

         return task_output

     def _prepare_and_execute_task(self, task: Task) -> None:
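Two small fixes keep _sequential_loop consistent with its "-> str" signature: task_output starts as "" instead of None, and task.agent (an Optional field) is narrowed before .role is read. A sketch of the narrowing idiom with a stand-in class:

from typing import Optional


class FakeAgent:
    def __init__(self, role: str) -> None:
        self.role = role


def describe(agent: Optional[FakeAgent]) -> str:
    # The "is not None" test narrows Optional[FakeAgent] to FakeAgent in one branch.
    return agent.role if agent is not None else "None"


print(describe(FakeAgent("Researcher")), describe(None))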
@@ -164,8 +169,9 @@ class Crew(BaseModel):
         Args:
             task: The task to be executed.
         """
-        if task.agent.allow_delegation:
+        if task.agent is not None and task.agent.allow_delegation:
             task.tools += AgentTools(agents=self.agents).tools()

-        self._logger.log("debug", f"Working Agent: {task.agent.role}")
+        role = task.agent.role if task.agent is not None else "None"
+        self._logger.log("debug", f"Working Agent: {role}")
         self._logger.log("info", f"Starting Task: {task.description}")
@@ -12,7 +12,7 @@ from crewai.utilities import I18N
 class Task(BaseModel):
     """Class that represent a task to be executed."""

-    __hash__ = object.__hash__
+    __hash__ = object.__hash__  # type: ignore
     i18n: I18N = I18N()
     description: str = Field(description="Description of the actual task.")
     agent: Optional[Agent] = Field(
@@ -20,7 +20,7 @@ class Task(BaseModel):
     )
     tools: List[Any] = Field(
         default_factory=list,
-        description="Tools the agent are limited to use for this task.",
+        description="Tools the agent is limited to use for this task.",
     )
     expected_output: str = Field(
         description="Clear definition of expected output for the task.",
@@ -46,7 +46,7 @@ class Task(BaseModel):
     @model_validator(mode="after")
     def check_tools(self):
         """Check if the tools are set."""
-        if not self.tools and (self.agent and self.agent.tools):
+        if not self.tools and self.agent and self.agent.tools:
             self.tools.extend(self.agent.tools)
         return self
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List

 from langchain.tools import Tool
 from pydantic import BaseModel, Field
@@ -11,9 +11,7 @@ class AgentTools(BaseModel):
     """Default tools around agent delegation"""

     agents: List[Agent] = Field(description="List of agents in this crew.")
-    i18n: Optional[I18N] = Field(
-        default=I18N(), description="Internationalization settings."
-    )
+    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")

     def tools(self):
         return [
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field, PrivateAttr, ValidationError, model_valid
 class I18N(BaseModel):
-    _translations: Optional[Dict[str, str]] = PrivateAttr()
+    _translations: Dict[str, Dict[str, str]] = PrivateAttr()
     language: Optional[str] = Field(
         default="en",
         description="Language used to load translations",
@@ -25,10 +25,14 @@
                 self._translations = json.load(f)
         except FileNotFoundError:
             raise ValidationError(
-                f"Trasnlation file for language '{self.language}' not found."
+                f"Translation file for language '{self.language}' not found."
             )
         except json.JSONDecodeError:
             raise ValidationError(f"Error decoding JSON from the prompts file.")

+        if not self._translations:
+            self._translations = {}
+
         return self

     def slice(self, slice: str) -> str:
@@ -40,8 +44,8 @@
     def tools(self, error: str) -> str:
         return self.retrieve("tools", error)

-    def retrieve(self, kind, key):
+    def retrieve(self, kind, key) -> str:
         try:
-            return self._translations[kind].get(key)
+            return self._translations[kind][key]
         except:
             raise ValidationError(f"Translation for '{kind}':'{key}' not found.")
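Indexing with [key] instead of .get(key) is what makes the "-> str" annotation truthful: .get can return None, while a miss now raises and is converted into the not-found error. A standalone sketch:

translations = {"tools": {"error": "Tool failed"}}


def retrieve(kind: str, key: str) -> str:
    try:
        # dict[key] either returns a str or raises KeyError; dict.get would
        # widen the return type to Optional[str].
        return translations[kind][key]
    except KeyError:
        raise KeyError(f"Translation for '{kind}':'{key}' not found.")


print(retrieve("tools", "error"))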
@@ -1,6 +1,6 @@
 from typing import ClassVar

-from langchain.prompts import PromptTemplate
+from langchain.prompts import PromptTemplate, BasePromptTemplate
 from pydantic import BaseModel, Field

 from crewai.utilities import I18N
@@ -13,19 +13,19 @@ class Prompts(BaseModel):
     SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

-    def task_execution_with_memory(self) -> str:
+    def task_execution_with_memory(self) -> BasePromptTemplate:
         """Generate a prompt for task execution with memory components."""
         return self._build_prompt(["role_playing", "tools", "memory", "task"])

-    def task_execution_without_tools(self) -> str:
+    def task_execution_without_tools(self) -> BasePromptTemplate:
         """Generate a prompt for task execution without tools components."""
         return self._build_prompt(["role_playing", "task"])

-    def task_execution(self) -> str:
+    def task_execution(self) -> BasePromptTemplate:
         """Generate a standard prompt for task execution."""
         return self._build_prompt(["role_playing", "tools", "task"])

-    def _build_prompt(self, components: [str]) -> str:
+    def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
         """Constructs a prompt string from specified components."""
         prompt_parts = [self.i18n.slice(component) for component in components]
         prompt_parts.append(self.SCRATCHPAD_SLICE)
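Two annotation fixes land here: the return type now reflects that _build_prompt returns a prompt-template object rather than a str, and "components: [str]" (a list literal, which is not a valid annotation) becomes list[str]. A minimal sketch of the second fix:

def build_prompt(components: list[str]) -> str:
    # "components: [str]" would annotate the parameter with a list *value*;
    # list[str] is the valid generic annotation type checkers understand.
    return "\n".join(components)


print(build_prompt(["role_playing", "tools", "task"]))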
@@ -12,7 +12,7 @@ class RPMController(BaseModel):
     max_rpm: Union[int, None] = Field(default=None)
     logger: Logger = Field(default=None)
     _current_rpm: int = PrivateAttr(default=0)
-    _timer: threading.Timer = PrivateAttr(default=None)
+    _timer: threading.Timer | None = PrivateAttr(default=None)
     _lock: threading.Lock = PrivateAttr(default=None)

     @model_validator(mode="after")
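Since _timer defaults to None, its annotation has to admit None; the PEP 604 union spelling used here requires Python 3.10+. A one-line sketch:

import threading

# Equivalent to Optional[threading.Timer]; valid as an annotation on 3.10+.
_timer: threading.Timer | None = None
print(_timer)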