Howard Gil
2024-05-21 12:18:03 -07:00
70 changed files with 211160 additions and 4737 deletions

View File

@@ -226,7 +226,7 @@ class Agent(BaseModel):
Output of the agent
"""
if self.tools_handler:
self.tools_handler.last_used_tool = {}
self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
task_prompt = task.prompt()
@@ -246,7 +246,7 @@ class Agent(BaseModel):
task_prompt += self.i18n.slice("memory").format(memory=memory)
tools = tools or self.tools
parsed_tools = self._parse_tools(tools)
parsed_tools = self._parse_tools(tools) # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
self.create_agent_executor(tools=tools)
self.agent_executor.tools = parsed_tools
@@ -386,7 +386,7 @@ class Agent(BaseModel):
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]: # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
"""Parse tools to be used for the task."""
# tentatively try to import from crewai_tools import BaseTool as CrewAITool
tools_list = []

View File

@@ -35,7 +35,7 @@ class CrewAgentExecutor(AgentExecutor):
crew: Any = None
function_calling_llm: Any = None
request_within_rpm_limit: Any = None
tools_handler: InstanceOf[ToolsHandler] = None
tools_handler: Optional[InstanceOf[ToolsHandler]] = None
max_iterations: Optional[int] = 15
have_forced_answer: bool = False
force_answer_max_iterations: Optional[int] = None
@@ -189,7 +189,7 @@ class CrewAgentExecutor(AgentExecutor):
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
# Call the LLM to see what to do.
output = self.agent.plan(
output = self.agent.plan( # type: ignore # Incompatible types in assignment (expression has type "AgentAction | AgentFinish | list[AgentAction]", variable has type "AgentAction")
intermediate_steps,
callbacks=run_manager.get_child() if run_manager else None,
**inputs,
@@ -275,8 +275,8 @@ class CrewAgentExecutor(AgentExecutor):
run_manager.on_agent_action(agent_action, color="green")
tool_usage = ToolUsage(
tools_handler=self.tools_handler,
tools=self.tools,
tools_handler=self.tools_handler, # type: ignore # Argument "tools_handler" to "ToolUsage" has incompatible type "ToolsHandler | None"; expected "ToolsHandler"
tools=self.tools, # type: ignore # Argument "tools" to "ToolUsage" has incompatible type "Sequence[BaseTool]"; expected "list[BaseTool]"
original_tools=self.original_tools,
tools_description=self.tools_description,
tools_names=self.tools_names,

View File

@@ -8,13 +8,13 @@ from .cache.cache_handler import CacheHandler
class ToolsHandler:
"""Callback handler for tool usage."""
last_used_tool: ToolCalling = {}
cache: CacheHandler
last_used_tool: ToolCalling = {} # type: ignore # BUG?: Incompatible types in assignment (expression has type "Dict[...]", variable has type "ToolCalling")
cache: Optional[CacheHandler]
def __init__(self, cache: Optional[CacheHandler] = None):
"""Initialize the callback handler."""
self.cache = cache
self.last_used_tool = {}
self.last_used_tool = {} # type: ignore # BUG?: same as above
def on_tool_use(
self,
@@ -23,7 +23,7 @@ class ToolsHandler:
should_cache: bool = True,
) -> Any:
"""Run when tool ends running."""
self.last_used_tool = calling
self.last_used_tool = calling # type: ignore # BUG?: Incompatible types in assignment (expression has type "Union[ToolCalling, InstructorToolCalling]", variable has type "ToolCalling")
if self.cache and should_cache and calling.tool_name != CacheTools().name:
self.cache.add(
tool=calling.tool_name,

View File

@@ -1,4 +1,5 @@
import click
import pkg_resources
from .create_crew import create_crew
@@ -15,5 +16,22 @@ def create(project_name):
create_crew(project_name)
@crewai.command()
@click.option(
"--tools", is_flag=True, help="Show the installed version of crewai tools"
)
def version(tools):
"""Show the installed version of crewai."""
crewai_version = pkg_resources.get_distribution("crewai").version
click.echo(f"crewai version: {crewai_version}")
if tools:
try:
tools_version = pkg_resources.get_distribution("crewai[tools]").version
click.echo(f"crewai tools version: {tools_version}")
except pkg_resources.DistributionNotFound:
click.echo("crewai tools not installed")
if __name__ == "__main__":
crewai()
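
A quick way to exercise the new version command is click's test runner; a minimal sketch, assuming the group above is importable as crewai.cli.cli (the import path is an assumption, not confirmed by this diff):

from click.testing import CliRunner
from crewai.cli.cli import crewai  # assumed location of the CLI group above

runner = CliRunner()
result = runner.invoke(crewai, ["version", "--tools"])
print(result.output)
# e.g. "crewai version: 0.30.11" followed by the tools line,
# or "crewai tools not installed" if the extra is absent.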

View File

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = {extras = ["tools"], version = "^0.28.8"}
crewai = {extras = ["tools"], version = "^0.30.11"}
[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:run"

View File

@@ -169,8 +169,8 @@ class Crew(BaseModel):
"""Set private attributes."""
if self.memory:
self._long_term_memory = LongTermMemory()
self._short_term_memory = ShortTermMemory(embedder_config=self.embedder)
self._entity_memory = EntityMemory(embedder_config=self.embedder)
self._short_term_memory = ShortTermMemory(crew=self, embedder_config=self.embedder)
self._entity_memory = EntityMemory(crew=self, embedder_config=self.embedder)
return self
@model_validator(mode="after")
@@ -247,7 +247,7 @@ class Crew(BaseModel):
def kickoff(self, inputs: Optional[Dict[str, Any]] = {}) -> str:
"""Starts the crew to work on its assigned tasks."""
self._execution_span = self._telemetry.crew_execution_span(self)
self._interpolate_inputs(inputs)
self._interpolate_inputs(inputs) # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
self._set_tasks_callbacks()
i18n = I18N(prompt_file=self.prompt_file)
@@ -270,8 +270,8 @@ class Crew(BaseModel):
if self.process == Process.sequential:
result = self._run_sequential_process()
elif self.process == Process.hierarchical:
result, manager_metrics = self._run_hierarchical_process()
metrics.append(manager_metrics)
result, manager_metrics = self._run_hierarchical_process() # type: ignore # Unpacking a string is disallowed
metrics.append(manager_metrics) # type: ignore # Cannot determine type of "manager_metrics"
else:
raise NotImplementedError(
@@ -291,7 +291,7 @@ class Crew(BaseModel):
"""Executes tasks sequentially and returns the final output."""
task_output = ""
for task in self.tasks:
if task.agent.allow_delegation:
if task.agent.allow_delegation: # type: ignore # Item "None" of "Agent | None" has no attribute "allow_delegation"
agents_for_delegation = [
agent for agent in self.agents if agent != task.agent
]
@@ -364,23 +364,23 @@ class Crew(BaseModel):
)
self._finish_execution(task_output)
return self._format_output(task_output), manager._token_process.get_summary()
return self._format_output(task_output), manager._token_process.get_summary() # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
def _set_tasks_callbacks(self) -> str:
def _set_tasks_callbacks(self) -> None:
"""Sets callback for every task suing task_callback"""
for task in self.tasks:
if not task.callback:
task.callback = self.task_callback
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Interpolates the inputs in the tasks and agents."""
[task.interpolate_inputs(inputs) for task in self.tasks]
[agent.interpolate_inputs(inputs) for agent in self.agents]
[task.interpolate_inputs(inputs) for task in self.tasks] # type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
[agent.interpolate_inputs(inputs) for agent in self.agents] # type: ignore # "interpolate_inputs" of "Agent" does not return a value (it only ever returns None)
def _format_output(self, output: str) -> str:
"""Formats the output of the crew execution."""
if self.full_output:
return {
return { # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
"final_output": output,
"tasks_outputs": [task.output for task in self.tasks if task],
}

View File

@@ -1,3 +1,5 @@
from typing import Optional
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory
@@ -32,7 +34,7 @@ class ContextualMemory:
formatted_results = "\n".join([f"- {result}" for result in stm_results])
return f"Recent Insights:\n{formatted_results}" if stm_results else ""
def _fetch_ltm_context(self, task) -> str:
def _fetch_ltm_context(self, task) -> Optional[str]:
"""
Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
formatted as bullet points.
@@ -44,10 +46,10 @@ class ContextualMemory:
formatted_results = [
suggestion
for result in ltm_results
for suggestion in result["metadata"]["suggestions"]
for suggestion in result["metadata"]["suggestions"] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
]
formatted_results = list(dict.fromkeys(formatted_results))
formatted_results = "\n".join([f"- {result}" for result in formatted_results])
formatted_results = "\n".join([f"- {result}" for result in formatted_results]) # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")
return f"Historical Data:\n{formatted_results}" if ltm_results else ""
@@ -58,6 +60,6 @@ class ContextualMemory:
"""
em_results = self.em.search(query)
formatted_results = "\n".join(
[f"- {result['context']}" for result in em_results]
[f"- {result['context']}" for result in em_results] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
)
return f"Entities:\n{formatted_results}" if em_results else ""

View File

@@ -10,13 +10,13 @@ class EntityMemory(Memory):
Inherits from the Memory class.
"""
def __init__(self, embedder_config=None):
def __init__(self, crew=None, embedder_config=None):
storage = RAGStorage(
type="entities", allow_reset=False, embedder_config=embedder_config
type="entities", allow_reset=False, embedder_config=embedder_config, crew=crew
)
super().__init__(storage)
def save(self, item: EntityMemoryItem) -> None:
def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
"""Saves an entity item into the SQLite storage."""
data = f"{item.name}({item.type}): {item.description}"
super().save(data, item.metadata)

View File

@@ -18,10 +18,10 @@ class LongTermMemory(Memory):
storage = LTMSQLiteStorage()
super().__init__(storage)
def save(self, item: LongTermMemoryItem) -> None:
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
metadata = item.metadata
metadata.update({"agent": item.agent, "expected_output": item.expected_output})
self.storage.save(
self.storage.save( # type: ignore # BUG?: Unexpected keyword argument "task_description","score","datetime" for "save" of "Storage"
task_description=item.task,
score=metadata["quality"],
metadata=metadata,
@@ -29,4 +29,4 @@ class LongTermMemory(Memory):
)
def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]:
return self.storage.load(task, latest_n)
return self.storage.load(task, latest_n) # type: ignore # BUG?: "Storage" has no attribute "load"

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict, Union
from typing import Any, Dict, Optional, Union
class LongTermMemoryItem:
@@ -8,8 +8,8 @@ class LongTermMemoryItem:
task: str,
expected_output: str,
datetime: str,
quality: Union[int, float] = None,
metadata: Dict[str, Any] = None,
quality: Optional[Union[int, float]] = None,
metadata: Optional[Dict[str, Any]] = None,
):
self.task = task
self.agent = agent

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict
from typing import Any, Dict, Optional
from crewai.memory.storage.interface import Storage
@@ -12,12 +12,16 @@ class Memory:
self.storage = storage
def save(
self, value: Any, metadata: Dict[str, Any] = None, agent: str = None
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
) -> None:
metadata = metadata or {}
if agent:
metadata["agent"] = agent
self.storage.save(value, metadata)
self.storage.save(value, metadata) # type: ignore # Maybe BUG? Should be self.storage.save(key, value, metadata)
def search(self, query: str) -> Dict[str, Any]:
return self.storage.search(query)

View File

@@ -12,12 +12,12 @@ class ShortTermMemory(Memory):
MemoryItem instances.
"""
def __init__(self, embedder_config=None):
storage = RAGStorage(type="short_term", embedder_config=embedder_config)
def __init__(self, crew=None, embedder_config=None):
storage = RAGStorage(type="short_term", embedder_config=embedder_config, crew=crew)
super().__init__(storage)
def save(self, item: ShortTermMemoryItem) -> None:
def save(self, item: ShortTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
super().save(item.data, item.metadata, item.agent)
def search(self, query: str, score_threshold: float = 0.35):
return self.storage.search(query=query, score_threshold=score_threshold)
return self.storage.search(query=query, score_threshold=score_threshold) # type: ignore # BUG?: The call is checked against the parent class, but the parent class does not have these parameters

View File

@@ -1,8 +1,10 @@
from typing import Any, Dict
from typing import Any, Dict, Optional
class ShortTermMemoryItem:
def __init__(self, data: Any, agent: str, metadata: Dict[str, Any] = None):
def __init__(
self, data: Any, agent: str, metadata: Optional[Dict[str, Any]] = None
):
self.data = data
self.agent = agent
self.metadata = metadata if metadata is not None else {}

View File

@@ -7,5 +7,5 @@ class Storage:
def save(self, key: str, value: Any, metadata: Dict[str, Any]) -> None:
pass
def search(self, key: str) -> Dict[str, Any]:
def search(self, key: str) -> Dict[str, Any]: # type: ignore
pass

View File

@@ -1,6 +1,6 @@
import json
import sqlite3
from typing import Any, Dict, Union
from typing import Any, Dict, List, Optional, Union
from crewai.utilities import Printer
from crewai.utilities.paths import db_storage_path
@@ -11,7 +11,9 @@ class LTMSQLiteStorage:
An updated SQLite storage class for LTM data storage.
"""
def __init__(self, db_path=f"{db_storage_path()}/long_term_memory_storage.db"):
def __init__(
self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
) -> None:
self.db_path = db_path
self._printer: Printer = Printer()
self._initialize_db()
@@ -67,7 +69,9 @@ class LTMSQLiteStorage:
color="red",
)
def load(self, task_description: str, latest_n: int) -> Dict[str, Any]:
def load(
self, task_description: str, latest_n: int
) -> Optional[List[Dict[str, Any]]]:
"""Queries the LTM table by task description with error handling."""
try:
with sqlite3.connect(self.db_path) as conn:

View File

@@ -2,7 +2,7 @@ import contextlib
import io
import logging
import os
from typing import Any, Dict
from typing import Any, Dict, List, Optional
from embedchain import App
from embedchain.llm.base import BaseLlm
@@ -37,13 +37,18 @@ class RAGStorage(Storage):
search efficiency.
"""
def __init__(self, type, allow_reset=True, embedder_config=None):
def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
super().__init__()
if (
not os.getenv("OPENAI_API_KEY")
and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1"
):
os.environ["OPENAI_API_KEY"] = "fake"
agents = crew.agents if crew else []
agents = [agent.role for agent in agents]
agents = "_".join(agents)
config = {
"app": {
"config": {"name": type, "collect_metrics": False, "log_level": "ERROR"}
@@ -58,7 +63,7 @@ class RAGStorage(Storage):
"provider": "chroma",
"config": {
"collection_name": type,
"dir": f"{db_storage_path()}/{type}",
"dir": f"{db_storage_path()}/{type}/{agents}",
"allow_reset": allow_reset,
},
},
@@ -72,16 +77,16 @@ class RAGStorage(Storage):
if allow_reset:
self.app.reset()
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
def save(self, value: Any, metadata: Dict[str, Any]) -> None: # type: ignore # BUG?: Should be save(key, value, metadata) Signature of "save" incompatible with supertype "Storage"
self._generate_embedding(value, metadata)
def search(
def search( # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage"
self,
query: str,
limit: int = 3,
filter: dict = None,
filter: Optional[dict] = None,
score_threshold: float = 0.35,
) -> Dict[str, Any]:
) -> List[Any]:
with suppress_logging():
try:
results = (
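
The new crew parameter above only influences where the Chroma collection lives: agent roles are joined into a path suffix so each crew line-up gets its own directory. A minimal standalone sketch of that derivation (the stub classes are illustrative stand-ins, not crewai types):

from dataclasses import dataclass
from typing import List

@dataclass
class AgentStub:
    role: str

@dataclass
class CrewStub:
    agents: List[AgentStub]

def storage_dir(base: str, type: str, crew=None) -> str:
    # Mirror the diff above: join the crew's agent roles with underscores.
    agents = crew.agents if crew else []
    joined = "_".join(agent.role for agent in agents)
    return f"{base}/{type}/{joined}"

crew = CrewStub(agents=[AgentStub("Researcher"), AgentStub("Writer")])
print(storage_dir("/tmp/crewai_storage", "short_term", crew))
# /tmp/crewai_storage/short_term/Researcher_Writer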

View File

@@ -1,16 +1,18 @@
import inspect
import os
from pathlib import Path
import yaml
from dotenv import load_dotenv
import os
from pathlib import Path
from pydantic import ConfigDict
from dotenv import load_dotenv
load_dotenv()
def CrewBase(cls):
class WrappedClass(cls):
is_crew_class = True
model_config = ConfigDict(arbitrary_types_allowed=True)
is_crew_class: bool = True
base_directory = None
for frame_info in inspect.stack():
@@ -40,6 +42,7 @@ def CrewBase(cls):
@staticmethod
def load_yaml(config_path: str):
with open(config_path, "r") as file:
# parsedContent = YamlParser.parse(file) # type: ignore # Argument 1 to "parse" has incompatible type "TextIOWrapper"; expected "YamlParser"
return yaml.safe_load(file)
return WrappedClass

View File

@@ -1,8 +1,8 @@
import os
import re
import threading
import uuid
from typing import Any, Dict, List, Optional, Type
import os
from langchain_openai import ChatOpenAI
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
@@ -41,7 +41,7 @@ class Task(BaseModel):
tools_errors: int = 0
delegations: int = 0
i18n: I18N = I18N()
thread: threading.Thread = None
thread: Optional[threading.Thread] = None
prompt_context: Optional[str] = None
description: str = Field(description="Description of the actual task.")
expected_output: str = Field(
@@ -109,6 +109,14 @@ class Task(BaseModel):
"may_not_set_field", "This field is not to be set by the user.", {}
)
@field_validator("output_file")
@classmethod
def output_file_validattion(cls, value: str) -> str:
"""Validate the output file path by removing the / from the beginning of the path."""
if value.startswith("/"):
return value[1:]
return value
@model_validator(mode="after")
def set_attributes_based_on_config(self) -> "Task":
"""Set attributes based on the agent configuration."""
@@ -136,7 +144,7 @@ class Task(BaseModel):
)
return self
def execute(
def execute( # type: ignore # Missing return statement
self,
agent: Agent | None = None,
context: Optional[str] = None,
@@ -155,12 +163,15 @@ class Task(BaseModel):
)
if self.context:
# type: ignore # Incompatible types in assignment (expression has type "list[Never]", variable has type "str | None")
context = []
for task in self.context:
if task.async_execution:
task.thread.join()
task.thread.join() # type: ignore # Item "None" of "Thread | None" has no attribute "join"
if task and task.output:
# type: ignore # Item "str" of "str | None" has no attribute "append"
context.append(task.output.raw_output)
# type: ignore # Argument 1 to "join" of "str" has incompatible type "str | None"; expected "Iterable[str]"
context = "\n".join(context)
self.prompt_context = context
@@ -193,6 +204,7 @@ class Task(BaseModel):
description=self.description,
exported_output=exported_output,
raw_output=result,
agent=agent.role,
)
if self.callback:
@@ -242,25 +254,29 @@ class Task(BaseModel):
# try to convert task_output directly to pydantic/json
try:
# type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
exported_result = model.model_validate_json(result)
if self.output_json:
return exported_result.model_dump()
return exported_result.model_dump() # type: ignore # "str" has no attribute "model_dump"
return exported_result
except Exception:
# sometimes the response contains valid JSON in the middle of text
match = re.search(r"({.*})", result, re.DOTALL)
if match:
try:
# type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
exported_result = model.model_validate_json(match.group(0))
if self.output_json:
return exported_result.model_dump()
return exported_result.model_dump() # type: ignore # "str" has no attribute "model_dump"
return exported_result
except Exception:
pass
# type: ignore # Item "None" of "Agent | None" has no attribute "function_calling_llm"
llm = self.agent.function_calling_llm or self.agent.llm
if not self._is_gpt(llm):
# type: ignore # Argument "model" to "PydanticSchemaParser" has incompatible type "type[BaseModel] | None"; expected "type[BaseModel]"
model_schema = PydanticSchemaParser(model=model).get_schema()
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
@@ -282,22 +298,24 @@ class Task(BaseModel):
if self.output_file:
content = (
exported_result if not self.output_pydantic else exported_result.json()
exported_result if not self.output_pydantic else exported_result.json() # type: ignore # "str" has no attribute "json"
)
self._save_file(content)
return exported_result
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
def _save_file(self, result: Any) -> None:
# type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"
directory = os.path.dirname(self.output_file)
if not os.path.exists(directory):
if directory and not os.path.exists(directory):
os.makedirs(directory)
with open(self.output_file, "w") as file:
# type: ignore # Argument 1 to "open" has incompatible type "str | None"; expected "int | str | bytes | PathLike[str] | PathLike[bytes]"
with open(self.output_file, "w", encoding='utf-8') as file:
file.write(result)
return None

View File

@@ -11,6 +11,7 @@ class TaskOutput(BaseModel):
exported_output: Union[str, BaseModel] = Field(
description="Output of the task", default=None
)
agent: str = Field(description="Agent that executed the task")
raw_output: str = Field(description="Result of the task")
@model_validator(mode="after")

View File

@@ -1,4 +1,4 @@
from typing import List
from typing import List, Union
from langchain.tools import StructuredTool
from pydantic import BaseModel, Field
@@ -33,12 +33,20 @@ class AgentTools(BaseModel):
]
return tools
def delegate_work(self, coworker: str, task: str, context: str):
def delegate_work(self, task: str, context: str, coworker: Union[str, None] = None, **kwargs):
"""Useful to delegate a specific task to a co-worker passing all necessary context and names."""
coworker = coworker or kwargs.get("co_worker") or kwargs.get("co-worker")
is_list = coworker.startswith("[") and coworker.endswith("]")
if is_list:
coworker = coworker[1:-1].split(",")[0]
return self._execute(coworker, task, context)
def ask_question(self, coworker: str, question: str, context: str):
def ask_question(self, question: str, context: str, coworker: Union[str, None] = None, **kwargs):
"""Useful to ask a question, opinion or take from a co-worker passing all necessary context and names."""
coworker = coworker or kwargs.get("co_worker") or kwargs.get("co-worker")
is_list = coworker.startswith("[") and coworker.endswith("]")
if is_list:
coworker = coworker[1:-1].split(",")[0]
return self._execute(coworker, question, context)
def _execute(self, agent, task, context):
@@ -49,7 +57,7 @@ class AgentTools(BaseModel):
for available_agent in self.agents
if available_agent.role.casefold().strip() == agent.casefold().strip()
]
except:
except Exception as _:
return self.i18n.errors("agent_tool_unexsiting_coworker").format(
coworkers="\n".join(
[f"- {agent.role.casefold()}" for agent in self.agents]

View File

@@ -70,7 +70,7 @@ class ToolUsage:
# Set the maximum parsing attempts for bigger models
if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
self.function_calling_llm.openai_api_base == None
self.function_calling_llm.openai_api_base is None
):
if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
self._max_parsing_attempts = 2
@@ -88,6 +88,8 @@ class ToolUsage:
self._printer.print(content=f"\n\n{error}\n", color="red")
self.task.increment_tools_errors()
return error
# BUG? The code below seems to be unreachable
try:
tool = self._select_tool(calling.tool_name)
except Exception as e:
@@ -95,6 +97,7 @@ class ToolUsage:
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error}\n", color="red")
return error
# type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
def _use(
@@ -102,8 +105,9 @@ class ToolUsage:
tool_string: str,
tool: BaseTool,
calling: Union[ToolCalling, InstructorToolCalling],
) -> None:
) -> None: # TODO: Fix this return type
tool_event = agentops.ToolEvent(name=calling.tool_name) if agentops else None
# type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
if self._check_tool_repeated_usage(calling=calling):
try:
result = self._i18n.errors("task_repeated_usage").format(
@@ -115,15 +119,18 @@ class ToolUsage:
tool_name=tool.name,
attempts=self._run_attempts,
)
# type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
result = self._format_result(result=result)
return result
return result # type: ignore # Fix the return type of this function
except Exception:
self.task.increment_tools_errors()
# type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
result = None
if self.tools_handler.cache:
result = self.tools_handler.cache.read(
result = self.tools_handler.cache.read( # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str")
tool=calling.tool_name, input=calling.arguments
)
@@ -137,6 +144,7 @@ class ToolUsage:
if calling.arguments:
try:
# type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "schema"
acceptable_args = tool.args_schema.schema()["properties"].keys()
arguments = {
k: v
@@ -149,6 +157,7 @@ class ToolUsage:
arguments = calling.arguments
result = tool._run(**arguments)
else:
# type: ignore # Incompatible types in assignment (expression has type "dict_values[str, Any]", variable has type "dict[str, Any]")
arguments = calling.arguments.values()
result = tool._run(*arguments)
else:
@@ -165,13 +174,14 @@ class ToolUsage:
).message
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error_message}\n", color="red")
return error
return error # type: ignore # No return value expected
self.task.increment_tools_errors()
if agentops:
agentops.record(
agentops.ErrorEvent(exception=e, trigger_event=tool_event)
)
return self.use(calling=calling, tool_string=tool_string)
return self.use(calling=calling, tool_string=tool_string) # type: ignore # No return value expected
if self.tools_handler:
should_cache = True
@@ -180,9 +190,9 @@ class ToolUsage:
)
if (
hasattr(original_tool, "cache_function")
and original_tool.cache_function
and original_tool.cache_function # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
):
should_cache = original_tool.cache_function(
should_cache = original_tool.cache_function( # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
calling.arguments, result
)
@@ -198,12 +208,14 @@ class ToolUsage:
tool_name=tool.name,
attempts=self._run_attempts,
)
# type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
result = self._format_result(result=result)
return result
return result # type: ignore # No return value expected
def _format_result(self, result: Any) -> None:
self.task.used_tools += 1
if self._should_remember_format():
if self._should_remember_format(): # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
# type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
result = self._remember_format(result=result)
return result
@@ -215,26 +227,33 @@ class ToolUsage:
result += "\n\n" + self._i18n.slice("tools").format(
tools=self.tools_description, tool_names=self.tools_names
)
return result
return result # type: ignore # No return value expected
def _check_tool_repeated_usage(
self, calling: Union[ToolCalling, InstructorToolCalling]
) -> None:
if not self.tools_handler:
return False
return False # type: ignore # No return value expected
if last_tool_usage := self.tools_handler.last_used_tool:
return (calling.tool_name == last_tool_usage.tool_name) and (
return (calling.tool_name == last_tool_usage.tool_name) and ( # type: ignore # No return value expected
calling.arguments == last_tool_usage.arguments
)
def _select_tool(self, tool_name: str) -> BaseTool:
for tool in self.tools:
order_tools = sorted(
self.tools,
key=lambda tool: SequenceMatcher(
None, tool.name.lower().strip(), tool_name.lower().strip()
).ratio(),
reverse=True,
)
for tool in order_tools:
if (
tool.name.lower().strip() == tool_name.lower().strip()
or SequenceMatcher(
None, tool.name.lower().strip(), tool_name.lower().strip()
).ratio()
> 0.9
> 0.85
):
return tool
self.task.increment_tools_errors()
@@ -267,7 +286,7 @@ class ToolUsage:
return "\n--\n".join(descriptions)
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
def _tool_calling(
self, tool_string: str
@@ -305,14 +324,14 @@ class ToolUsage:
tool_input = self._validate_tool_input(self.action.tool_input)
arguments = ast.literal_eval(tool_input)
except Exception:
return ToolUsageErrorException(
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_arguments_error")}'
)
if not isinstance(arguments, dict):
return ToolUsageErrorException(
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_arguments_error")}'
)
calling = ToolCalling(
calling = ToolCalling( # type: ignore # Unexpected keyword argument "log" for "ToolCalling"
tool_name=tool.name,
arguments=arguments,
log=tool_string,
@@ -323,7 +342,7 @@ class ToolUsage:
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{e}\n", color="red")
return ToolUsageErrorException(
return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
)
return self._tool_calling(tool_string)
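
The new ordering in _select_tool is easier to see in isolation: candidates are sorted by SequenceMatcher ratio so the closest name is tried first, and the lowered 0.85 threshold tolerates near-miss spellings. A minimal sketch over plain strings:

from difflib import SequenceMatcher

def select_tool(tool_names, requested):
    def ratio(name):
        return SequenceMatcher(
            None, name.lower().strip(), requested.lower().strip()
        ).ratio()

    # Best match first, then accept an exact hit or a ratio above 0.85.
    for name in sorted(tool_names, key=ratio, reverse=True):
        if name.lower().strip() == requested.lower().strip() or ratio(name) > 0.85:
            return name
    return None

tools = ["Delegate work to co-worker", "Ask question to co-worker"]
print(select_tool(tools, "Delegate work to coworker"))  # Delegate work to co-worker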

View File

@@ -29,7 +29,7 @@
"tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}"
},
"tools": {
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the task you want them to do, and ALL necessary context to execute the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
"ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the co-worker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
}
}

View File

@@ -6,3 +6,4 @@ from .printer import Printer
from .prompts import Prompts
from .rpm_controller import RPMController
from .fileHandler import FileHandler
from .parser import YamlParser

View File

@@ -83,5 +83,5 @@ class Converter(BaseModel):
)
return new_prompt | self.llm | parser
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
def _is_gpt(self, llm) -> bool: # type: ignore # BUG? Name "_is_gpt" defined on line 20 hides name from outer scope
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -70,4 +70,4 @@ class TaskEvaluator:
return converter.to_pydantic()
def _is_gpt(self, llm) -> bool:
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -21,14 +21,14 @@ class I18N(BaseModel):
self._prompts = json.load(f)
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
prompts_path = os.path.join(dir_path, f"../translations/en.json")
prompts_path = os.path.join(dir_path, "../translations/en.json")
with open(prompts_path, "r") as f:
self._prompts = json.load(f)
except FileNotFoundError:
raise Exception(f"Prompt file '{self.prompt_file}' not found.")
except json.JSONDecodeError:
raise Exception(f"Error decoding JSON from the prompts file.")
raise Exception("Error decoding JSON from the prompts file.")
if not self._prompts:
self._prompts = {}
@@ -47,5 +47,5 @@ class I18N(BaseModel):
def retrieve(self, kind, key) -> str:
try:
return self._prompts[kind][key]
except:
except Exception as _:
raise Exception(f"Prompt for '{kind}':'{key}' not found.")

View File

@@ -0,0 +1,17 @@
import re
class YamlParser:
def parse(file):
content = file.read()
# Replace single { and } with doubled ones, leaving already doubled braces and the special sequences {# and {% intact
modified_content = re.sub(r"(?<!\{){(?!\{)(?!\#)(?!\%)", "{{", content)
modified_content = re.sub(
r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content
)
# Check for 'context:' not followed by '[' and raise an error
if re.search(r"context:(?!\s*\[)", modified_content):
raise ValueError(
"Context is currently only supported in code when creating a task. Please use the 'context' key in the task configuration."
)
return modified_content
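
A quick demonstration of the brace doubling on a sample string (single braces are escaped so later .format() interpolation leaves them alone, while doubled braces and the {# / {% sequences pass through untouched):

import re

content = "Report on {topic} using {{format}} with {# comment #}"
modified = re.sub(r"(?<!\{){(?!\{)(?!\#)(?!\%)", "{{", content)
modified = re.sub(r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified)
print(modified)
# Report on {{topic}} using {{format}} with {# comment #}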

View File

@@ -22,7 +22,7 @@ class TokenProcess:
def sum_successful_requests(self, requests: int):
self.successful_requests = self.successful_requests + requests
def get_summary(self) -> str:
def get_summary(self) -> Dict[str, Any]:
return {
"total_tokens": self.total_tokens,
"prompt_tokens": self.prompt_tokens,
@@ -42,12 +42,12 @@ class TokenCalcHandler(BaseCallbackHandler):
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
if "gpt" in self.model:
try:
encoding = tiktoken.encoding_for_model(self.model)
else:
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
if self.token_cost_process == None:
if self.token_cost_process is None:
return
for prompt in prompts:
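
The try/except above replaces a brittle "gpt" substring check: tiktoken raises KeyError for model names it cannot map, so any unrecognized model now falls back to the cl100k_base encoding. A standalone sketch:

import tiktoken

def encoding_for(model: str) -> tiktoken.Encoding:
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to a general-purpose tokenizer.
        return tiktoken.get_encoding("cl100k_base")

enc = encoding_for("my-local-llm")  # not an OpenAI model -> cl100k_base
print(len(enc.encode("Hello, world")))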