feat: add mypy as type checker, update code, and add reference comments (#591)

* fix: fix test to actually run

* fix: fix test to not send requests to OpenAI

* fix: fix linting to exclude CLI files

* fix: exclude only the files that break black

* fix: fix all Ruff checks in the code and fix test with repeated name

* fix: change linter name in the yml file

* feat: update pre-commit

* feat: remove the need for isort in the code

* feat: add mypy as type checker, update code, and add reference comments

* feat: remove black linter

* feat: remove the need for poetry to run the command

* feat: change logic to test mypy

* feat: update tests yml to try to fix the tests GitHub Action

* feat: try running just mypy on the GitHub Action

* feat: fix yml file

* feat: add comment to avoid an issue on the GitHub Action

* feat: decouple pytest from the need for poetry install

* feat: change tests.yml to test a different approach

* feat: change to poetry run

* fix: parameter field in the yml file

* fix: move parameters into the pyproject

* fix: update pyproject to remove import-untyped errors
Author: Eduardo Chiarotti
Committed: 2024-05-10 16:37:52 -03:00 (via GitHub)
parent 8430c2f9af
commit 1ec4da6947
23 changed files with 1064 additions and 310 deletions

View File

@@ -1,4 +1,3 @@
name: Run Type Checks
on: [pull_request]
@@ -12,19 +11,16 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@v2
+       uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v4
        with:
-         python-version: '3.10'
+         python-version: "3.10"
      - name: Install Requirements
        run: |
-         sudo apt-get update &&
-         pip install poetry &&
-         poetry lock &&
-         poetry install
+         pip install mypy
      - name: Run type checks
-       run: poetry run pyright
+       run: mypy src

View File

@@ -231,7 +231,7 @@ poetry run pytest
### Running static type checks
```bash
-poetry run pyright
+poetry run mypy
```
### Packaging

poetry.lock (generated): 1154 changed lines; diff suppressed because it is too large.

View File

@@ -32,7 +32,7 @@ tools = ["crewai-tools"]
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
isort = "^5.13.2" isort = "^5.13.2"
pyright = ">=1.1.350,<2.0.0" mypy = "1.10.0"
autoflake = "^2.2.1" autoflake = "^2.2.1"
pre-commit = "^3.6.0" pre-commit = "^3.6.0"
mkdocs = "^1.4.3" mkdocs = "^1.4.3"
@@ -52,6 +52,11 @@ python-dotenv = "1.0.0"
[tool.poetry.scripts] [tool.poetry.scripts]
crewai = "crewai.cli.cli:crewai" crewai = "crewai.cli.cli:crewai"
[tool.mypy]
ignore_missing_imports = true
disable_error_code = 'import-untyped'
exclude = ["cli/templates/main.py", "cli/templates/crew.py"]
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api" build-backend = "poetry.core.masonry.api"
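The new `[tool.mypy]` keys above map onto mypy command-line flags, so the same check can be reproduced outside Poetry. Below is a minimal sketch using mypy's scripting API (`mypy.api.run`); the flag values mirror the config above, but the wrapper script itself is an assumption, not part of this commit.

```python
# Sketch: reproduce the [tool.mypy] settings above through mypy's scripting API.
# Assumes mypy is installed and the script is run from the repository root.
from mypy import api

report, errors, exit_status = api.run([
    "--ignore-missing-imports",                     # ignore_missing_imports = true
    "--disable-error-code", "import-untyped",       # disable_error_code = 'import-untyped'
    "--exclude", r"cli/templates/(main|crew)\.py",  # approximates the exclude list
    "src",                                          # same target as the CI step (mypy src)
])
print(report or errors)
print(f"mypy exited with status {exit_status}")     # 0 means no type errors were found
```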

View File

@@ -204,7 +204,7 @@ class Agent(BaseModel):
            Output of the agent
        """
        if self.tools_handler:
-           self.tools_handler.last_used_tool = {}
+           self.tools_handler.last_used_tool = {}  # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
        task_prompt = task.prompt()
@@ -224,7 +224,7 @@ class Agent(BaseModel):
            task_prompt += self.i18n.slice("memory").format(memory=memory)
        tools = tools or self.tools
-       parsed_tools = self._parse_tools(tools)
+       parsed_tools = self._parse_tools(tools)  # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = parsed_tools
@@ -364,7 +364,7 @@ class Agent(BaseModel):
        thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts
-   def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
+   def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:  # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
        """Parse tools to be used for the task."""
        # tentatively try to import from crewai_tools import BaseTool as CrewAITool
        tools_list = []
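A note on the `# type: ignore` markers added in this file and below: a bare ignore silences every mypy error on its line, while mypy also accepts error-code-specific ignores that keep other diagnostics active. A small self-contained illustration; the classes are simplified stand-ins, not the real `Agent` or `ToolCalling`:

```python
# Illustration only: narrowing a blanket "# type: ignore" to one error code.
class ToolCallingStub:
    """Simplified stand-in for the real ToolCalling model."""
    tool_name: str = ""

class HandlerStub:
    last_used_tool: ToolCallingStub = ToolCallingStub()

    def reset(self) -> None:
        # Style used in this commit -- suppresses every mypy error on the line:
        self.last_used_tool = {}  # type: ignore
        # Narrower alternative -- only the incompatible-assignment error is silenced:
        self.last_used_tool = {}  # type: ignore[assignment]
```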

View File

@@ -35,7 +35,7 @@ class CrewAgentExecutor(AgentExecutor):
    crew: Any = None
    function_calling_llm: Any = None
    request_within_rpm_limit: Any = None
-   tools_handler: InstanceOf[ToolsHandler] = None
+   tools_handler: Optional[InstanceOf[ToolsHandler]] = None
    max_iterations: Optional[int] = 15
    have_forced_answer: bool = False
    force_answer_max_iterations: Optional[int] = None
@@ -189,7 +189,7 @@ class CrewAgentExecutor(AgentExecutor):
        intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
        # Call the LLM to see what to do.
-       output = self.agent.plan(
+       output = self.agent.plan(  # type: ignore # Incompatible types in assignment (expression has type "AgentAction | AgentFinish | list[AgentAction]", variable has type "AgentAction")
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
@@ -275,8 +275,8 @@ class CrewAgentExecutor(AgentExecutor):
            run_manager.on_agent_action(agent_action, color="green")
        tool_usage = ToolUsage(
-           tools_handler=self.tools_handler,
-           tools=self.tools,
+           tools_handler=self.tools_handler,  # type: ignore # Argument "tools_handler" to "ToolUsage" has incompatible type "ToolsHandler | None"; expected "ToolsHandler"
+           tools=self.tools,  # type: ignore # Argument "tools" to "ToolUsage" has incompatible type "Sequence[BaseTool]"; expected "list[BaseTool]"
            original_tools=self.original_tools,
            tools_description=self.tools_description,
            tools_names=self.tools_names,

View File

@@ -8,13 +8,13 @@ from .cache.cache_handler import CacheHandler
class ToolsHandler:
    """Callback handler for tool usage."""
-   last_used_tool: ToolCalling = {}
+   last_used_tool: ToolCalling = {}  # type: ignore # BUG?: Incompatible types in assignment (expression has type "Dict[...]", variable has type "ToolCalling")
-   cache: CacheHandler
+   cache: Optional[CacheHandler]
    def __init__(self, cache: Optional[CacheHandler] = None):
        """Initialize the callback handler."""
        self.cache = cache
-       self.last_used_tool = {}
+       self.last_used_tool = {}  # type: ignore # BUG?: same as above
    def on_tool_use(
        self,
@@ -23,7 +23,7 @@ class ToolsHandler:
        should_cache: bool = True,
    ) -> Any:
        """Run when tool ends running."""
-       self.last_used_tool = calling
+       self.last_used_tool = calling  # type: ignore # BUG?: Incompatible types in assignment (expression has type "Union[ToolCalling, InstructorToolCalling]", variable has type "ToolCalling")
        if self.cache and should_cache and calling.tool_name != CacheTools().name:
            self.cache.add(
                tool=calling.tool_name,
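The `BUG?` comments above point out that `last_used_tool` is annotated as `ToolCalling` yet initialised with a plain dict and later assigned an `InstructorToolCalling`. One hypothetical way to retire those ignores is to declare the attribute as optional and widen its type; this is a sketch for illustration, not the change made in this commit:

```python
# Hypothetical typed variant of ToolsHandler -- illustration only.
from typing import Optional, Union

class ToolCalling:            # simplified stand-ins for the real models
    tool_name: str = ""

class InstructorToolCalling:
    tool_name: str = ""

class TypedToolsHandler:
    last_used_tool: Optional[Union[ToolCalling, InstructorToolCalling]] = None

    def on_tool_use(self, calling: Union[ToolCalling, InstructorToolCalling]) -> None:
        # No dict sentinel, so the assignment type-checks without "# type: ignore".
        self.last_used_tool = calling
```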

View File

@@ -242,7 +242,7 @@ class Crew(BaseModel):
    def kickoff(self, inputs: Optional[Dict[str, Any]] = {}) -> str:
        """Starts the crew to work on its assigned tasks."""
        self._execution_span = self._telemetry.crew_execution_span(self)
-       self._interpolate_inputs(inputs)
+       self._interpolate_inputs(inputs)  # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
        self._set_tasks_callbacks()
        i18n = I18N(prompt_file=self.prompt_file)
@@ -263,8 +263,8 @@
        if self.process == Process.sequential:
            result = self._run_sequential_process()
        elif self.process == Process.hierarchical:
-           result, manager_metrics = self._run_hierarchical_process()
+           result, manager_metrics = self._run_hierarchical_process()  # type: ignore # Unpacking a string is disallowed
-           metrics.append(manager_metrics)
+           metrics.append(manager_metrics)  # type: ignore # Cannot determine type of "manager_metrics"
        else:
            raise NotImplementedError(
@@ -284,7 +284,7 @@
        """Executes tasks sequentially and returns the final output."""
        task_output = ""
        for task in self.tasks:
-           if task.agent.allow_delegation:
+           if task.agent.allow_delegation:  # type: ignore # Item "None" of "Agent | None" has no attribute "allow_delegation"
                agents_for_delegation = [
                    agent for agent in self.agents if agent != task.agent
                ]
@@ -357,23 +357,23 @@
        )
        self._finish_execution(task_output)
-       return self._format_output(task_output), manager._token_process.get_summary()
+       return self._format_output(task_output), manager._token_process.get_summary()  # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
-   def _set_tasks_callbacks(self) -> str:
+   def _set_tasks_callbacks(self) -> None:
        """Sets callback for every task suing task_callback"""
        for task in self.tasks:
            if not task.callback:
                task.callback = self.task_callback
-   def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
+   def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolates the inputs in the tasks and agents."""
-       [task.interpolate_inputs(inputs) for task in self.tasks]
+       [task.interpolate_inputs(inputs) for task in self.tasks]  # type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
-       [agent.interpolate_inputs(inputs) for agent in self.agents]
+       [agent.interpolate_inputs(inputs) for agent in self.agents]  # type: ignore # "interpolate_inputs" of "Agent" does not return a value (it only ever returns None)
    def _format_output(self, output: str) -> str:
        """Formats the output of the crew execution."""
        if self.full_output:
-           return {
+           return {  # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
                "final_output": output,
                "tasks_outputs": [task.output for task in self.tasks if task],
            }

View File

@@ -1,3 +1,5 @@
+from typing import Optional
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory
@@ -32,7 +34,7 @@ class ContextualMemory:
        formatted_results = "\n".join([f"- {result}" for result in stm_results])
        return f"Recent Insights:\n{formatted_results}" if stm_results else ""
-   def _fetch_ltm_context(self, task) -> str:
+   def _fetch_ltm_context(self, task) -> Optional[str]:
        """
        Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
        formatted as bullet points.
@@ -44,10 +46,10 @@ class ContextualMemory:
        formatted_results = [
            suggestion
            for result in ltm_results
-           for suggestion in result["metadata"]["suggestions"]
+           for suggestion in result["metadata"]["suggestions"]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
        ]
        formatted_results = list(dict.fromkeys(formatted_results))
-       formatted_results = "\n".join([f"- {result}" for result in formatted_results])
+       formatted_results = "\n".join([f"- {result}" for result in formatted_results])  # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")
        return f"Historical Data:\n{formatted_results}" if ltm_results else ""
@@ -58,6 +60,6 @@
        """
        em_results = self.em.search(query)
        formatted_results = "\n".join(
-           [f"- {result['context']}" for result in em_results]
+           [f"- {result['context']}" for result in em_results]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
        )
        return f"Entities:\n{formatted_results}" if em_results else ""

View File

@@ -16,7 +16,7 @@ class EntityMemory(Memory):
        )
        super().__init__(storage)
-   def save(self, item: EntityMemoryItem) -> None:
+   def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        """Saves an entity item into the SQLite storage."""
        data = f"{item.name}({item.type}): {item.description}"
        super().save(data, item.metadata)

View File

@@ -18,10 +18,10 @@ class LongTermMemory(Memory):
            storage = LTMSQLiteStorage()
        super().__init__(storage)
-   def save(self, item: LongTermMemoryItem) -> None:
+   def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        metadata = item.metadata
        metadata.update({"agent": item.agent, "expected_output": item.expected_output})
-       self.storage.save(
+       self.storage.save(  # type: ignore # BUG?: Unexpected keyword argument "task_description","score","datetime" for "save" of "Storage"
            task_description=item.task,
            score=metadata["quality"],
            metadata=metadata,
@@ -29,4 +29,4 @@ class LongTermMemory(Memory):
        )
    def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]:
-       return self.storage.load(task, latest_n)
+       return self.storage.load(task, latest_n)  # type: ignore # BUG?: "Storage" has no attribute "load"

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Union
+from typing import Any, Dict, Optional, Union
class LongTermMemoryItem:
@@ -8,8 +8,8 @@ class LongTermMemoryItem:
        task: str,
        expected_output: str,
        datetime: str,
-       quality: Union[int, float] = None,
-       metadata: Dict[str, Any] = None,
+       quality: Optional[Union[int, float]] = None,
+       metadata: Optional[Dict[str, Any]] = None,
    ):
        self.task = task
        self.agent = agent

View File

@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, Optional
from crewai.memory.storage.interface import Storage
@@ -12,12 +12,16 @@ class Memory:
        self.storage = storage
    def save(
-       self, value: Any, metadata: Dict[str, Any] = None, agent: str = None
+       self,
+       value: Any,
+       metadata: Optional[Dict[str, Any]] = None,
+       agent: Optional[str] = None,
    ) -> None:
        metadata = metadata or {}
        if agent:
            metadata["agent"] = agent
-       self.storage.save(value, metadata)
+       self.storage.save(value, metadata)  # type: ignore # Maybe BUG? Should be self.storage.save(key, value, metadata)
    def search(self, query: str) -> Dict[str, Any]:
        return self.storage.search(query)

View File

@@ -16,8 +16,8 @@ class ShortTermMemory(Memory):
            storage = RAGStorage(type="short_term", embedder_config=embedder_config)
        super().__init__(storage)
-   def save(self, item: ShortTermMemoryItem) -> None:
+   def save(self, item: ShortTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        super().save(item.data, item.metadata, item.agent)
    def search(self, query: str, score_threshold: float = 0.35):
-       return self.storage.search(query=query, score_threshold=score_threshold)
+       return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters

View File

@@ -1,8 +1,10 @@
-from typing import Any, Dict
+from typing import Any, Dict, Optional
class ShortTermMemoryItem:
-   def __init__(self, data: Any, agent: str, metadata: Dict[str, Any] = None):
+   def __init__(
+       self, data: Any, agent: str, metadata: Optional[Dict[str, Any]] = None
+   ):
        self.data = data
        self.agent = agent
        self.metadata = metadata if metadata is not None else {}

View File

@@ -7,5 +7,5 @@ class Storage:
    def save(self, key: str, value: Any, metadata: Dict[str, Any]) -> None:
        pass
-   def search(self, key: str) -> Dict[str, Any]:
+   def search(self, key: str) -> Dict[str, Any]:  # type: ignore
        pass
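The ignore on `search` above exists because the stub body is `pass`, so the method implicitly returns `None` even though it is annotated to return `Dict[str, Any]`. A hypothetical interface shape that would type-check without the ignore (not the change made in this commit):

```python
# Hypothetical sketch of an interface whose stubs satisfy mypy without ignores.
from typing import Any, Dict

class StorageSketch:
    """Abstract-style storage interface; concrete backends override both methods."""

    def save(self, key: str, value: Any, metadata: Dict[str, Any]) -> None:
        raise NotImplementedError

    def search(self, key: str) -> Dict[str, Any]:
        # Raising keeps the declared return type honest, so no "# type: ignore" is needed.
        raise NotImplementedError
```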

View File

@@ -1,6 +1,6 @@
import json
import sqlite3
-from typing import Any, Dict, Union
+from typing import Any, Dict, List, Optional, Union
from crewai.utilities import Printer
from crewai.utilities.paths import db_storage_path
@@ -11,7 +11,9 @@ class LTMSQLiteStorage:
    An updated SQLite storage class for LTM data storage.
    """
-   def __init__(self, db_path=f"{db_storage_path()}/long_term_memory_storage.db"):
+   def __init__(
+       self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
+   ) -> None:
        self.db_path = db_path
        self._printer: Printer = Printer()
        self._initialize_db()
@@ -67,7 +69,9 @@ class LTMSQLiteStorage:
                color="red",
            )
-   def load(self, task_description: str, latest_n: int) -> Dict[str, Any]:
+   def load(
+       self, task_description: str, latest_n: int
+   ) -> Optional[List[Dict[str, Any]]]:
        """Queries the LTM table by task description with error handling."""
        try:
            with sqlite3.connect(self.db_path) as conn:

View File

@@ -2,7 +2,7 @@ import contextlib
import io
import logging
import os
-from typing import Any, Dict
+from typing import Any, Dict, List, Optional
from embedchain import App
from embedchain.llm.base import BaseLlm
@@ -72,16 +72,16 @@ class RAGStorage(Storage):
        if allow_reset:
            self.app.reset()
-   def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+   def save(self, value: Any, metadata: Dict[str, Any]) -> None:  # type: ignore # BUG?: Should be save(key, value, metadata) Signature of "save" incompatible with supertype "Storage"
        self._generate_embedding(value, metadata)
-   def search(
+   def search(  # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage"
        self,
        query: str,
        limit: int = 3,
-       filter: dict = None,
+       filter: Optional[dict] = None,
        score_threshold: float = 0.35,
-   ) -> Dict[str, Any]:
+   ) -> List[Any]:
        with suppress_logging():
            try:
                results = (

View File

@@ -1,11 +1,12 @@
import inspect
import os
from pathlib import Path
-from crewai.utilities.parser import YamlParser
import yaml
from dotenv import load_dotenv
+from crewai.utilities.parser import YamlParser
load_dotenv()
@@ -41,7 +42,7 @@ def CrewBase(cls):
    @staticmethod
    def load_yaml(config_path: str):
        with open(config_path, "r") as file:
-           parsedContent = YamlParser.parse(file)
+           parsedContent = YamlParser.parse(file)  # type: ignore # Argument 1 to "parse" has incompatible type "TextIOWrapper"; expected "YamlParser"
        return yaml.safe_load(parsedContent)
    return WrappedClass

View File

@@ -41,7 +41,7 @@ class Task(BaseModel):
    tools_errors: int = 0
    delegations: int = 0
    i18n: I18N = I18N()
-   thread: threading.Thread = None
+   thread: Optional[threading.Thread] = None
    prompt_context: Optional[str] = None
    description: str = Field(description="Description of the actual task.")
    expected_output: str = Field(
@@ -144,7 +144,7 @@ class Task(BaseModel):
        )
        return self
-   def execute(
+   def execute(  # type: ignore # Missing return statement
        self,
        agent: Agent | None = None,
        context: Optional[str] = None,
@@ -163,13 +163,13 @@ class Task(BaseModel):
            )
        if self.context:
-           context = []
+           context = []  # type: ignore # Incompatible types in assignment (expression has type "list[Never]", variable has type "str | None")
            for task in self.context:
                if task.async_execution:
-                   task.thread.join()
+                   task.thread.join()  # type: ignore # Item "None" of "Thread | None" has no attribute "join"
                if task and task.output:
-                   context.append(task.output.raw_output)
+                   context.append(task.output.raw_output)  # type: ignore # Item "str" of "str | None" has no attribute "append"
-           context = "\n".join(context)
+           context = "\n".join(context)  # type: ignore # Argument 1 to "join" of "str" has incompatible type "str | None"; expected "Iterable[str]"
        self.prompt_context = context
        tools = tools or self.tools
@@ -250,26 +250,26 @@ class Task(BaseModel):
        # try to convert task_output directly to pydantic/json
        try:
-           exported_result = model.model_validate_json(result)
+           exported_result = model.model_validate_json(result)  # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
            if self.output_json:
-               return exported_result.model_dump()
+               return exported_result.model_dump()  # type: ignore # "str" has no attribute "model_dump"
            return exported_result
        except Exception:
            # sometimes the response contains valid JSON in the middle of text
            match = re.search(r"({.*})", result, re.DOTALL)
            if match:
                try:
-                   exported_result = model.model_validate_json(match.group(0))
+                   exported_result = model.model_validate_json(match.group(0))  # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
                    if self.output_json:
-                       return exported_result.model_dump()
+                       return exported_result.model_dump()  # type: ignore # "str" has no attribute "model_dump"
                    return exported_result
                except Exception:
                    pass
-       llm = self.agent.function_calling_llm or self.agent.llm
+       llm = self.agent.function_calling_llm or self.agent.llm  # type: ignore # Item "None" of "Agent | None" has no attribute "function_calling_llm"
        if not self._is_gpt(llm):
-           model_schema = PydanticSchemaParser(model=model).get_schema()
+           model_schema = PydanticSchemaParser(model=model).get_schema()  # type: ignore # Argument "model" to "PydanticSchemaParser" has incompatible type "type[BaseModel] | None"; expected "type[BaseModel]"
            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
        converter = Converter(
@@ -290,7 +290,7 @@ class Task(BaseModel):
        if self.output_file:
            content = (
-               exported_result if not self.output_pydantic else exported_result.json()
+               exported_result if not self.output_pydantic else exported_result.json()  # type: ignore # "str" has no attribute "json"
            )
            self._save_file(content)
@@ -300,12 +300,12 @@ class Task(BaseModel):
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
    def _save_file(self, result: Any) -> None:
-       directory = os.path.dirname(self.output_file)
+       directory = os.path.dirname(self.output_file)  # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
-       with open(self.output_file, "w") as file:
+       with open(self.output_file, "w") as file:  # type: ignore # Argument 1 to "open" has incompatible type "str | None"; expected "int | str | bytes | PathLike[str] | PathLike[bytes]"
            file.write(result)
        return None
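Most of the ignores in this file come from `Optional` attributes (for example `output_file: str | None` and `thread: Thread | None`) being used as though they were always set. mypy can usually be satisfied by narrowing the optional first; the function below is a small self-contained illustration with stand-in names, not the real `Task._save_file`:

```python
# Illustration only: narrowing an Optional value instead of adding "# type: ignore".
import os
from typing import Optional

def save_file(output_file: Optional[str], result: str) -> None:
    if output_file is None:
        # The early return narrows output_file to "str" below, so os.path.dirname()
        # and open() type-check without any ignore comments.
        return
    directory = os.path.dirname(output_file)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with open(output_file, "w") as file:
        file.write(result)
```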

View File

@@ -82,6 +82,8 @@ class ToolUsage:
            self._printer.print(content=f"\n\n{error}\n", color="red")
            self.task.increment_tools_errors()
            return error
+       # BUG? The code below seems to be unreachable
        try:
            tool = self._select_tool(calling.tool_name)
        except Exception as e:
@@ -89,15 +91,15 @@ class ToolUsage:
            self.task.increment_tools_errors()
            self._printer.print(content=f"\n\n{error}\n", color="red")
            return error
-       return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
+       return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"  # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)
    def _use(
        self,
        tool_string: str,
        tool: BaseTool,
        calling: Union[ToolCalling, InstructorToolCalling],
-   ) -> None:
+   ) -> None:  # TODO: Fix this return type
-       if self._check_tool_repeated_usage(calling=calling):
+       if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
            try:
                result = self._i18n.errors("task_repeated_usage").format(
                    tool_names=self.tools_names
@@ -108,15 +110,16 @@ class ToolUsage:
                    tool_name=tool.name,
                    attempts=self._run_attempts,
                )
-               result = self._format_result(result=result)
+               result = self._format_result(result=result)  # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
-               return result
+               return result  # type: ignore # Fix the reutrn type of this function
            except Exception:
                self.task.increment_tools_errors()
-       result = None
+       result = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
        if self.tools_handler.cache:
-           result = self.tools_handler.cache.read(
+           result = self.tools_handler.cache.read(  # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str")
                tool=calling.tool_name, input=calling.arguments
            )
@@ -130,7 +133,7 @@ class ToolUsage:
            if calling.arguments:
                try:
-                   acceptable_args = tool.args_schema.schema()["properties"].keys()
+                   acceptable_args = tool.args_schema.schema()["properties"].keys()  # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "schema"
                    arguments = {
                        k: v
                        for k, v in calling.arguments.items()
@@ -142,7 +145,7 @@ class ToolUsage:
                        arguments = calling.arguments
                        result = tool._run(**arguments)
                else:
-                   arguments = calling.arguments.values()
+                   arguments = calling.arguments.values()  # type: ignore # Incompatible types in assignment (expression has type "dict_values[str, Any]", variable has type "dict[str, Any]")
                    result = tool._run(*arguments)
            else:
                result = tool._run()
@@ -158,9 +161,10 @@ class ToolUsage:
                ).message
                self.task.increment_tools_errors()
                self._printer.print(content=f"\n\n{error_message}\n", color="red")
-               return error
+               return error  # type: ignore # No return value expected
            self.task.increment_tools_errors()
-           return self.use(calling=calling, tool_string=tool_string)
+           return self.use(calling=calling, tool_string=tool_string)  # type: ignore # No return value expected
        if self.tools_handler:
            should_cache = True
@@ -169,9 +173,9 @@ class ToolUsage:
            )
            if (
                hasattr(original_tool, "cache_function")
-               and original_tool.cache_function
+               and original_tool.cache_function  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
            ):
-               should_cache = original_tool.cache_function(
+               should_cache = original_tool.cache_function(  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
                    calling.arguments, result
                )
@@ -185,13 +189,13 @@ class ToolUsage:
                tool_name=tool.name,
                attempts=self._run_attempts,
            )
-           result = self._format_result(result=result)
+           result = self._format_result(result=result)  # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
-           return result
+           return result  # type: ignore # No return value expected
    def _format_result(self, result: Any) -> None:
        self.task.used_tools += 1
-       if self._should_remember_format():
+       if self._should_remember_format():  # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
-           result = self._remember_format(result=result)
+           result = self._remember_format(result=result)  # type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
        return result
    def _should_remember_format(self) -> None:
@@ -202,15 +206,15 @@ class ToolUsage:
            result += "\n\n" + self._i18n.slice("tools").format(
                tools=self.tools_description, tool_names=self.tools_names
            )
-       return result
+       return result  # type: ignore # No return value expected
    def _check_tool_repeated_usage(
        self, calling: Union[ToolCalling, InstructorToolCalling]
    ) -> None:
        if not self.tools_handler:
-           return False
+           return False  # type: ignore # No return value expected
        if last_tool_usage := self.tools_handler.last_used_tool:
-           return (calling.tool_name == last_tool_usage.tool_name) and (
+           return (calling.tool_name == last_tool_usage.tool_name) and (  # type: ignore # No return value expected
                calling.arguments == last_tool_usage.arguments
            )
@@ -292,14 +296,14 @@ class ToolUsage:
            tool_input = self._validate_tool_input(self.action.tool_input)
            arguments = ast.literal_eval(tool_input)
        except Exception:
-           return ToolUsageErrorException(
+           return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
                f'{self._i18n.errors("tool_arguments_error")}'
            )
        if not isinstance(arguments, dict):
-           return ToolUsageErrorException(
+           return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
                f'{self._i18n.errors("tool_arguments_error")}'
            )
-       calling = ToolCalling(
+       calling = ToolCalling(  # type: ignore # Unexpected keyword argument "log" for "ToolCalling"
            tool_name=tool.name,
            arguments=arguments,
            log=tool_string,
@@ -310,7 +314,7 @@ class ToolUsage:
        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
        self.task.increment_tools_errors()
        self._printer.print(content=f"\n\n{e}\n", color="red")
-       return ToolUsageErrorException(
+       return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
            f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
        )
        return self._tool_calling(tool_string)

View File

@@ -83,5 +83,5 @@ class Converter(BaseModel):
        )
        return new_prompt | self.llm | parser
-   def _is_gpt(self, llm) -> bool:
+   def _is_gpt(self, llm) -> bool:  # type: ignore # BUG? Name "_is_gpt" defined on line 20 hides name from outer scope
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -22,7 +22,7 @@ class TokenProcess:
    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests
-   def get_summary(self) -> str:
+   def get_summary(self) -> Dict[str, Any]:
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,