Feat: Add Ruff to improve linting/formatting (#588)

* fix: fix test so it actually runs

* fix: fix test to not send requests to OpenAI

* fix: fix linting to exclude cli files

* fix: exclude only the files that break Black

* fix: fix all Ruff checks in the code and fix test with a repeated name

* fix: change linter name in yml file

* feat: update pre-commit

* feat: remove need for isort in the code

* feat: remove black linter

* feat: update tests yml to try to fix the tests GitHub Action
Eduardo Chiarotti
2024-05-10 11:53:53 -03:00
committed by GitHub
parent 04b4191de5
commit aeba64feaf
13 changed files with 30 additions and 53 deletions
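
The repeated edits below are mechanical lint fixes. They appear to map onto rules Ruff enforces by default: E711 (comparison to None should use identity tests), E722 (bare except clauses), and F541 (f-strings without placeholders).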

View File

@@ -297,7 +297,7 @@ class Task(BaseModel):
         return exported_result
 
     def _is_gpt(self, llm) -> bool:
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
 
     def _save_file(self, result: Any) -> None:
         directory = os.path.dirname(self.output_file)
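
The == None to is None change matters beyond style: None is a singleton, and == dispatches to the left operand's __eq__, which a class may override arbitrarily. A minimal sketch, with an invented class name not taken from this codebase:

class AlwaysEqual:
    # Pathological but legal: an __eq__ that always claims equality,
    # much like mocks and some ORM field objects behave.
    def __eq__(self, other):
        return True

obj = AlwaysEqual()
print(obj == None)  # True  -- misleading, obj is not actually None
print(obj is None)  # False -- identity check, immune to __eq__ tricks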

View File

@@ -49,7 +49,7 @@ class AgentTools(BaseModel):
                 for available_agent in self.agents
                 if available_agent.role.casefold().strip() == agent.casefold().strip()
             ]
-        except:
+        except Exception as _:
             return self.i18n.errors("agent_tool_unexsiting_coworker").format(
                 coworkers="\n".join(
                     [f"- {agent.role.casefold()}" for agent in self.agents]

View File

@@ -64,7 +64,7 @@ class ToolUsage:
         # Set the maximum parsing attempts for bigger models
         if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
-            self.function_calling_llm.openai_api_base == None
+            self.function_calling_llm.openai_api_base is None
         ):
             if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
                 self._max_parsing_attempts = 2
@@ -254,7 +254,7 @@ class ToolUsage:
         return "\n--\n".join(descriptions)
 
     def _is_gpt(self, llm) -> bool:
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
 
     def _tool_calling(
         self, tool_string: str

View File

@@ -84,4 +84,4 @@ class Converter(BaseModel):
         return new_prompt | self.llm | parser
 
     def _is_gpt(self, llm) -> bool:
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -58,4 +58,4 @@ class TaskEvaluator:
         return converter.to_pydantic()
 
     def _is_gpt(self, llm) -> bool:
-        return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -21,14 +21,14 @@ class I18N(BaseModel):
                     self._prompts = json.load(f)
             else:
                 dir_path = os.path.dirname(os.path.realpath(__file__))
-                prompts_path = os.path.join(dir_path, f"../translations/en.json")
+                prompts_path = os.path.join(dir_path, "../translations/en.json")
 
                 with open(prompts_path, "r") as f:
                     self._prompts = json.load(f)
         except FileNotFoundError:
             raise Exception(f"Prompt file '{self.prompt_file}' not found.")
         except json.JSONDecodeError:
-            raise Exception(f"Error decoding JSON from the prompts file.")
+            raise Exception("Error decoding JSON from the prompts file.")
 
         if not self._prompts:
             self._prompts = {}
@@ -47,5 +47,5 @@ class I18N(BaseModel):
     def retrieve(self, kind, key) -> str:
         try:
             return self._prompts[kind][key]
-        except:
+        except Exception as _:
             raise Exception(f"Prompt for '{kind}':'{key}' not found.")

View File

@@ -47,7 +47,7 @@ class TokenCalcHandler(BaseCallbackHandler):
         else:
             encoding = tiktoken.get_encoding("cl100k_base")
 
-        if self.token_cost_process == None:
+        if self.token_cost_process is None:
             return
 
         for prompt in prompts:
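
To reproduce these findings locally, something like ruff check --select E711,E722,F541 . should surface the same patterns, and ruff check --fix . applies whatever fixes the installed Ruff version marks as safe (E722 has no autofix, since the intended exception type cannot be inferred). Exact behavior depends on the Ruff version and the project's configuration.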