Feat: Add Ruff to improve linting/formatting (#588)

* fix: fix test actually running

* fix: fix test to not send request to openai

* fix: fix linting to remove cli files

* fix: exclude only files that break black

* fix: fix all Ruff checks in the code and fix test with repeated name

* fix: change linter name in yml file

* feat: update pre-commit

* feat: remove need for isort in the codebase

* feat: remove black linter

* feat: update tests yml to try to fix the tests GitHub Action
Eduardo Chiarotti
2024-05-10 11:53:53 -03:00
committed by GitHub
parent 04b4191de5
commit aeba64feaf
13 changed files with 30 additions and 53 deletions

View File

@@ -10,7 +10,7 @@ jobs:
- name: Install Requirements
run: |
- pip install black
+ pip install ruff
- - name: Run Black
-   run: black . --exclude "cli/templates/crew.py" --extend-exclude "cli/templates/main.py"
+ - name: Run Ruff Linter
+   run: ruff check --exclude "templates","__init__.py"

View File

@@ -14,12 +14,12 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v4
with:
- python-version: '3.10'
+ python-version: "3.10"
- name: Install Requirements
run: |

View File

@@ -1,21 +1,7 @@
repos:
- - repo: https://github.com/psf/black-pre-commit-mirror
-   rev: 23.12.1
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+   rev: v0.4.4
hooks:
-   - id: black
-     language_version: python3.11
-     files: \.(py)$
-     exclude: 'src/crewai/cli/templates/(crew|main)\.py'
- - repo: https://github.com/pycqa/isort
-   rev: 5.13.2
-   hooks:
-   - id: isort
-     name: isort (python)
-     args: ["--profile", "black", "--filter-files"]
- - repo: https://github.com/PyCQA/autoflake
-   rev: v2.2.1
-   hooks:
-   - id: autoflake
-     args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variables', '--ignore-init-module-imports']
+   # Run the linter.
+   - id: ruff
+     args: [--fix]

View File

@@ -4,9 +4,7 @@ version = "0.30.0rc7"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
- packages = [
-     { include = "crewai", from = "src" },
- ]
+ packages = [{ include = "crewai", from = "src" }]
[tool.poetry.urls]
Homepage = "https://crewai.com"
@@ -40,17 +38,12 @@ pre-commit = "^3.6.0"
mkdocs = "^1.4.3"
mkdocstrings = "^0.22.0"
mkdocstrings-python = "^1.1.2"
- mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
+ mkdocs-material = { extras = ["imaging"], version = "^9.5.7" }
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai-tools = "^0.2.5"
- [tool.isort]
- profile = "black"
- known_first_party = ["crewai"]
[tool.poetry.group.test.dependencies]
pytest = "^8.0.0"
pytest-vcr = "^1.0.2"

View File

@@ -297,7 +297,7 @@ class Task(BaseModel):
return exported_result
def _is_gpt(self, llm) -> bool:
- return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+ return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
def _save_file(self, result: Any) -> None:
directory = os.path.dirname(self.output_file)
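
For context on the "== None" to "is None" edits in this file and the ones below: they correspond to Ruff's E711 rule (comparison to None). A minimal sketch, illustrative only and not from the codebase, of why identity is the safer check when a class overrides __eq__:

class AlwaysEqual:
    # Illustrative class: __eq__ claims equality with everything.
    def __eq__(self, other):
        return True

obj = AlwaysEqual()
print(obj == None)  # True  -- misleading result; E711 flags this comparison style
print(obj is None)  # False -- identity cannot be overridden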

View File

@@ -49,7 +49,7 @@ class AgentTools(BaseModel):
for available_agent in self.agents
if available_agent.role.casefold().strip() == agent.casefold().strip()
]
- except:
+ except Exception as _:
return self.i18n.errors("agent_tool_unexsiting_coworker").format(
coworkers="\n".join(
[f"- {agent.role.casefold()}" for agent in self.agents]

View File

@@ -64,7 +64,7 @@ class ToolUsage:
# Set the maximum parsing attempts for bigger models
if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
- self.function_calling_llm.openai_api_base == None
+ self.function_calling_llm.openai_api_base is None
):
if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
self._max_parsing_attempts = 2
@@ -254,7 +254,7 @@ class ToolUsage:
return "\n--\n".join(descriptions)
def _is_gpt(self, llm) -> bool:
- return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+ return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
def _tool_calling(
self, tool_string: str

View File

@@ -84,4 +84,4 @@ class Converter(BaseModel):
return new_prompt | self.llm | parser
def _is_gpt(self, llm) -> bool:
- return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+ return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -58,4 +58,4 @@ class TaskEvaluator:
return converter.to_pydantic()
def _is_gpt(self, llm) -> bool:
- return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
+ return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

View File

@@ -21,14 +21,14 @@ class I18N(BaseModel):
self._prompts = json.load(f)
else:
dir_path = os.path.dirname(os.path.realpath(__file__))
- prompts_path = os.path.join(dir_path, f"../translations/en.json")
+ prompts_path = os.path.join(dir_path, "../translations/en.json")
with open(prompts_path, "r") as f:
self._prompts = json.load(f)
except FileNotFoundError:
raise Exception(f"Prompt file '{self.prompt_file}' not found.")
except json.JSONDecodeError:
raise Exception(f"Error decoding JSON from the prompts file.")
raise Exception("Error decoding JSON from the prompts file.")
if not self._prompts:
self._prompts = {}
@@ -47,5 +47,5 @@ class I18N(BaseModel):
def retrieve(self, kind, key) -> str:
try:
return self._prompts[kind][key]
- except:
+ except Exception as _:
raise Exception(f"Prompt for '{kind}':'{key}' not found.")

View File

@@ -47,7 +47,7 @@ class TokenCalcHandler(BaseCallbackHandler):
else:
encoding = tiktoken.get_encoding("cl100k_base")
- if self.token_cost_process == None:
+ if self.token_cost_process is None:
return
for prompt in prompts:

View File

@@ -31,8 +31,8 @@ def test_agent_default_values():
assert isinstance(agent.llm, ChatOpenAI)
assert agent.llm.model_name == "gpt-4"
assert agent.llm.temperature == 0.7
- assert agent.llm.verbose == False
- assert agent.allow_delegation == True
+ assert agent.llm.verbose is False
+ assert agent.allow_delegation is True
def test_custom_llm():
@@ -751,7 +751,7 @@ def test_agent_definition_based_on_dict():
assert agent.role == "test role"
assert agent.goal == "test goal"
assert agent.backstory == "test backstory"
- assert agent.verbose == True
+ assert agent.verbose is True
assert agent.tools == []
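
The assertion changes in these tests follow Ruff's E712 rule (comparison to True/False). Purely as an illustration with a made-up value: equality and identity can disagree, because truthy values compare equal to True without being the True singleton.

flag = 1  # truthy and equal to True, but not the True singleton
print(flag == True)  # True  -- the form E712 flags
print(flag is True)  # False -- identity pins the exact boolean object
assert flag  # a plain truthiness check is often the simplest form in tests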

View File

@@ -698,6 +698,8 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
)
crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI", "points": 5})
+ inputs = {"topic": "AI", "points": 5}
+ crew._interpolate_inputs(inputs=inputs)  # Manual call for now
assert crew.tasks[0].description == "Give me an analysis around AI."
assert crew.tasks[0].expected_output == "5 bullet points about AI."
@@ -706,7 +708,7 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
assert crew.agents[0].backstory == "You have a lot of experience with AI."
- def test_crew_inputs_interpolate_both_agents_and_tasks():
+ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
from unittest.mock import patch
agent = Agent(
@@ -828,9 +830,7 @@ def test_tools_with_custom_caching():
with patch.object(
CacheHandler, "add", wraps=crew._cache_handler.add
) as add_to_cache:
- with patch.object(
-     CacheHandler, "read", wraps=crew._cache_handler.read
- ) as read_from_cache:
+ with patch.object(CacheHandler, "read", wraps=crew._cache_handler.read) as _:
result = crew.kickoff()
add_to_cache.assert_called_once_with(
tool="multiplcation_tool",
@@ -907,8 +907,6 @@ def test_crew_log_file_output(tmp_path):
)
]
- test_message = {"agent": "Researcher", "task": "Say Hi"}
crew = Crew(agents=[researcher], tasks=tasks, output_log_file=str(test_file))
crew.kickoff()
assert test_file.exists()
@@ -939,7 +937,7 @@ def test_manager_agent():
with patch.object(Task, "execute") as execute:
crew.kickoff()
- assert manager.allow_delegation == True
+ assert manager.allow_delegation is True
execute.assert_called()
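
The renamed test above is the "Test with repeated name" fix from the commit message, and it is what Ruff's F811 rule (redefinition of unused name) catches: a second def with the same name rebinds it at import time, so the first test is never collected. A minimal sketch, assuming standard pytest collection by function name:

def test_math():
    assert 1 + 1 == 2  # never runs: the def below rebinds the name

def test_math():  # F811: redefinition of unused test_math
    assert 2 + 2 == 4

# Only the second function exists after import, so pytest collects one test, not two.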