From 2cd64924d2f1ccd1552e964a7736be54937505bf Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 22 Jun 2024 08:02:05 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pylint/checkers/misc.py         | 36 ++++++++++++++++++++++++---------
 tests/checkers/unittest_misc.py | 35 ++++++++++++++++----------------
 2 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/pylint/checkers/misc.py b/pylint/checkers/misc.py
index aadc5583ea2..e9f79c85d09 100644
--- a/pylint/checkers/misc.py
+++ b/pylint/checkers/misc.py
@@ -96,9 +96,9 @@ class EncodingChecker(BaseTokenChecker, BaseRawFileChecker):
                 "type": "yn",
                 "metavar": "<y or n>",
                 "default": False,
-                "help": "Whether or not to search for fixme's in docstrings."
-            }
-        )
+                "help": "Whether or not to search for fixme's in docstrings.",
+            },
+        ),
     )
 
     def open(self) -> None:
@@ -106,10 +106,14 @@ def open(self) -> None:
         notes = "|".join(re.escape(note) for note in self.linter.config.notes)
 
         if self.linter.config.notes_rgx:
-            comment_regex = rf"#\s*({notes}|{self.linter.config.notes_rgx})(?=(:|\s|\Z))"
+            comment_regex = (
+                rf"#\s*({notes}|{self.linter.config.notes_rgx})(?=(:|\s|\Z))"
+            )
             self._comment_fixme_pattern = re.compile(comment_regex, re.I)
         if self.linter.config.check_fixme_in_docstring:
-            docstring_regex = rf"\"\"\"\s*({notes}|{self.linter.config.notes_rgx})(?=(:|\s|\Z))"
+            docstring_regex = (
+                rf"\"\"\"\s*({notes}|{self.linter.config.notes_rgx})(?=(:|\s|\Z))"
+            )
             self._docstring_fixme_pattern = re.compile(docstring_regex, re.I)
         else:
             comment_regex = rf"#\s*({notes})(?=(:|\s|\Z))"
@@ -149,7 +153,9 @@ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
             return
         for token_info in tokens:
            if token_info.type == tokenize.COMMENT:
-                comment_text = token_info.string[1:].lstrip()  # trim '#' and white-spaces
+                comment_text = token_info.string[
+                    1:
+                ].lstrip()  # trim '#' and whitespace
                if self._comment_fixme_pattern.search("#" + comment_text.lower()):
                     self.add_message(
                         "fixme",
@@ -157,11 +163,18 @@ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
                         args=comment_text,
                         line=token_info.start[0],
                     )
-            elif self.linter.config.check_fixme_in_docstring and self._is_docstring_comment(token_info):
+            elif (
+                self.linter.config.check_fixme_in_docstring
+                and self._is_docstring_comment(token_info)
+            ):
                 docstring_lines = token_info.string.split("\n")
                 for line_no, line in enumerate(docstring_lines):
-                    comment_text = line.removeprefix('"""').lstrip().removesuffix('"""')  # trim '""""' and whitespace
-                    if self._docstring_fixme_pattern.search('"""' + comment_text.lower()):
+                    comment_text = (
+                        line.removeprefix('"""').lstrip().removesuffix('"""')
+                    )  # trim '"""' and whitespace
+                    if self._docstring_fixme_pattern.search(
+                        '"""' + comment_text.lower()
+                    ):
                         self.add_message(
                             "fixme",
                             col_offset=token_info.start[1] + 1,
@@ -170,7 +183,10 @@ def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
                         )
 
     def _is_docstring_comment(self, token_info: tokenize.TokenInfo) -> bool:
-        return token_info.type == tokenize.STRING and token_info.line.lstrip().startswith('"""')
+        return (
+            token_info.type == tokenize.STRING
+            and token_info.line.lstrip().startswith('"""')
+        )
 
 
 def register(linter: PyLinter) -> None:
diff --git a/tests/checkers/unittest_misc.py b/tests/checkers/unittest_misc.py
index 0918a2c1824..e932d377c4b 100644
--- a/tests/checkers/unittest_misc.py
+++ b/tests/checkers/unittest_misc.py
@@ -124,7 +124,6 @@ def test_docstring_with_message(self) -> None:
         """
         with self.assertAddsMessages(
             MessageTest(msg_id="fixme", line=2, args="FIXME message", col_offset=9)
-
         ):
             self.checker.process_tokens(_tokenize_str(code))
 
@@ -150,7 +149,7 @@ def test_docstring_with_nl_message_multi(self) -> None:
         """
         with self.assertAddsMessages(
             MessageTest(msg_id="fixme", line=3, args="FIXME this", col_offset=9),
-            MessageTest(msg_id="fixme", line=4, args="TODO: that", col_offset=9)
+            MessageTest(msg_id="fixme", line=4, args="TODO: that", col_offset=9),
         ):
             self.checker.process_tokens(_tokenize_str(code))
 
@@ -166,7 +165,7 @@ def test_docstring_with_comment(self) -> None:
         with self.assertAddsMessages(
             MessageTest(msg_id="fixme", line=2, args="XXX message1", col_offset=9),
             MessageTest(msg_id="fixme", line=4, args="FIXME message2", col_offset=9),
-            MessageTest(msg_id="fixme", line=5, args="TODO message3", col_offset=9)
+            MessageTest(msg_id="fixme", line=5, args="TODO message3", col_offset=9),
         ):
             self.checker.process_tokens(_tokenize_str(code))
 
@@ -206,14 +205,13 @@ def test_docstring_todo_mult(self) -> None:
         \"\"\"
         """
         with self.assertAddsMessages(
-            MessageTest(msg_id="fixme", line=3, args="FIXME this TODO that", col_offset=9),
+            MessageTest(
+                msg_id="fixme", line=3, args="FIXME this TODO that", col_offset=9
+            ),
         ):
             self.checker.process_tokens(_tokenize_str(code))
-
-    @set_config(
-        check_fixme_in_docstring=True,
-        notes=["CODETAG"]
-    )
+
+    @set_config(check_fixme_in_docstring=True, notes=["CODETAG"])
     def test_docstring_custom_note(self) -> None:
         code = """
         \"\"\"
@@ -221,14 +219,13 @@ def test_docstring_custom_note(self) -> None:
         \"\"\"
         """
         with self.assertAddsMessages(
-            MessageTest(msg_id="fixme", line=3, args="CODETAG implement this", col_offset=9),
+            MessageTest(
+                msg_id="fixme", line=3, args="CODETAG implement this", col_offset=9
+            ),
         ):
             self.checker.process_tokens(_tokenize_str(code))
-
-    @set_config(
-        check_fixme_in_docstring=True,
-        notes_rgx="FIX.*"
-    )
+
+    @set_config(check_fixme_in_docstring=True, notes_rgx="FIX.*")
     def test_docstring_custom_rgx(self) -> None:
         code = """
         \"\"\"
@@ -237,7 +234,11 @@ def test_docstring_custom_rgx(self) -> None:
         \"\"\"
         """
         with self.assertAddsMessages(
-            MessageTest(msg_id="fixme", line=3, args="FIXME implement this", col_offset=9),
-            MessageTest(msg_id="fixme", line=4, args="FIXTHIS also implement this", col_offset=9),
+            MessageTest(
+                msg_id="fixme", line=3, args="FIXME implement this", col_offset=9
+            ),
+            MessageTest(
+                msg_id="fixme", line=4, args="FIXTHIS also implement this", col_offset=9
+            ),
         ):
             self.checker.process_tokens(_tokenize_str(code))
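
Not part of the patch itself: a minimal, standalone sketch of how the two regexes built
in EncodingChecker.open() behave, assuming pylint's default notes list
["FIXME", "XXX", "TODO"] and no notes-rgx. The pattern construction mirrors the lines in
the misc.py hunks above; everything else here is illustrative only.

import re

notes = "|".join(re.escape(note) for note in ["FIXME", "XXX", "TODO"])
# Comment variant: '#', optional whitespace, a note, then ':', whitespace, or end of text.
comment_pattern = re.compile(rf"#\s*({notes})(?=(:|\s|\Z))", re.I)
# Docstring variant: identical, but anchored on an opening '"""' instead of '#'.
docstring_pattern = re.compile(rf"\"\"\"\s*({notes})(?=(:|\s|\Z))", re.I)

assert comment_pattern.search("# TODO: add tests")      # note followed by ':'
assert comment_pattern.search("# fixme")                # re.I matches lowercase; \Z matches end
assert not comment_pattern.search("# TODOS")            # lookahead rejects a longer word
assert docstring_pattern.search('"""FIXME message')     # docstring form matched by the new branch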