
Commit 9d71795

Applied automated fixes from ruff PT pytest ruleset
1 parent 35dc644 · commit 9d71795

10 files changed: +166 -116 lines changed

cmd2/transcript.py

Lines changed: 4 additions & 4 deletions
@@ -108,9 +108,9 @@ def _test_transcript(self, fname: str, transcript: Iterator[str]) -> None:
             # Read the expected result from transcript
             if ansi.strip_style(line).startswith(self.cmdapp.visible_prompt):
                 message = f'\nFile {fname}, line {line_num}\nCommand was:\n{command}\nExpected: (nothing)\nGot:\n{result}\n'
-                self.assertTrue(not (result.strip()), message)
+                assert not result.strip(), message
                 # If the command signaled the application to quit there should be no more commands
-                self.assertFalse(stop, stop_msg)
+                assert not stop, stop_msg
                 continue
             expected_parts = []
             while not ansi.strip_style(line).startswith(self.cmdapp.visible_prompt):
@@ -124,13 +124,13 @@ def _test_transcript(self, fname: str, transcript: Iterator[str]) -> None:

             if stop:
                 # This should only be hit if the command that set stop to True had output text
-                self.assertTrue(finished, stop_msg)
+                assert finished, stop_msg

             # transform the expected text into a valid regular expression
             expected = ''.join(expected_parts)
             expected = self._transform_transcript_expected(expected)
             message = f'\nFile {fname}, line {line_num}\nCommand was:\n{command}\nExpected:\n{expected}\nGot:\n{result}\n'
-            self.assertTrue(re.match(expected, result, re.MULTILINE | re.DOTALL), message)
+            assert re.match(expected, result, re.MULTILINE | re.DOTALL), message

     def _transform_transcript_expected(self, s: str) -> str:
         r"""Parse the string with slashed regexes into a valid regex.

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -199,7 +199,7 @@ select = [
     "PLE", # Pylint Errors
     # "PLR", # Pylint Refactoring suggestions
     "PLW", # Pylint Warnings
-    # "PT", # flake8-pytest-style (warnings about unit test best practices)
+    # "PT", # flake8-pytest-style (warnings about unit test best practices)
     # "PTH", # flake8-use-pathlib (force use of pathlib instead of os.path)
     "PYI", # flake8-pyi (warnings related to type hint best practices)
     # "Q", # flake8-quotes (force double quotes)

tests/test_ansi.py

Lines changed: 2 additions & 2 deletions
@@ -148,7 +148,7 @@ def test_set_title():


 @pytest.mark.parametrize(
-    'cols, prompt, line, cursor, msg, expected',
+    ('cols', 'prompt', 'line', 'cursor', 'msg', 'expected'),
     [
         (
             127,
@@ -230,7 +230,7 @@ def test_sequence_str_building(ansi_sequence):


 @pytest.mark.parametrize(
-    'r, g, b, valid',
+    ('r', 'g', 'b', 'valid'),
     [
         (0, 0, 0, True),
         (255, 255, 255, True),
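
The decorator changes in this file (and in the test files below) apply the flake8-pytest-style preference for passing parametrize argument names as a tuple of strings rather than one comma-separated string (PT006). A minimal, self-contained sketch reusing the r/g/b names from the hunk above; the test body and the out-of-range case are illustrative, not taken from cmd2's suite:

import pytest


@pytest.mark.parametrize(
    ('r', 'g', 'b', 'valid'),  # tuple of names instead of 'r, g, b, valid'
    [
        (0, 0, 0, True),
        (255, 255, 255, True),
        (256, 0, 0, False),  # assumed out-of-range case for illustration
    ],
)
def test_rgb_bounds(r, g, b, valid):
    # Each tuple in the list above is unpacked into the named arguments.
    assert all(0 <= channel <= 255 for channel in (r, g, b)) == valid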

tests/test_argparse_completer.py

Lines changed: 21 additions & 19 deletions
@@ -361,7 +361,7 @@ def test_bad_subcommand_help(ac_app):


 @pytest.mark.parametrize(
-    'command, text, completions',
+    ('command', 'text', 'completions'),
     [
         ('', 'mus', ['music ']),
         ('music', 'cre', ['create ']),
@@ -388,7 +388,7 @@ def test_complete_help(ac_app, command, text, completions):


 @pytest.mark.parametrize(
-    'subcommand, text, completions',
+    ('subcommand', 'text', 'completions'),
     [('create', '', ['jazz', 'rock']), ('create', 'ja', ['jazz ']), ('create', 'foo', []), ('creab', 'ja', [])],
 )
 def test_subcommand_completions(ac_app, subcommand, text, completions):
@@ -406,7 +406,7 @@ def test_subcommand_completions(ac_app, subcommand, text, completions):


 @pytest.mark.parametrize(
-    'command_and_args, text, completion_matches, display_matches',
+    ('command_and_args', 'text', 'completion_matches', 'display_matches'),
     [
         # Complete all flags (suppressed will not show)
         (
@@ -547,13 +547,12 @@ def test_autcomp_flag_completion(ac_app, command_and_args, text, completion_matc
     else:
         assert first_match is None

-    assert ac_app.completion_matches == sorted(
-        completion_matches, key=ac_app.default_sort_key
-    ) and ac_app.display_matches == sorted(display_matches, key=ac_app.default_sort_key)
+    assert ac_app.completion_matches == sorted(completion_matches, key=ac_app.default_sort_key)
+    assert ac_app.display_matches == sorted(display_matches, key=ac_app.default_sort_key)


 @pytest.mark.parametrize(
-    'flag, text, completions',
+    ('flag', 'text', 'completions'),
     [
         ('-l', '', ArgparseCompleterTester.static_choices_list),
         ('--list', 's', ['static', 'stop']),
@@ -588,7 +587,7 @@ def test_autocomp_flag_choices_completion(ac_app, flag, text, completions):


 @pytest.mark.parametrize(
-    'pos, text, completions',
+    ('pos', 'text', 'completions'),
     [
         (1, '', ArgparseCompleterTester.static_choices_list),
         (1, 's', ['static', 'stop']),
@@ -639,11 +638,12 @@ def test_flag_sorting(ac_app):
     begidx = endidx - len(text)

     first_match = complete_tester(text, line, begidx, endidx, ac_app)
-    assert first_match is not None and ac_app.completion_matches == option_strings
+    assert first_match is not None
+    assert ac_app.completion_matches == option_strings


 @pytest.mark.parametrize(
-    'flag, text, completions',
+    ('flag', 'text', 'completions'),
     [('-c', '', ArgparseCompleterTester.completions_for_flag), ('--completer', 'f', ['flag', 'fairly'])],
 )
 def test_autocomp_flag_completers(ac_app, flag, text, completions):
@@ -661,7 +661,7 @@ def test_autocomp_flag_completers(ac_app, flag, text, completions):


 @pytest.mark.parametrize(
-    'pos, text, completions',
+    ('pos', 'text', 'completions'),
     [
         (1, '', ArgparseCompleterTester.completions_for_pos_1),
         (1, 'p', ['positional_1', 'probably']),
@@ -763,7 +763,7 @@ def test_completion_items(ac_app):


 @pytest.mark.parametrize(
-    'num_aliases, show_description',
+    ('num_aliases', 'show_description'),
     [
         # The number of completion results determines if the description field of CompletionItems gets displayed
         # in the tab completions. The count must be greater than 1 and less than ac_app.max_completion_items,
@@ -803,7 +803,7 @@ def test_max_completion_items(ac_app, num_aliases, show_description):


 @pytest.mark.parametrize(
-    'args, completions',
+    ('args', 'completions'),
     [
         # Flag with nargs = 2
         ('--set_value', ArgparseCompleterTester.set_value_choices),
@@ -869,7 +869,7 @@ def test_autcomp_nargs(ac_app, args, completions):


 @pytest.mark.parametrize(
-    'command_and_args, text, is_error',
+    ('command_and_args', 'text', 'is_error'),
     [
         # Flag is finished before moving on
         ('hint --flag foo --', '', False),
@@ -986,7 +986,7 @@ def test_completion_items_descriptive_header(ac_app):


 @pytest.mark.parametrize(
-    'command_and_args, text, has_hint',
+    ('command_and_args', 'text', 'has_hint'),
     [
         # Normal cases
         ('hint', '', True),
@@ -1045,7 +1045,7 @@ def test_autocomp_hint_no_help_text(ac_app, capsys):


 @pytest.mark.parametrize(
-    'args, text',
+    ('args', 'text'),
     [
         # Exercise a flag arg and choices function that raises a CompletionError
         ('--choice ', 'choice'),
@@ -1066,7 +1066,7 @@ def test_completion_error(ac_app, capsys, args, text):


 @pytest.mark.parametrize(
-    'command_and_args, completions',
+    ('command_and_args', 'completions'),
     [
         # Exercise a choices function that receives arg_tokens dictionary
         ('arg_tokens choice subcmd', ['choice', 'subcmd']),
@@ -1092,7 +1092,7 @@ def test_arg_tokens(ac_app, command_and_args, completions):


 @pytest.mark.parametrize(
-    'command_and_args, text, output_contains, first_match',
+    ('command_and_args', 'text', 'output_contains', 'first_match'),
     [
         # Group isn't done. Hint will show for optional positional and no completions returned
         ('mutex', '', 'the optional positional', None),
@@ -1188,7 +1188,9 @@ def test_complete_command_help_no_tokens(ac_app):
     assert not completions


-@pytest.mark.parametrize('flag, completions', [('--provider', standalone_choices), ('--completer', standalone_completions)])
+@pytest.mark.parametrize(
+    ('flag', 'completions'), [('--provider', standalone_choices), ('--completer', standalone_completions)]
+)
 def test_complete_standalone(ac_app, flag, completions):
     text = ''
     line = 'standalone {} {}'.format(flag, text)
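
Besides the parametrize name tuples, this file picks up two more fixes: compound assertions of the form "assert a and b" are split into separate statements (PT018), and the long single-line decorator near the end is re-wrapped across lines after the rewrite. A minimal sketch of the assertion split, using stand-in values rather than the ac_app fixture:

def test_split_assertion_example():
    first_match = '-h'
    completion_matches = ['-h', '--help']
    option_strings = ['-h', '--help']

    # Before the fix this was a single compound assertion:
    #     assert first_match is not None and completion_matches == option_strings
    # Split apart, a failure report points at the exact condition that failed.
    assert first_match is not None
    assert completion_matches == option_strings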

tests/test_argparse_custom.py

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ def fake_func():


 @pytest.mark.parametrize(
-    'kwargs, is_valid',
+    ('kwargs', 'is_valid'),
     [
         ({'choices_provider': fake_func}, True),
         ({'completer': fake_func}, True),

tests/test_cmd2.py

Lines changed: 12 additions & 7 deletions
@@ -221,7 +221,7 @@ def test_set_no_settables(base_app):


 @pytest.mark.parametrize(
-    'new_val, is_valid, expected',
+    ('new_val', 'is_valid', 'expected'),
     [
         (ansi.AllowStyle.NEVER, True, ansi.AllowStyle.NEVER),
         ('neVeR', True, ansi.AllowStyle.NEVER),
@@ -391,7 +391,8 @@ def test_run_script_with_empty_file(base_app, request):
     test_dir = os.path.dirname(request.module.__file__)
     filename = os.path.join(test_dir, 'scripts', 'empty.txt')
     out, err = run_cmd(base_app, 'run_script {}'.format(filename))
-    assert not out and not err
+    assert not out
+    assert not err
     assert base_app.last_result is True


@@ -741,7 +742,8 @@ def test_pipe_to_shell(base_app):
     command = 'help help | wc'

     out, err = run_cmd(base_app, command)
-    assert out and not err
+    assert out
+    assert not err


 def test_pipe_to_shell_and_redirect(base_app):
@@ -755,7 +757,8 @@ def test_pipe_to_shell_and_redirect(base_app):
     command = 'help help | wc > {}'.format(filename)

     out, err = run_cmd(base_app, command)
-    assert not out and not err
+    assert not out
+    assert not err
     assert os.path.exists(filename)
     os.remove(filename)

@@ -807,7 +810,8 @@ def test_get_paste_buffer_exception(base_app, mocker, capsys):
     out, err = capsys.readouterr()
     assert out == ''
     # this just checks that cmd2 is surfacing whatever error gets raised by pyperclip.paste
-    assert 'ValueError' in err and 'foo' in err
+    assert 'ValueError' in err
+    assert 'foo' in err


 def test_allow_clipboard_initializer(base_app):
@@ -1493,7 +1497,7 @@ def test_select_uneven_list_of_tuples(select_app, monkeypatch):


 @pytest.mark.parametrize(
-    'selection, type_str',
+    ('selection', 'type_str'),
     [
         ('1', "<class 'int'>"),
         ('2', "<class 'str'>"),
@@ -2889,7 +2893,8 @@ def test_disable_and_enable_category(disable_commands_app):
     begidx = endidx - len(text)

     first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
-    assert first_match is not None and disable_commands_app.completion_matches == ['result ']
+    assert first_match is not None
+    assert disable_commands_app.completion_matches == ['result ']

     # has_no_helper_funcs had no completer originally, so there should be no results
     text = ''
