From 1881b62047ecc7fbfc60e54e37cbb39e58cb0529 Mon Sep 17 00:00:00 2001
From: Marco
Date: Mon, 11 Nov 2024 17:05:06 +0100
Subject: [PATCH 01/17] Add test configuration to run the workflow with pytest

---
 .venv/Lib/site-packages/_pytest/__init__.py | 13 + .../Lib/site-packages/_pytest/_argcomplete.py | 117 + .../site-packages/_pytest/_code/__init__.py | 26 + .venv/Lib/site-packages/_pytest/_code/code.py | 1403 ++++++++++++ .../Lib/site-packages/_pytest/_code/source.py | 215 ++ .../Lib/site-packages/_pytest/_io/__init__.py | 10 + .venv/Lib/site-packages/_pytest/_io/pprint.py | 673 ++++++ .../Lib/site-packages/_pytest/_io/saferepr.py | 130 ++ .../_pytest/_io/terminalwriter.py | 274 +++ .../Lib/site-packages/_pytest/_io/wcwidth.py | 57 + .../Lib/site-packages/_pytest/_py/__init__.py | 0 .venv/Lib/site-packages/_pytest/_py/error.py | 111 + .venv/Lib/site-packages/_pytest/_py/path.py | 1475 ++++++++++++ .venv/Lib/site-packages/_pytest/_version.py | 16 + .../_pytest/assertion/__init__.py | 192 ++ .../_pytest/assertion/rewrite.py | 1204 ++++++++++ .../_pytest/assertion/truncate.py | 117 + .../site-packages/_pytest/assertion/util.py | 609 +++++ .../site-packages/_pytest/cacheprovider.py | 626 ++++++ .venv/Lib/site-packages/_pytest/capture.py | 1087 +++++++++ .venv/Lib/site-packages/_pytest/compat.py | 351 +++ .../site-packages/_pytest/config/__init__.py | 1972 +++++++++++++++++ .../_pytest/config/argparsing.py | 551 +++++ .../site-packages/_pytest/config/compat.py | 85 + .../_pytest/config/exceptions.py | 13 + .../site-packages/_pytest/config/findpaths.py | 228 ++ .venv/Lib/site-packages/_pytest/debugging.py | 385 ++++ .venv/Lib/site-packages/_pytest/deprecated.py | 91 + .venv/Lib/site-packages/_pytest/doctest.py | 755 +++++++ .../Lib/site-packages/_pytest/faulthandler.py | 105 + .venv/Lib/site-packages/_pytest/fixtures.py | 1932 ++++++++++++++++ .../site-packages/_pytest/freeze_support.py | 45 + .venv/Lib/site-packages/_pytest/helpconfig.py | 276 +++ .venv/Lib/site-packages/_pytest/hookspec.py | 1333 +++++++++++ .venv/Lib/site-packages/_pytest/junitxml.py | 697 ++++++ .venv/Lib/site-packages/_pytest/legacypath.py | 473 ++++ .venv/Lib/site-packages/_pytest/logging.py | 955 ++++++++ .venv/Lib/site-packages/_pytest/main.py | 1072 +++++++++ .../site-packages/_pytest/mark/__init__.py | 292 +++ .../site-packages/_pytest/mark/expression.py | 333 +++ .../site-packages/_pytest/mark/structures.py | 615 +++++ .../Lib/site-packages/_pytest/monkeypatch.py | 415 ++++ .venv/Lib/site-packages/_pytest/nodes.py | 766 +++++++ .venv/Lib/site-packages/_pytest/outcomes.py | 318 +++ .venv/Lib/site-packages/_pytest/pastebin.py | 113 + .venv/Lib/site-packages/_pytest/pathlib.py | 975 ++++++++ .venv/Lib/site-packages/_pytest/py.typed | 0 .venv/Lib/site-packages/_pytest/pytester.py | 1766 +++++++++++++++ .../_pytest/pytester_assertions.py | 74 + .venv/Lib/site-packages/_pytest/python.py | 1679 ++++++++++++++ .venv/Lib/site-packages/_pytest/python_api.py | 1020 +++++++++ .../Lib/site-packages/_pytest/python_path.py | 26 + .venv/Lib/site-packages/_pytest/recwarn.py | 365 +++ .venv/Lib/site-packages/_pytest/reports.py | 636 ++++++ .venv/Lib/site-packages/_pytest/runner.py | 571 +++++ .venv/Lib/site-packages/_pytest/scope.py | 91 + .venv/Lib/site-packages/_pytest/setuponly.py | 102 + .venv/Lib/site-packages/_pytest/setupplan.py | 39 + .venv/Lib/site-packages/_pytest/skipping.py | 301 +++ .venv/Lib/site-packages/_pytest/stash.py | 116 + .venv/Lib/site-packages/_pytest/stepwise.py | 125 ++ 
.venv/Lib/site-packages/_pytest/terminal.py | 1577 +++++++++++++ .../site-packages/_pytest/threadexception.py | 97 + .venv/Lib/site-packages/_pytest/timing.py | 16 + .venv/Lib/site-packages/_pytest/tmpdir.py | 322 +++ .venv/Lib/site-packages/_pytest/unittest.py | 435 ++++ .../_pytest/unraisableexception.py | 100 + .../site-packages/_pytest/warning_types.py | 166 ++ .venv/Lib/site-packages/_pytest/warnings.py | 151 ++ .../colorama-0.4.6.dist-info/INSTALLER | 1 + .../colorama-0.4.6.dist-info/METADATA | 441 ++++ .../colorama-0.4.6.dist-info/RECORD | 31 + .../colorama-0.4.6.dist-info/WHEEL | 5 + .../licenses/LICENSE.txt | 27 + .venv/Lib/site-packages/colorama/__init__.py | 7 + .venv/Lib/site-packages/colorama/ansi.py | 102 + .../Lib/site-packages/colorama/ansitowin32.py | 277 +++ .../Lib/site-packages/colorama/initialise.py | 121 + .../site-packages/colorama/tests/__init__.py | 1 + .../site-packages/colorama/tests/ansi_test.py | 76 + .../colorama/tests/ansitowin32_test.py | 294 +++ .../colorama/tests/initialise_test.py | 189 ++ .../colorama/tests/isatty_test.py | 57 + .../Lib/site-packages/colorama/tests/utils.py | 49 + .../colorama/tests/winterm_test.py | 131 ++ .venv/Lib/site-packages/colorama/win32.py | 180 ++ .venv/Lib/site-packages/colorama/winterm.py | 195 ++ .../coverage-7.6.4.dist-info/INSTALLER | 1 + .../coverage-7.6.4.dist-info/LICENSE.txt | 177 ++ .../coverage-7.6.4.dist-info/METADATA | 199 ++ .../coverage-7.6.4.dist-info/RECORD | 104 + .../coverage-7.6.4.dist-info/WHEEL | 5 + .../coverage-7.6.4.dist-info/entry_points.txt | 4 + .../coverage-7.6.4.dist-info/top_level.txt | 1 + .venv/Lib/site-packages/coverage/__init__.py | 38 + .venv/Lib/site-packages/coverage/__main__.py | 10 + .venv/Lib/site-packages/coverage/annotate.py | 115 + .venv/Lib/site-packages/coverage/bytecode.py | 22 + .venv/Lib/site-packages/coverage/cmdline.py | 1009 +++++++++ .venv/Lib/site-packages/coverage/collector.py | 497 +++++ .venv/Lib/site-packages/coverage/config.py | 627 ++++++ .venv/Lib/site-packages/coverage/context.py | 75 + .venv/Lib/site-packages/coverage/control.py | 1402 ++++++++++++ .venv/Lib/site-packages/coverage/core.py | 99 + .venv/Lib/site-packages/coverage/data.py | 228 ++ .venv/Lib/site-packages/coverage/debug.py | 613 +++++ .../Lib/site-packages/coverage/disposition.py | 58 + .venv/Lib/site-packages/coverage/env.py | 125 ++ .../Lib/site-packages/coverage/exceptions.py | 63 + .venv/Lib/site-packages/coverage/execfile.py | 324 +++ .venv/Lib/site-packages/coverage/files.py | 548 +++++ .venv/Lib/site-packages/coverage/html.py | 810 +++++++ .../coverage/htmlfiles/coverage_html.js | 733 ++++++ .../coverage/htmlfiles/favicon_32.png | Bin 0 -> 1732 bytes .../coverage/htmlfiles/index.html | 164 ++ .../coverage/htmlfiles/keybd_closed.png | Bin 0 -> 9004 bytes .../coverage/htmlfiles/pyfile.html | 149 ++ .../coverage/htmlfiles/style.css | 337 +++ .../coverage/htmlfiles/style.scss | 760 +++++++ .venv/Lib/site-packages/coverage/inorout.py | 594 +++++ .../Lib/site-packages/coverage/jsonreport.py | 179 ++ .../Lib/site-packages/coverage/lcovreport.py | 223 ++ .venv/Lib/site-packages/coverage/misc.py | 369 +++ .venv/Lib/site-packages/coverage/multiproc.py | 115 + .venv/Lib/site-packages/coverage/numbits.py | 147 ++ .venv/Lib/site-packages/coverage/parser.py | 1287 +++++++++++ .../Lib/site-packages/coverage/phystokens.py | 198 ++ .venv/Lib/site-packages/coverage/plugin.py | 617 ++++++ .../site-packages/coverage/plugin_support.py | 298 +++ .venv/Lib/site-packages/coverage/py.typed | 1 + 
.venv/Lib/site-packages/coverage/python.py | 273 +++ .venv/Lib/site-packages/coverage/pytracer.py | 362 +++ .venv/Lib/site-packages/coverage/regions.py | 126 ++ .venv/Lib/site-packages/coverage/report.py | 282 +++ .../Lib/site-packages/coverage/report_core.py | 120 + .venv/Lib/site-packages/coverage/results.py | 419 ++++ .venv/Lib/site-packages/coverage/sqldata.py | 1103 +++++++++ .venv/Lib/site-packages/coverage/sqlitedb.py | 231 ++ .venv/Lib/site-packages/coverage/sysmon.py | 433 ++++ .venv/Lib/site-packages/coverage/templite.py | 306 +++ .../Lib/site-packages/coverage/tomlconfig.py | 209 ++ .../coverage/tracer.cp312-win_amd64.pyd | Bin 0 -> 22528 bytes .venv/Lib/site-packages/coverage/tracer.pyi | 41 + .venv/Lib/site-packages/coverage/types.py | 203 ++ .venv/Lib/site-packages/coverage/version.py | 50 + .venv/Lib/site-packages/coverage/xmlreport.py | 261 +++ .../iniconfig-2.0.0.dist-info/INSTALLER | 1 + .../iniconfig-2.0.0.dist-info/METADATA | 80 + .../iniconfig-2.0.0.dist-info/RECORD | 14 + .../iniconfig-2.0.0.dist-info/WHEEL | 4 + .../licenses/LICENSE | 19 + .venv/Lib/site-packages/iniconfig/__init__.py | 216 ++ .venv/Lib/site-packages/iniconfig/_parse.py | 82 + .venv/Lib/site-packages/iniconfig/_version.py | 4 + .../Lib/site-packages/iniconfig/exceptions.py | 20 + .venv/Lib/site-packages/iniconfig/py.typed | 0 .../packaging-24.2.dist-info/INSTALLER | 1 + .../packaging-24.2.dist-info/LICENSE | 3 + .../packaging-24.2.dist-info/LICENSE.APACHE | 177 ++ .../packaging-24.2.dist-info/LICENSE.BSD | 23 + .../packaging-24.2.dist-info/METADATA | 102 + .../packaging-24.2.dist-info/RECORD | 40 + .../packaging-24.2.dist-info/WHEEL | 4 + .venv/Lib/site-packages/packaging/__init__.py | 15 + .venv/Lib/site-packages/packaging/_elffile.py | 110 + .../Lib/site-packages/packaging/_manylinux.py | 263 +++ .../Lib/site-packages/packaging/_musllinux.py | 85 + .venv/Lib/site-packages/packaging/_parser.py | 354 +++ .../site-packages/packaging/_structures.py | 61 + .../Lib/site-packages/packaging/_tokenizer.py | 194 ++ .../packaging/licenses/__init__.py | 145 ++ .../site-packages/packaging/licenses/_spdx.py | 759 +++++++ .venv/Lib/site-packages/packaging/markers.py | 331 +++ .venv/Lib/site-packages/packaging/metadata.py | 863 ++++++++ .venv/Lib/site-packages/packaging/py.typed | 0 .../site-packages/packaging/requirements.py | 91 + .../Lib/site-packages/packaging/specifiers.py | 1020 +++++++++ .venv/Lib/site-packages/packaging/tags.py | 617 ++++++ .venv/Lib/site-packages/packaging/utils.py | 163 ++ .venv/Lib/site-packages/packaging/version.py | 582 +++++ .../pluggy-1.5.0.dist-info/INSTALLER | 1 + .../pluggy-1.5.0.dist-info/LICENSE | 21 + .../pluggy-1.5.0.dist-info/METADATA | 155 ++ .../pluggy-1.5.0.dist-info/RECORD | 23 + .../pluggy-1.5.0.dist-info/WHEEL | 5 + .../pluggy-1.5.0.dist-info/top_level.txt | 1 + .venv/Lib/site-packages/pluggy/__init__.py | 37 + .venv/Lib/site-packages/pluggy/_callers.py | 182 ++ .venv/Lib/site-packages/pluggy/_hooks.py | 715 ++++++ .venv/Lib/site-packages/pluggy/_manager.py | 528 +++++ .venv/Lib/site-packages/pluggy/_result.py | 104 + .venv/Lib/site-packages/pluggy/_tracing.py | 73 + .venv/Lib/site-packages/pluggy/_version.py | 16 + .venv/Lib/site-packages/pluggy/_warnings.py | 27 + .venv/Lib/site-packages/pluggy/py.typed | 0 .venv/Lib/site-packages/py.py | 15 + .../pytest-8.3.3.dist-info/AUTHORS | 469 ++++ .../pytest-8.3.3.dist-info/INSTALLER | 1 + .../pytest-8.3.3.dist-info/LICENSE | 21 + .../pytest-8.3.3.dist-info/METADATA | 212 ++ .../pytest-8.3.3.dist-info/RECORD | 155 ++ 
.../pytest-8.3.3.dist-info/REQUESTED | 0 .../pytest-8.3.3.dist-info/WHEEL | 5 + .../pytest-8.3.3.dist-info/entry_points.txt | 3 + .../pytest-8.3.3.dist-info/top_level.txt | 3 + .venv/Lib/site-packages/pytest-cov.pth | 1 + .venv/Lib/site-packages/pytest/__init__.py | 172 ++ .venv/Lib/site-packages/pytest/__main__.py | 9 + .venv/Lib/site-packages/pytest/py.typed | 0 .../pytest_cov-6.0.0.dist-info/AUTHORS.rst | 64 + .../pytest_cov-6.0.0.dist-info/INSTALLER | 1 + .../pytest_cov-6.0.0.dist-info/LICENSE | 21 + .../pytest_cov-6.0.0.dist-info/METADATA | 598 +++++ .../pytest_cov-6.0.0.dist-info/RECORD | 20 + .../pytest_cov-6.0.0.dist-info/REQUESTED | 0 .../pytest_cov-6.0.0.dist-info/WHEEL | 5 + .../entry_points.txt | 2 + .../pytest_cov-6.0.0.dist-info/top_level.txt | 1 + .../Lib/site-packages/pytest_cov/__init__.py | 47 + .venv/Lib/site-packages/pytest_cov/compat.py | 15 + .venv/Lib/site-packages/pytest_cov/embed.py | 123 + .venv/Lib/site-packages/pytest_cov/engine.py | 454 ++++ .venv/Lib/site-packages/pytest_cov/plugin.py | 446 ++++ .../pytest_django-2.3.1.dist-info/AUTHORS | 13 + .../pytest_django-2.3.1.dist-info/INSTALLER | 1 + .../pytest_django-2.3.1.dist-info/LICENSE | 54 + .../pytest_django-2.3.1.dist-info/METADATA | 86 + .../pytest_django-2.3.1.dist-info/RECORD | 27 + .../pytest_django-2.3.1.dist-info/REQUESTED | 0 .../pytest_django-2.3.1.dist-info/WHEEL | 5 + .../entry_points.txt | 2 + .../top_level.txt | 1 + .../pytest_django-4.9.0.dist-info/AUTHORS | 23 + .../pytest_django-4.9.0.dist-info/INSTALLER | 1 + .../pytest_django-4.9.0.dist-info/LICENSE | 54 + .../pytest_django-4.9.0.dist-info/METADATA | 161 ++ .../pytest_django-4.9.0.dist-info/RECORD | 26 + .../pytest_django-4.9.0.dist-info/REQUESTED | 0 .../pytest_django-4.9.0.dist-info/WHEEL | 5 + .../entry_points.txt | 2 + .../top_level.txt | 1 + .../site-packages/pytest_django/__init__.py | 1 + .../site-packages/pytest_django/_version.py | 16 + .../site-packages/pytest_django/asserts.py | 221 ++ .../Lib/site-packages/pytest_django/client.py | 45 + .../site-packages/pytest_django/db_reuse.py | 77 + .../site-packages/pytest_django/deprecated.py | 17 + .../pytest_django/django_compat.py | 19 + .../site-packages/pytest_django/fixtures.py | 227 ++ .../pytest_django/lazy_django.py | 21 + .../pytest_django/live_server_helper.py | 96 + .../Lib/site-packages/pytest_django/plugin.py | 267 +++ .../Lib/site-packages/pytest_django/py.typed | 0 .venv/Scripts/coverage-3.12.exe | Bin 0 -> 108427 bytes .venv/Scripts/coverage.exe | Bin 0 -> 108427 bytes .venv/Scripts/coverage3.exe | Bin 0 -> 108427 bytes .venv/Scripts/py.test.exe | Bin 0 -> 108433 bytes .venv/Scripts/pytest.exe | Bin 0 -> 108433 bytes pytets.ini | 3 + videogames_register/{tests.py => test_app.py} | 1 + 260 files changed, 65871 insertions(+) create mode 100644 .venv/Lib/site-packages/_pytest/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/_argcomplete.py create mode 100644 .venv/Lib/site-packages/_pytest/_code/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/_code/code.py create mode 100644 .venv/Lib/site-packages/_pytest/_code/source.py create mode 100644 .venv/Lib/site-packages/_pytest/_io/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/_io/pprint.py create mode 100644 .venv/Lib/site-packages/_pytest/_io/saferepr.py create mode 100644 .venv/Lib/site-packages/_pytest/_io/terminalwriter.py create mode 100644 .venv/Lib/site-packages/_pytest/_io/wcwidth.py create mode 100644 .venv/Lib/site-packages/_pytest/_py/__init__.py create mode 100644 
.venv/Lib/site-packages/_pytest/_py/error.py create mode 100644 .venv/Lib/site-packages/_pytest/_py/path.py create mode 100644 .venv/Lib/site-packages/_pytest/_version.py create mode 100644 .venv/Lib/site-packages/_pytest/assertion/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/assertion/rewrite.py create mode 100644 .venv/Lib/site-packages/_pytest/assertion/truncate.py create mode 100644 .venv/Lib/site-packages/_pytest/assertion/util.py create mode 100644 .venv/Lib/site-packages/_pytest/cacheprovider.py create mode 100644 .venv/Lib/site-packages/_pytest/capture.py create mode 100644 .venv/Lib/site-packages/_pytest/compat.py create mode 100644 .venv/Lib/site-packages/_pytest/config/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/config/argparsing.py create mode 100644 .venv/Lib/site-packages/_pytest/config/compat.py create mode 100644 .venv/Lib/site-packages/_pytest/config/exceptions.py create mode 100644 .venv/Lib/site-packages/_pytest/config/findpaths.py create mode 100644 .venv/Lib/site-packages/_pytest/debugging.py create mode 100644 .venv/Lib/site-packages/_pytest/deprecated.py create mode 100644 .venv/Lib/site-packages/_pytest/doctest.py create mode 100644 .venv/Lib/site-packages/_pytest/faulthandler.py create mode 100644 .venv/Lib/site-packages/_pytest/fixtures.py create mode 100644 .venv/Lib/site-packages/_pytest/freeze_support.py create mode 100644 .venv/Lib/site-packages/_pytest/helpconfig.py create mode 100644 .venv/Lib/site-packages/_pytest/hookspec.py create mode 100644 .venv/Lib/site-packages/_pytest/junitxml.py create mode 100644 .venv/Lib/site-packages/_pytest/legacypath.py create mode 100644 .venv/Lib/site-packages/_pytest/logging.py create mode 100644 .venv/Lib/site-packages/_pytest/main.py create mode 100644 .venv/Lib/site-packages/_pytest/mark/__init__.py create mode 100644 .venv/Lib/site-packages/_pytest/mark/expression.py create mode 100644 .venv/Lib/site-packages/_pytest/mark/structures.py create mode 100644 .venv/Lib/site-packages/_pytest/monkeypatch.py create mode 100644 .venv/Lib/site-packages/_pytest/nodes.py create mode 100644 .venv/Lib/site-packages/_pytest/outcomes.py create mode 100644 .venv/Lib/site-packages/_pytest/pastebin.py create mode 100644 .venv/Lib/site-packages/_pytest/pathlib.py create mode 100644 .venv/Lib/site-packages/_pytest/py.typed create mode 100644 .venv/Lib/site-packages/_pytest/pytester.py create mode 100644 .venv/Lib/site-packages/_pytest/pytester_assertions.py create mode 100644 .venv/Lib/site-packages/_pytest/python.py create mode 100644 .venv/Lib/site-packages/_pytest/python_api.py create mode 100644 .venv/Lib/site-packages/_pytest/python_path.py create mode 100644 .venv/Lib/site-packages/_pytest/recwarn.py create mode 100644 .venv/Lib/site-packages/_pytest/reports.py create mode 100644 .venv/Lib/site-packages/_pytest/runner.py create mode 100644 .venv/Lib/site-packages/_pytest/scope.py create mode 100644 .venv/Lib/site-packages/_pytest/setuponly.py create mode 100644 .venv/Lib/site-packages/_pytest/setupplan.py create mode 100644 .venv/Lib/site-packages/_pytest/skipping.py create mode 100644 .venv/Lib/site-packages/_pytest/stash.py create mode 100644 .venv/Lib/site-packages/_pytest/stepwise.py create mode 100644 .venv/Lib/site-packages/_pytest/terminal.py create mode 100644 .venv/Lib/site-packages/_pytest/threadexception.py create mode 100644 .venv/Lib/site-packages/_pytest/timing.py create mode 100644 .venv/Lib/site-packages/_pytest/tmpdir.py create mode 100644 
.venv/Lib/site-packages/_pytest/unittest.py create mode 100644 .venv/Lib/site-packages/_pytest/unraisableexception.py create mode 100644 .venv/Lib/site-packages/_pytest/warning_types.py create mode 100644 .venv/Lib/site-packages/_pytest/warnings.py create mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt create mode 100644 .venv/Lib/site-packages/colorama/__init__.py create mode 100644 .venv/Lib/site-packages/colorama/ansi.py create mode 100644 .venv/Lib/site-packages/colorama/ansitowin32.py create mode 100644 .venv/Lib/site-packages/colorama/initialise.py create mode 100644 .venv/Lib/site-packages/colorama/tests/__init__.py create mode 100644 .venv/Lib/site-packages/colorama/tests/ansi_test.py create mode 100644 .venv/Lib/site-packages/colorama/tests/ansitowin32_test.py create mode 100644 .venv/Lib/site-packages/colorama/tests/initialise_test.py create mode 100644 .venv/Lib/site-packages/colorama/tests/isatty_test.py create mode 100644 .venv/Lib/site-packages/colorama/tests/utils.py create mode 100644 .venv/Lib/site-packages/colorama/tests/winterm_test.py create mode 100644 .venv/Lib/site-packages/colorama/win32.py create mode 100644 .venv/Lib/site-packages/colorama/winterm.py create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/LICENSE.txt create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/entry_points.txt create mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/coverage/__init__.py create mode 100644 .venv/Lib/site-packages/coverage/__main__.py create mode 100644 .venv/Lib/site-packages/coverage/annotate.py create mode 100644 .venv/Lib/site-packages/coverage/bytecode.py create mode 100644 .venv/Lib/site-packages/coverage/cmdline.py create mode 100644 .venv/Lib/site-packages/coverage/collector.py create mode 100644 .venv/Lib/site-packages/coverage/config.py create mode 100644 .venv/Lib/site-packages/coverage/context.py create mode 100644 .venv/Lib/site-packages/coverage/control.py create mode 100644 .venv/Lib/site-packages/coverage/core.py create mode 100644 .venv/Lib/site-packages/coverage/data.py create mode 100644 .venv/Lib/site-packages/coverage/debug.py create mode 100644 .venv/Lib/site-packages/coverage/disposition.py create mode 100644 .venv/Lib/site-packages/coverage/env.py create mode 100644 .venv/Lib/site-packages/coverage/exceptions.py create mode 100644 .venv/Lib/site-packages/coverage/execfile.py create mode 100644 .venv/Lib/site-packages/coverage/files.py create mode 100644 .venv/Lib/site-packages/coverage/html.py create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/coverage_html.js create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/favicon_32.png create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/index.html create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/keybd_closed.png create mode 100644 
.venv/Lib/site-packages/coverage/htmlfiles/pyfile.html create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/style.css create mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/style.scss create mode 100644 .venv/Lib/site-packages/coverage/inorout.py create mode 100644 .venv/Lib/site-packages/coverage/jsonreport.py create mode 100644 .venv/Lib/site-packages/coverage/lcovreport.py create mode 100644 .venv/Lib/site-packages/coverage/misc.py create mode 100644 .venv/Lib/site-packages/coverage/multiproc.py create mode 100644 .venv/Lib/site-packages/coverage/numbits.py create mode 100644 .venv/Lib/site-packages/coverage/parser.py create mode 100644 .venv/Lib/site-packages/coverage/phystokens.py create mode 100644 .venv/Lib/site-packages/coverage/plugin.py create mode 100644 .venv/Lib/site-packages/coverage/plugin_support.py create mode 100644 .venv/Lib/site-packages/coverage/py.typed create mode 100644 .venv/Lib/site-packages/coverage/python.py create mode 100644 .venv/Lib/site-packages/coverage/pytracer.py create mode 100644 .venv/Lib/site-packages/coverage/regions.py create mode 100644 .venv/Lib/site-packages/coverage/report.py create mode 100644 .venv/Lib/site-packages/coverage/report_core.py create mode 100644 .venv/Lib/site-packages/coverage/results.py create mode 100644 .venv/Lib/site-packages/coverage/sqldata.py create mode 100644 .venv/Lib/site-packages/coverage/sqlitedb.py create mode 100644 .venv/Lib/site-packages/coverage/sysmon.py create mode 100644 .venv/Lib/site-packages/coverage/templite.py create mode 100644 .venv/Lib/site-packages/coverage/tomlconfig.py create mode 100644 .venv/Lib/site-packages/coverage/tracer.cp312-win_amd64.pyd create mode 100644 .venv/Lib/site-packages/coverage/tracer.pyi create mode 100644 .venv/Lib/site-packages/coverage/types.py create mode 100644 .venv/Lib/site-packages/coverage/version.py create mode 100644 .venv/Lib/site-packages/coverage/xmlreport.py create mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE create mode 100644 .venv/Lib/site-packages/iniconfig/__init__.py create mode 100644 .venv/Lib/site-packages/iniconfig/_parse.py create mode 100644 .venv/Lib/site-packages/iniconfig/_version.py create mode 100644 .venv/Lib/site-packages/iniconfig/exceptions.py create mode 100644 .venv/Lib/site-packages/iniconfig/py.typed create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.APACHE create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.BSD create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/packaging/__init__.py create mode 100644 .venv/Lib/site-packages/packaging/_elffile.py create mode 100644 .venv/Lib/site-packages/packaging/_manylinux.py create mode 100644 .venv/Lib/site-packages/packaging/_musllinux.py create mode 100644 .venv/Lib/site-packages/packaging/_parser.py create mode 100644 
.venv/Lib/site-packages/packaging/_structures.py create mode 100644 .venv/Lib/site-packages/packaging/_tokenizer.py create mode 100644 .venv/Lib/site-packages/packaging/licenses/__init__.py create mode 100644 .venv/Lib/site-packages/packaging/licenses/_spdx.py create mode 100644 .venv/Lib/site-packages/packaging/markers.py create mode 100644 .venv/Lib/site-packages/packaging/metadata.py create mode 100644 .venv/Lib/site-packages/packaging/py.typed create mode 100644 .venv/Lib/site-packages/packaging/requirements.py create mode 100644 .venv/Lib/site-packages/packaging/specifiers.py create mode 100644 .venv/Lib/site-packages/packaging/tags.py create mode 100644 .venv/Lib/site-packages/packaging/utils.py create mode 100644 .venv/Lib/site-packages/packaging/version.py create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/pluggy/__init__.py create mode 100644 .venv/Lib/site-packages/pluggy/_callers.py create mode 100644 .venv/Lib/site-packages/pluggy/_hooks.py create mode 100644 .venv/Lib/site-packages/pluggy/_manager.py create mode 100644 .venv/Lib/site-packages/pluggy/_result.py create mode 100644 .venv/Lib/site-packages/pluggy/_tracing.py create mode 100644 .venv/Lib/site-packages/pluggy/_version.py create mode 100644 .venv/Lib/site-packages/pluggy/_warnings.py create mode 100644 .venv/Lib/site-packages/pluggy/py.typed create mode 100644 .venv/Lib/site-packages/py.py create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/AUTHORS create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/REQUESTED create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/entry_points.txt create mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/pytest-cov.pth create mode 100644 .venv/Lib/site-packages/pytest/__init__.py create mode 100644 .venv/Lib/site-packages/pytest/__main__.py create mode 100644 .venv/Lib/site-packages/pytest/py.typed create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/AUTHORS.rst create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/REQUESTED create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/entry_points.txt create mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/pytest_cov/__init__.py create mode 
100644 .venv/Lib/site-packages/pytest_cov/compat.py create mode 100644 .venv/Lib/site-packages/pytest_cov/embed.py create mode 100644 .venv/Lib/site-packages/pytest_cov/engine.py create mode 100644 .venv/Lib/site-packages/pytest_cov/plugin.py create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/AUTHORS create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/REQUESTED create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/entry_points.txt create mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/AUTHORS create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/INSTALLER create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/LICENSE create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/METADATA create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/RECORD create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/REQUESTED create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/WHEEL create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/entry_points.txt create mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/top_level.txt create mode 100644 .venv/Lib/site-packages/pytest_django/__init__.py create mode 100644 .venv/Lib/site-packages/pytest_django/_version.py create mode 100644 .venv/Lib/site-packages/pytest_django/asserts.py create mode 100644 .venv/Lib/site-packages/pytest_django/client.py create mode 100644 .venv/Lib/site-packages/pytest_django/db_reuse.py create mode 100644 .venv/Lib/site-packages/pytest_django/deprecated.py create mode 100644 .venv/Lib/site-packages/pytest_django/django_compat.py create mode 100644 .venv/Lib/site-packages/pytest_django/fixtures.py create mode 100644 .venv/Lib/site-packages/pytest_django/lazy_django.py create mode 100644 .venv/Lib/site-packages/pytest_django/live_server_helper.py create mode 100644 .venv/Lib/site-packages/pytest_django/plugin.py create mode 100644 .venv/Lib/site-packages/pytest_django/py.typed create mode 100644 .venv/Scripts/coverage-3.12.exe create mode 100644 .venv/Scripts/coverage.exe create mode 100644 .venv/Scripts/coverage3.exe create mode 100644 .venv/Scripts/py.test.exe create mode 100644 .venv/Scripts/pytest.exe create mode 100644 pytets.ini rename videogames_register/{tests.py => test_app.py} (99%) diff --git a/.venv/Lib/site-packages/_pytest/__init__.py b/.venv/Lib/site-packages/_pytest/__init__.py new file mode 100644 index 00000000..8eb8ec96 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/__init__.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +__all__ = ["__version__", "version_tuple"] + +try: + from ._version import version as __version__ + from ._version import version_tuple +except ImportError: # pragma: no cover + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" + version_tuple = (0, 0, "unknown") diff --git 
a/.venv/Lib/site-packages/_pytest/_argcomplete.py b/.venv/Lib/site-packages/_pytest/_argcomplete.py new file mode 100644 index 00000000..59426ef9 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,117 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK. + +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +from __future__ import annotations + +import argparse +from glob import glob +import os +import sys +from typing import Any + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> list[str]: + # Only called on non option completions. + if os.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen). 
+ completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: FastFilesCompleter | None = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/.venv/Lib/site-packages/_pytest/_code/__init__.py b/.venv/Lib/site-packages/_pytest/_code/__init__.py new file mode 100644 index 00000000..0bfde426 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,26 @@ +"""Python inspection/code generation API.""" + +from __future__ import annotations + +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + + +__all__ = [ + "Code", + "ExceptionInfo", + "filter_traceback", + "Frame", + "getfslineno", + "getrawcode", + "Traceback", + "TracebackEntry", + "Source", +] diff --git a/.venv/Lib/site-packages/_pytest/_code/code.py b/.venv/Lib/site-packages/_pytest/_code/code.py new file mode 100644 index 00000000..8fac39ea --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_code/code.py @@ -0,0 +1,1403 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +import dataclasses +import inspect +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +import os +from pathlib import Path +import re +import sys +import traceback +from traceback import format_exception_only +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import Final +from typing import final +from typing import Generic +from typing import Iterable +from typing import List +from typing import Literal +from typing import Mapping +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import SupportsIndex +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +import pluggy + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + +TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + +EXCEPTION_OR_MORE = Union[Type[BaseException], Tuple[Type[BaseException], ...]] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> Code: + return cls(getrawcode(obj)) + + def __eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. 
+ __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Path | str: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = absolutepath(self.raw.co_filename) + # maybe don't try this checking + if not p.exists(): + raise OSError("path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Source | None: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> Source: + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> Source: + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. 
+ """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + repr_style: Literal["short", "long"] | None = None, + ) -> None: + self._rawentry: Final = rawentry + self._repr_style: Final = repr_style + + def with_repr_style( + self, repr_style: Literal["short", "long"] | None + ) -> TracebackEntry: + return TracebackEntry(self._rawentry, repr_style) + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self) -> Source: + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Path | str: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource( + self, astcache: dict[str | Path, ast.AST] | None = None + ) -> Source | None: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None and astcache is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self, excinfo: ExceptionInfo[BaseException] | None) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: bool | Callable[[ExceptionInfo[BaseException] | None], bool] = False + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). + # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(excinfo) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. 
+ return " File %r:%d in %s\n %s\n" % ( + str(self.path), + self.lineno + 1, + name, + line, + ) + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(List[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: TracebackType | Iterable[TracebackEntry], + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: TracebackType | None = cur + while cur_ is not None: + yield TracebackEntry(cur_) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path: os.PathLike[str] | str | None = None, + lineno: int | None = None, + firstlineno: int | None = None, + excludepath: os.PathLike[str] | None = None, + ) -> Traceback: + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) + for x in self: + code = x.frame.code + codepath = code.path + if path is not None and str(codepath) != path_: + continue + if ( + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] + ): + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry) + return self + + @overload + def __getitem__(self, key: SupportsIndex) -> TracebackEntry: ... + + @overload + def __getitem__(self, key: slice) -> Traceback: ... + + def __getitem__(self, key: SupportsIndex | slice) -> TracebackEntry | Traceback: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, + excinfo_or_fn: ExceptionInfo[BaseException] | Callable[[TracebackEntry], bool], + /, + ) -> Traceback: + """Return a Traceback instance with certain items removed. + + If the filter is an `ExceptionInfo`, removes all the ``TracebackEntry``s + which are hidden (see ishidden() above). + + Otherwise, the filter is a function that gets a single argument, a + ``TracebackEntry`` instance, and should return True when the item should + be added to the ``Traceback``, False when not. 
+ """ + if isinstance(excinfo_or_fn, ExceptionInfo): + fn = lambda x: not x.ishidden(excinfo_or_fn) # noqa: E731 + else: + fn = excinfo_or_fn + return Traceback(filter(fn, self)) + + def recursionindex(self) -> int | None: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: dict[tuple[Any, int, int], list[dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + values = cache.setdefault(key, []) + # Since Python 3.13 f_locals is a proxy, freeze it. + loc = dict(entry.frame.f_locals) + if values: + for otherloc in values: + if otherloc == loc: + return i + values.append(loc) + return None + + +E = TypeVar("E", bound=BaseException, covariant=True) + + +@final +@dataclasses.dataclass +class ExceptionInfo(Generic[E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: tuple[type[E], E, TracebackType] | None + _striptext: str + _traceback: Traceback | None + + def __init__( + self, + excinfo: tuple[type[E], E, TracebackType] | None, + striptext: str = "", + traceback: Traceback | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback + + @classmethod + def from_exception( + cls, + # Ignoring error: "Cannot use a covariant type variable as a parameter". + # This is OK to ignore because this class is (conceptually) readonly. + # See https://github.com/python/mypy/issues/7049. + exception: E, # type: ignore[misc] + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Return an ExceptionInfo for an existing exception. + + The exception must have a non-``None`` ``__traceback__`` attribute, + otherwise this function fails with an assertion error. This means that + the exception must have been raised, or added a traceback with the + :py:meth:`~BaseException.with_traceback()` method. + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + + .. versionadded:: 7.4 + """ + assert exception.__traceback__, ( + "Exceptions passed to ExcInfo.from_exception(...)" + " must have a non-None __traceback__." + ) + exc_info = (type(exception), exception, exception.__traceback__) + return cls.from_exc_info(exc_info, exprinfo) + + @classmethod + def from_exc_info( + cls, + exc_info: tuple[type[E], E, TracebackType], + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Like :func:`from_exception`, but using old-style exc_info tuple.""" + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext, _ispytest=True) + + @classmethod + def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: + """Return an ExceptionInfo matching the current traceback. + + .. 
warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> ExceptionInfo[E]: + """Return an unfilled ExceptionInfo.""" + return cls(None, _ispytest=True) + + def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> type[E]: + """The exception class.""" + assert ( + self._excinfo is not None + ), ".type can only be used after the context manager exits" + return self._excinfo[0] + + @property + def value(self) -> E: + """The exception value.""" + assert ( + self._excinfo is not None + ), ".value can only be used after the context manager exits" + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert ( + self._excinfo is not None + ), ".tb can only be used after the context manager exits" + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert ( + self._excinfo is not None + ), ".typename can only be used after the context manager exits" + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "<ExceptionInfo for (unknown) exception type>" + return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning). + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> ReprFileLocation | None: + # Find last non-hidden traceback entry that led to the exception of the + # traceback, or None if all hidden. 
+ for i in range(-1, -len(self.traceback) - 1, -1): + entry = self.traceback[i] + if not entry.ishidden(self): + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + exconly = self.exconly(tryshort=True) + return ReprFileLocation(path, lineno + 1, exconly) + return None + + def getrepr( + self, + showlocals: bool = False, + style: TracebackStyle = "long", + abspath: bool = False, + tbfilter: bool + | Callable[[ExceptionInfo[BaseException]], _pytest._code.code.Traceback] = True, + funcargs: bool = False, + truncate_locals: bool = True, + truncate_args: bool = True, + chain: bool = True, + ) -> ReprExceptionInfo | ExceptionChainRepr: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|line|no|native|value traceback style. + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param tbfilter: + A filter for traceback entries. + + * If false, don't hide any entries. + * If true, hide internal entries and entries that contain a local + variable ``__tracebackhide__ = True``. + * If a callable, delegates the filtering to the callable. + + Ignored if ``style`` is ``"native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool truncate_args: + With ``showargs==True``, make sure args can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + reprtraceback=ReprTracebackNative( + traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry if self.traceback else None, + ) + ), + reprcrash=self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def _stringify_exception(self, exc: BaseException) -> str: + try: + notes = getattr(exc, "__notes__", []) + except KeyError: + # Workaround for https://github.com/python/cpython/issues/98778 on + # Python <= 3.9, and some 3.10 and 3.11 patch versions. + HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) + if sys.version_info < (3, 12) and isinstance(exc, HTTPError): + notes = [] + else: + raise + + return "\n".join( + [ + str(exc), + *notes, + ] + ) + + def match(self, regexp: str | Pattern[str]) -> Literal[True]: + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + value = self._stringify_exception(self.value) + msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" + if regexp == value: + msg += "\n Did you mean to `re.escape()` the regex?" + assert re.search(regexp, value), msg + # Return True to allow for "assert excinfo.match()". 
+        return True
+
+    def _group_contains(
+        self,
+        exc_group: BaseExceptionGroup[BaseException],
+        expected_exception: EXCEPTION_OR_MORE,
+        match: str | Pattern[str] | None,
+        target_depth: int | None = None,
+        current_depth: int = 1,
+    ) -> bool:
+        """Return `True` if a `BaseExceptionGroup` contains a matching exception."""
+        if (target_depth is not None) and (current_depth > target_depth):
+            # already descended past the target depth
+            return False
+        for exc in exc_group.exceptions:
+            if isinstance(exc, BaseExceptionGroup):
+                if self._group_contains(
+                    exc, expected_exception, match, target_depth, current_depth + 1
+                ):
+                    return True
+            if (target_depth is not None) and (current_depth != target_depth):
+                # not at the target depth, no match
+                continue
+            if not isinstance(exc, expected_exception):
+                continue
+            if match is not None:
+                value = self._stringify_exception(exc)
+                if not re.search(match, value):
+                    continue
+            return True
+        return False
+
+    def group_contains(
+        self,
+        expected_exception: EXCEPTION_OR_MORE,
+        *,
+        match: str | Pattern[str] | None = None,
+        depth: int | None = None,
+    ) -> bool:
+        """Check whether a captured exception group contains a matching exception.
+
+        :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception:
+            The expected exception type, or a tuple if one of multiple possible
+            exception types are expected.
+
+        :param str | Pattern[str] | None match:
+            If specified, a string containing a regular expression,
+            or a regular expression object, that is tested against the string
+            representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>` `__notes__`
+            using :func:`re.search`.
+
+            To match a literal string that may contain :ref:`special characters
+            <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
+
+        :param Optional[int] depth:
+            If `None`, will search for a matching exception at any nesting depth.
+            If >= 1, will only match an exception if it's at the specified depth (depth = 1 being
+            the exceptions contained within the topmost exception group).
+
+        .. versionadded:: 8.0
+        """
+        msg = "Captured exception is not an instance of `BaseExceptionGroup`"
+        assert isinstance(self.value, BaseExceptionGroup), msg
+        msg = "`depth` must be >= 1 if specified"
+        assert (depth is None) or (depth >= 1), msg
+        return self._group_contains(self.value, expected_exception, match, depth)
+
+
+@dataclasses.dataclass
+class FormattedExcinfo:
+    """Presenting information about failing Functions and Generators."""
+
+    # for traceback entries
+    flow_marker: ClassVar = ">"
+    fail_marker: ClassVar = "E"
+
+    showlocals: bool = False
+    style: TracebackStyle = "long"
+    abspath: bool = True
+    tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True
+    funcargs: bool = False
+    truncate_locals: bool = True
+    truncate_args: bool = True
+    chain: bool = True
+    astcache: dict[str | Path, ast.AST] = dataclasses.field(
+        default_factory=dict, init=False, repr=False
+    )
+
+    def _getindent(self, source: Source) -> int:
+        # Figure out indent for the given source.
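+        # Illustrative sketch of group_contains(), defined above (needs
+        # Python 3.11+ or the exceptiongroup backport for ExceptionGroup):
+        #
+        #     with pytest.raises(ExceptionGroup) as excinfo:
+        #         raise ExceptionGroup("top", [ValueError("bad value")])
+        #     assert excinfo.group_contains(ValueError, match="bad value", depth=1)
+        #     assert not excinfo.group_contains(KeyError)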
+        try:
+            s = str(source.getstatement(len(source) - 1))
+        except KeyboardInterrupt:
+            raise
+        except BaseException:
+            try:
+                s = str(source[-1])
+            except KeyboardInterrupt:
+                raise
+            except BaseException:
+                return 0
+        return 4 + (len(s) - len(s.lstrip()))
+
+    def _getentrysource(self, entry: TracebackEntry) -> Source | None:
+        source = entry.getsource(self.astcache)
+        if source is not None:
+            source = source.deindent()
+        return source
+
+    def repr_args(self, entry: TracebackEntry) -> ReprFuncArgs | None:
+        if self.funcargs:
+            args = []
+            for argname, argvalue in entry.frame.getargs(var=True):
+                if self.truncate_args:
+                    str_repr = saferepr(argvalue)
+                else:
+                    str_repr = saferepr(argvalue, maxsize=None)
+                args.append((argname, str_repr))
+            return ReprFuncArgs(args)
+        return None
+
+    def get_source(
+        self,
+        source: Source | None,
+        line_index: int = -1,
+        excinfo: ExceptionInfo[BaseException] | None = None,
+        short: bool = False,
+    ) -> list[str]:
+        """Return formatted and marked up source lines."""
+        lines = []
+        if source is not None and line_index < 0:
+            line_index += len(source)
+        if source is None or line_index >= len(source.lines) or line_index < 0:
+            # `line_index` could still be outside `range(len(source.lines))` if
+            # we're processing AST with pathological position attributes.
+            source = Source("???")
+            line_index = 0
+        space_prefix = "    "
+        if short:
+            lines.append(space_prefix + source.lines[line_index].strip())
+        else:
+            for line in source.lines[:line_index]:
+                lines.append(space_prefix + line)
+            lines.append(self.flow_marker + "   " + source.lines[line_index])
+            for line in source.lines[line_index + 1 :]:
+                lines.append(space_prefix + line)
+        if excinfo is not None:
+            indent = 4 if short else self._getindent(source)
+            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+        return lines
+
+    def get_exconly(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        indent: int = 4,
+        markall: bool = False,
+    ) -> list[str]:
+        lines = []
+        indentstr = " " * indent
+        # Get the real exception information out.
+        exlines = excinfo.exconly(tryshort=True).split("\n")
+        failindent = self.fail_marker + indentstr[1:]
+        for line in exlines:
+            lines.append(failindent + line)
+            if not markall:
+                failindent = indentstr
+        return lines
+
+    def repr_locals(self, locals: Mapping[str, object]) -> ReprLocals | None:
+        if self.showlocals:
+            lines = []
+            keys = [loc for loc in locals if loc[0] != "@"]
+            keys.sort()
+            for name in keys:
+                value = locals[name]
+                if name == "__builtins__":
+                    lines.append("__builtins__ = <builtins>")
+                else:
+                    # This formatting could all be handled by the
+                    # _repr() function, which is only reprlib.Repr in
+                    # disguise, so is very configurable.
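+                    # Note: repr_locals() backs the `-l` / `--showlocals`
+                    # option; each traceback entry then gains a block like
+                    #
+                    #     x          = 1
+                    #     data       = [1, 2, 3, ...]
+                    #
+                    # (illustrative output; truncation depends on truncate_locals).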
+ if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry | None, + excinfo: ExceptionInfo[BaseException] | None = None, + ) -> ReprEntry: + lines: list[str] = [] + style = ( + entry._repr_style + if entry is not None and entry._repr_style is not None + else self.style + ) + if style in ("short", "long") and entry is not None: + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = f"in {entry.name}" + else: + message = excinfo and excinfo.typename or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path: Path | str) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> ReprTraceback: + traceback = excinfo.traceback + if callable(self.tbfilter): + traceback = self.tbfilter(excinfo) + elif self.tbfilter: + traceback = traceback.filter(excinfo) + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + if not traceback: + if extraline is None: + extraline = "All traceback entries are hidden. Pass `--full-trace` to see hidden and internal frames." + entries = [self.repr_traceback_entry(None, excinfo)] + return ReprTraceback(entries, extraline, style=self.style) + + last = traceback[-1] + if self.style == "value": + entries = [self.repr_traceback_entry(last, excinfo)] + return ReprTraceback(entries, None, style=self.style) + + entries = [ + self.repr_traceback_entry(entry, excinfo if last == entry else None) + for entry in traceback + ] + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> tuple[Traceback, str | None]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. 
+ """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: str | None = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + f" {type(e).__name__}: {e!s}\n" + f" Displaying first and last {max_frames} stack frames out of {len(traceback)}." + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo: ExceptionInfo[BaseException]) -> ExceptionChainRepr: + repr_chain: list[tuple[ReprTraceback, ReprFileLocation | None, str | None]] = [] + e: BaseException | None = excinfo.value + excinfo_: ExceptionInfo[BaseException] | None = excinfo + descr = None + seen: set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + + if excinfo_: + # Fall back to native traceback as a temporary workaround until + # full support for exception groups added to ExceptionInfo. + # See https://github.com/pytest-dev/pytest/issues/9159 + if isinstance(e, BaseExceptionGroup): + reprtraceback: ReprTracebackNative | ReprTraceback = ( + ReprTracebackNative( + traceback.format_exception( + type(excinfo_.value), + excinfo_.value, + excinfo_.traceback[0]._rawentry, + ) + ) + ) + else: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash = excinfo_._getreprcrash() + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + repr_chain += [(reprtraceback, reprcrash, descr)] + + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@dataclasses.dataclass(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@dataclasses.dataclass(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. 
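+    # Illustrative sketch of the chains repr_excinfo() walks above:
+    #
+    #     try:
+    #         {}["missing"]
+    #     except KeyError as e:
+    #         raise RuntimeError("lookup failed") from e
+    #
+    # `from e` sets __cause__, so with chain=True both tracebacks render,
+    # joined by the "direct cause" text; chain=False shows only the last one.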
+ reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + sections: list[tuple[str, str, str]] = dataclasses.field( + init=False, default_factory=list + ) + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@dataclasses.dataclass(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]] + + def __init__( + self, + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]], + ) -> None: + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. + super().__init__( + reprtraceback=chain[-1][0], + reprcrash=chain[-1][1], + ) + self.chain = chain + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprTraceback(TerminalRepr): + reprentries: Sequence[ReprEntry | ReprEntryNative] + extraline: str | None + style: TracebackStyle + + entrysep: ClassVar = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + self.style = "native" + + +@dataclasses.dataclass(eq=False) +class ReprEntryNative(TerminalRepr): + lines: Sequence[str] + + style: ClassVar[TracebackStyle] = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@dataclasses.dataclass(eq=False) +class ReprEntry(TerminalRepr): + lines: Sequence[str] + reprfuncargs: ReprFuncArgs | None + reprlocals: ReprLocals | None + reprfileloc: ReprFileLocation | None + style: TracebackStyle + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" + character, as doing so might break line continuations. 
+ """ + if not self.lines: + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: list[str] = [] + source_lines: list[str] = [] + failure_lines: list[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + if self.style == "value": + source_lines.append(line) + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + if self.reprfileloc: + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@dataclasses.dataclass(eq=False) +class ReprFileLocation(TerminalRepr): + path: str + lineno: int + message: str + + def __post_init__(self) -> None: + self.path = str(self.path) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@dataclasses.dataclass(eq=False) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@dataclasses.dataclass(eq=False) +class ReprFuncArgs(TerminalRepr): + args: Sequence[tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> tuple[str | Path, int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
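+    # Illustrative sketch of getfslineno(), with a hypothetical function f:
+    #
+    #     def f(): ...
+    #     path, lineno = getfslineno(f)  # (Path of this file, 0-based def line)
+    #     getfslineno(object())          # ("", -1): no source available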
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = fn and absolutepath(fn) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True diff --git a/.venv/Lib/site-packages/_pytest/_code/source.py b/.venv/Lib/site-packages/_pytest/_code/source.py new file mode 100644 index 00000000..604aff8b --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_code/source.py @@ -0,0 +1,215 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from bisect import bisect_right +import inspect +import textwrap +import tokenize +import types +from typing import Iterable +from typing import Iterator +from typing import overload +import warnings + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: list[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + elif isinstance(obj, (tuple, list)): + self.lines = deindent(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: ... + + @overload + def __getitem__(self, key: slice) -> Source: ... 
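+    # Illustrative sketch of Source semantics (deindent on construction,
+    # slicing returns Source, indexing returns str):
+    #
+    #     s = Source("    if x:\n        pass")
+    #     assert s.lines == ["if x:", "    pass"]
+    #     assert isinstance(s[0:1], Source) and isinstance(s[0], str)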
+ + def __getitem__(self, key: int | slice) -> str | Source: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> Source: + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> Source: + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> Source: + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> Source: + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> tuple[Source | None, int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> list[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: list[int] = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + # The lineno points to the class/def, so need to include the decorators. + if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + for d in x.decorator_list: + values.append(d.lineno - 1) + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: list[ast.stmt] | None = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. 
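+                    # Illustrative sketch of getstatement(), which builds on
+                    # this AST walk (0-based line numbers):
+                    #
+                    #     s = Source("x = 1\nif x:\n    y = 2\nz = 3")
+                    #     assert s.getstatement(2).lines == ["    y = 2"]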
+ values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: ast.AST | None = None, +) -> tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = ( + bool(source.lines[start]) and source.lines[start][0].isspace() + ) + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/.venv/Lib/site-packages/_pytest/_io/__init__.py b/.venv/Lib/site-packages/_pytest/_io/__init__.py new file mode 100644 index 00000000..b0155b18 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/.venv/Lib/site-packages/_pytest/_io/pprint.py b/.venv/Lib/site-packages/_pytest/_io/pprint.py new file mode 100644 index 00000000..fc29989b --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_io/pprint.py @@ -0,0 +1,673 @@ +# mypy: allow-untyped-defs +# This module was imported from the cpython standard library +# (https://github.com/python/cpython/) at commit +# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12). +# +# +# Original Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. +from __future__ import annotations + +import collections as _collections +import dataclasses as _dataclasses +from io import StringIO as _StringIO +import re +import types as _types +from typing import Any +from typing import Callable +from typing import IO +from typing import Iterator + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. 
+ + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return (str(type(self.obj)), id(self.obj)) < ( + str(type(other.obj)), + id(other.obj), + ) + + +def _safe_tuple(t): + """Helper function for comparing 2-tuples""" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__( + self, + indent: int = 4, + width: int = 80, + depth: int | None = None, + ) -> None: + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. + + """ + if indent < 0: + raise ValueError("indent must be >= 0") + if depth is not None and depth <= 0: + raise ValueError("depth must be > 0") + if not width: + raise ValueError("width must be != 0") + self._depth = depth + self._indent_per_level = indent + self._width = width + + def pformat(self, object: Any) -> str: + sio = _StringIO() + self._format(object, sio, 0, 0, set(), 0) + return sio.getvalue() + + def _format( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + return + + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context.add(objid) + p(self, object, stream, indent, allowance, context, level + 1) + context.remove(objid) + elif ( + _dataclasses.is_dataclass(object) # type:ignore[unreachable] + and not isinstance(object, type) + and object.__dataclass_params__.repr + and + # Check dataclass has generated repr method. 
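+            # For example (illustrative): a plain dataclass such as
+            #
+            #     @dataclasses.dataclass
+            #     class Point:
+            #         x: int
+            #         y: int
+            #
+            # has a generated __repr__ (wrapped, with __create_fn__ in its
+            # qualname), so it is routed to _pprint_dataclass below; a
+            # dataclass with repr=False or a hand-written __repr__ is not.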
+ hasattr(object.__repr__, "__wrapped__") + and "__create_fn__" in object.__repr__.__wrapped__.__qualname__ + ): + context.add(objid) # type:ignore[unreachable] + self._pprint_dataclass( + object, stream, indent, allowance, context, level + 1 + ) + context.remove(objid) + else: + stream.write(self._repr(object, context, level)) + + def _pprint_dataclass( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + cls_name = object.__class__.__name__ + items = [ + (f.name, getattr(object, f.name)) + for f in _dataclasses.fields(object) + if f.repr + ] + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch: dict[ + Callable[..., str], + Callable[[PrettyPrinter, Any, IO[str], int, int, set[int], int], None], + ] = {} + + def _pprint_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("{") + items = sorted(object.items(), key=_safe_tuple) + self._format_dict_items(items, stream, indent, allowance, context, level) + write("}") + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + "(") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("[") + self._format_items(object, stream, indent, allowance, context, level) + stream.write("]") + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("(") + self._format_items(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write("{") + endchar = "}" + else: + stream.write(typ.__name__ + "({") + endchar = "})" + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance, context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r"\S*\s*", line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + 
max_width2 = max_width + current = "" + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write("(") + for i, rep in enumerate(chunks): + if i > 0: + write("\n" + " " * indent) + write(rep) + if level == 1: + write(")") + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write("(") + delim = "" + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = "\n" + " " * indent + if parens: + write(")") + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("bytearray(") + self._pprint_bytes( + bytes(object), stream, indent + 10, allowance + 1, context, level + 1 + ) + write(")") + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("mappingproxy(") + self._format(object.copy(), stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = "namespace" + else: + cls_name = object.__class__.__name__ + items = object.__dict__.items() + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(self._repr(key, context, level)) + write(": ") + self._format(ent, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _format_namespace_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(key) + write("=") + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. 
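+                # Illustrative sketch: a self-referential dataclass, e.g.
+                #
+                #     @dataclasses.dataclass
+                #     class Node:
+                #         next: "Node | None" = None
+                #
+                #     n = Node(); n.next = n
+                #
+                # renders its cycle as `next=...` instead of recursing forever.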
+ write("...") + else: + self._format( + ent, + stream, + item_indent + len(key) + 1, + 1, + context, + level, + ) + + write(",") + + write("\n" + " " * indent) + + def _format_items( + self, + items: list[Any], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + + for item in items: + write(delimnl) + self._format(item, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _repr(self, object: Any, context: set[int], level: int) -> str: + return self._safe_repr(object, context.copy(), self._depth, level) + + def _pprint_default_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + rdf = self._repr(object.default_factory, context, level) + stream.write(f"{object.__class__.__name__}({rdf}, ") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + + if object: + stream.write("{") + items = object.most_common() + self._format_dict_items(items, stream, indent, allowance, context, level) + stream.write("}") + + stream.write(")") + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])): + stream.write(repr(object)) + return + + stream.write(object.__class__.__name__ + "(") + self._format_items(object.maps, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + if object.maxlen is not None: + stream.write("maxlen=%d, " % object.maxlen) + stream.write("[") + + self._format_items(object, stream, indent, allowance + 1, context, level) + stream.write("])") + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr( + self, object: Any, context: set[int], maxlevels: int | None, level: int + ) -> str: + typ = type(object) + if typ 
in _builtin_scalars:
+            return repr(object)
+
+        r = getattr(typ, "__repr__", None)
+
+        if issubclass(typ, dict) and r is dict.__repr__:
+            if not object:
+                return "{}"
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return "{...}"
+            if objid in context:
+                return _recursion(object)
+            context.add(objid)
+            components: list[str] = []
+            append = components.append
+            level += 1
+            for k, v in sorted(object.items(), key=_safe_tuple):
+                krepr = self._safe_repr(k, context, maxlevels, level)
+                vrepr = self._safe_repr(v, context, maxlevels, level)
+                append(f"{krepr}: {vrepr}")
+            context.remove(objid)
+            return "{{{}}}".format(", ".join(components))
+
+        if (issubclass(typ, list) and r is list.__repr__) or (
+            issubclass(typ, tuple) and r is tuple.__repr__
+        ):
+            if issubclass(typ, list):
+                if not object:
+                    return "[]"
+                format = "[%s]"
+            elif len(object) == 1:
+                format = "(%s,)"
+            else:
+                if not object:
+                    return "()"
+                format = "(%s)"
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return format % "..."
+            if objid in context:
+                return _recursion(object)
+            context.add(objid)
+            components = []
+            append = components.append
+            level += 1
+            for o in object:
+                orepr = self._safe_repr(o, context, maxlevels, level)
+                append(orepr)
+            context.remove(objid)
+            return format % ", ".join(components)
+
+        return repr(object)
+
+
+_builtin_scalars = frozenset(
+    {str, bytes, bytearray, float, complex, bool, type(None), int}
+)
+
+
+def _recursion(object: Any) -> str:
+    return f"<Recursion on {type(object).__name__} with id={id(object)}>"
+
+
+def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]:
+    current = b""
+    last = len(object) // 4 * 4
+    for i in range(0, len(object), 4):
+        part = object[i : i + 4]
+        candidate = current + part
+        if i == last:
+            width -= allowance
+        if len(repr(candidate)) > width:
+            if current:
+                yield repr(current)
+            current = part
+        else:
+            current = candidate
+    if current:
+        yield repr(current)
diff --git a/.venv/Lib/site-packages/_pytest/_io/saferepr.py b/.venv/Lib/site-packages/_pytest/_io/saferepr.py
new file mode 100644
index 00000000..cee70e33
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/_io/saferepr.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import pprint
+import reprlib
+
+
+def _try_repr_or_str(obj: object) -> str:
+    try:
+        return repr(obj)
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except BaseException:
+        return f'{type(obj).__name__}("{obj}")'
+
+
+def _format_repr_exception(exc: BaseException, obj: object) -> str:
+    try:
+        exc_info = _try_repr_or_str(exc)
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except BaseException as inner_exc:
+        exc_info = f"unpresentable exception ({_try_repr_or_str(inner_exc)})"
+    return (
+        f"<[{exc_info} raised in repr()] {type(obj).__name__} object at 0x{id(obj):x}>"
+    )
+
+
+def _ellipsize(s: str, maxsize: int) -> str:
+    if len(s) > maxsize:
+        i = max(0, (maxsize - 3) // 2)
+        j = max(0, maxsize - 3 - i)
+        return s[:i] + "..." + s[len(s) - j :]
+    return s
+
+
+class SafeRepr(reprlib.Repr):
+    """
+    repr.Repr that limits the resulting size of repr() and includes
+    information on exceptions raised during the call.
+    """
+
+    def __init__(self, maxsize: int | None, use_ascii: bool = False) -> None:
+        """
+        :param maxsize:
+            If not None, will truncate the resulting repr to that specific size, using ellipsis
+            somewhere in the middle to hide the extra text.
+            If None, will not impose any size limits on the returning repr.
+ """ + super().__init__() + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. + self.maxstring = maxsize if maxsize is not None else 1_000_000_000 + self.maxsize = maxsize + self.use_ascii = use_ascii + + def repr(self, x: object) -> str: + try: + if self.use_ascii: + s = ascii(x) + else: + s = super().repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info. + """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr( + obj: object, maxsize: int | None = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False +) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + stdlib. + """ + return SafeRepr(maxsize, use_ascii).repr(obj) + + +def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str: + """Return an unlimited-size safe repr-string for the given object. + + As with saferepr, failing __repr__ functions of user instances + will be represented with a short exception info. + + This function is a wrapper around simple repr. + + Note: a cleaner solution would be to alter ``saferepr``this way + when maxsize=None, but that might affect some other code. + """ + try: + if use_ascii: + return ascii(obj) + return repr(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) diff --git a/.venv/Lib/site-packages/_pytest/_io/terminalwriter.py b/.venv/Lib/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 00000000..70ebd3d0 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,274 @@ +"""Helper functions for writing to terminals and files.""" + +from __future__ import annotations + +import os +import shutil +import sys +from typing import final +from typing import Literal +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING + +from ..compat import assert_never +from .wcwidth import wcswidth + + +if TYPE_CHECKING: + from pygments.formatter import Formatter + from pygments.lexer import Lexer + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. 
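+    # Illustrative sketch of the saferepr family defined above:
+    #
+    #     class Evil:
+    #         def __repr__(self):
+    #             raise RuntimeError("boom")
+    #
+    #     saferepr(Evil())    # "<[RuntimeError('boom') raised in repr()] Evil object at 0x...>"
+    #     saferepr("x" * 1000)  # ellipsized around the middle (DEFAULT_REPR_MAX_SIZE)
+    #     saferepr_unlimited(Evil())  # same exception info, no size limit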
+ if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if os.environ.get("NO_COLOR"): + return False + if os.environ.get("FORCE_COLOR"): + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: TextIO | None = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: int | None = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join(f"\x1b[{cod}m" for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. + if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. 
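+        # Illustrative sketch of markup(), defined above: with hasmarkup
+        # enabled,
+        #
+        #     tw.markup("FAILED", red=True, bold=True)
+        #
+        # yields "\x1b[31m\x1b[1mFAILED\x1b[0m". PY_COLORS=1/0 force color
+        # on or off; NO_COLOR and FORCE_COLOR are honored too.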
+ if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + f"indents size ({len(indents)}) should have same size as lines ({len(lines)})" + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _get_pygments_lexer(self, lexer: Literal["python", "diff"]) -> Lexer | None: + try: + if lexer == "python": + from pygments.lexers.python import PythonLexer + + return PythonLexer() + elif lexer == "diff": + from pygments.lexers.diff import DiffLexer + + return DiffLexer() + else: + assert_never(lexer) + except ModuleNotFoundError: + return None + + def _get_pygments_formatter(self) -> Formatter | None: + try: + import pygments.util + except ModuleNotFoundError: + return None + + from _pytest.config.exceptions import UsageError + + theme = os.getenv("PYTEST_THEME") + theme_mode = os.getenv("PYTEST_THEME_MODE", "dark") + + try: + from pygments.formatters.terminal import TerminalFormatter + + return TerminalFormatter(bg=theme_mode, style=theme) + + except pygments.util.ClassNotFound as e: + raise UsageError( + f"PYTEST_THEME environment variable has an invalid value: '{theme}'. " + "Hint: See available pygments styles with `pygmentize -L styles`." + ) from e + except pygments.util.OptionError as e: + raise UsageError( + f"PYTEST_THEME_MODE environment variable has an invalid value: '{theme_mode}'. " + "The allowed values are 'dark' (default) and 'light'." 
+ ) from e + + def _highlight( + self, source: str, lexer: Literal["diff", "python"] = "python" + ) -> str: + """Highlight the given source if we have markup support.""" + if not source or not self.hasmarkup or not self.code_highlight: + return source + + pygments_lexer = self._get_pygments_lexer(lexer) + if pygments_lexer is None: + return source + + pygments_formatter = self._get_pygments_formatter() + if pygments_formatter is None: + return source + + from pygments import highlight + + highlighted: str = highlight(source, pygments_lexer, pygments_formatter) + # pygments terminal formatter may add a newline when there wasn't one. + # We don't want this, remove. + if highlighted[-1] == "\n" and source[-1] != "\n": + highlighted = highlighted[:-1] + + # Some lexers will not set the initial color explicitly + # which may lead to the previous color being propagated to the + # start of the expression, so reset first. + highlighted = "\x1b[0m" + highlighted + + return highlighted diff --git a/.venv/Lib/site-packages/_pytest/_io/wcwidth.py b/.venv/Lib/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 00000000..23886ff1 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from functools import lru_cache +import unicodedata + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. 
+ """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/.venv/Lib/site-packages/_pytest/_py/__init__.py b/.venv/Lib/site-packages/_pytest/_py/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/.venv/Lib/site-packages/_pytest/_py/error.py b/.venv/Lib/site-packages/_pytest/_py/error.py new file mode 100644 index 00000000..ab3a4ed3 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_py/error.py @@ -0,0 +1,111 @@ +"""create errno-specific classes for IO or os calls.""" + +from __future__ import annotations + +import errno +import os +import sys +from typing import Callable +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + P = ParamSpec("P") + +R = TypeVar("R") + + +class Error(EnvironmentError): + def __repr__(self) -> str: + return "{}.{} {!r}: {} ".format( + self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + # repr(self.args) + ) + + def __str__(self) -> str: + s = "[{}]: {}".format( + self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + + +class ErrorMaker: + """lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + + _errno2class: dict[int, type[Error]] = {} + + def __getattr__(self, name: str) -> type[Error]: + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno: int) -> type[Error]: + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" % (eno,)) + errorcls = type( + clsname, + (Error,), + {"__module__": "py.error", "__doc__": os.strerror(eno)}, + ) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + """Call a function and raise an errno-exception if applicable.""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except Error: + raise + except OSError as value: + if not hasattr(value, "errno"): + raise + errno = value.errno + if sys.platform == "win32": + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + else: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + + raise cls(f"{func.__name__}{args!r}") + + +_error_maker = ErrorMaker() +checked_call = _error_maker.checked_call + + +def __getattr__(attr: str) -> type[Error]: + return getattr(_error_maker, attr) # type: ignore[no-any-return] diff --git a/.venv/Lib/site-packages/_pytest/_py/path.py b/.venv/Lib/site-packages/_pytest/_py/path.py new file mode 100644 index 00000000..c7ab1182 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_py/path.py @@ -0,0 +1,1475 @@ +# mypy: allow-untyped-defs +"""local path implementation.""" + +from __future__ import annotations + +import atexit +from contextlib import contextmanager +import fnmatch +import importlib.util +import io +import os +from os.path import abspath +from os.path 
import dirname +from os.path import exists +from os.path import isabs +from os.path import isdir +from os.path import isfile +from os.path import islink +from os.path import normpath +import posixpath +from stat import S_ISDIR +from stat import S_ISLNK +from stat import S_ISREG +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import uuid +import warnings + +from . import error + + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") + + +class Checkers: + _depend_on_existence = "exists", "link", "dir", "file" + + def __init__(self, path): + self.path = path + + def dotfile(self): + return self.path.basename.startswith(".") + + def ext(self, arg): + if not arg.startswith("."): + arg = "." + arg + return self.path.ext == arg + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + from .._code.source import getrawcode + + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == "not": + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError(f"no {name!r} checker available for {self.path!r}") + try: + if getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (error.ENOENT, error.ENOTDIR, error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = "not" + name + if name in kw: + if not kw.get(name): + return False + return True + + _statcache: Stat + + def _stat(self) -> Stat: + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + +class NeverRaised(Exception): + pass + + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) + elif not hasattr(rec, "__call__") and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort( + [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] + ) + if not self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + + +class 
FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if ( + pattern.find(path.sep) == -1 + and iswin32 + and pattern.find(posixpath.sep) != -1 + ): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = "*" + path.sep + pattern + return fnmatch.fnmatch(name, pattern) + + +def map_as_list(func, iter): + return list(map(func, iter)) + + +class Stat: + if TYPE_CHECKING: + + @property + def size(self) -> int: ... + + @property + def mtime(self) -> float: ... + + def __getattr__(self, name: str) -> Any: + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + + entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + @property + def group(self): + """Return group name of file.""" + if iswin32: + raise NotImplementedError("XXX win32") + import grp + + entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + + +def getuserid(user): + import pwd + + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] # type:ignore[attr-defined,unused-ignore] + return user + + +def getgroupid(group): + import grp + + if not isinstance(group, int): + group = grp.getgrnam(group)[2] # type:ignore[attr-defined,unused-ignore] + return group + + +class LocalPath: + """Object oriented interface to os.path and other local filesystem + related information. + """ + + class ImportMismatchError(ImportError): + """raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + + def __init__(self, path=None, expanduser=False): + """Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = error.checked_call(os.getcwd) + else: + try: + path = os.fspath(path) + except TypeError: + raise ValueError( + "can only pass None, Path instances " + "or non-empty strings to LocalPath" + ) + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + if sys.platform != "win32": + + def chown(self, user, group, rec=0): + """Change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. 
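+
+            A hypothetical usage sketch (the path, user, and group are
+            made-up values and must already exist on the system)::
+
+                p = local("/tmp/example-dir")
+                p.chown("alice", "staff", rec=1)  # hand over the whole tree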
+ """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + error.checked_call(os.chown, str(x), uid, gid) + error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self) -> str: + """Return value of a symbolic link.""" + # https://github.com/python/mypy/issues/12278 + return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value,unused-ignore] + + def mklinkto(self, oldname): + """Posix style hard link to another name.""" + error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """Create a symbolic link with the given value (pointing to another name).""" + if absolute: + error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(("..",) * n + (relsource,)) + error.checked_call(os.symlink, target, self.strpath) + + def __div__(self, other): + return self.join(os.fspath(other)) + + __truediv__ = __div__ # py3k + + @property + def basename(self): + """Basename part of path.""" + return self._getbyspec("basename")[0] + + @property + def dirname(self): + """Dirname part of path.""" + return self._getbyspec("dirname")[0] + + @property + def purebasename(self): + """Pure base name of the path.""" + return self._getbyspec("purebasename")[0] + + @property + def ext(self): + """Extension of the path (including the '.').""" + return self._getbyspec("ext")[0] + + def read_binary(self): + """Read and return a bytestring from reading the path.""" + with self.open("rb") as f: + return f.read() + + def read_text(self, encoding): + """Read and return a Unicode string from reading the path.""" + with self.open("r", encoding=encoding) as f: + return f.read() + + def read(self, mode="r"): + """Read and return a bytestring from reading the path.""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """Read and return a list of lines from the path. if cr is False, the + newline will be removed from the end of each line.""" + mode = "r" + + if not cr: + content = self.read(mode) + return content.split("\n") + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """(deprecated) return object unpickled from self.read()""" + f = self.open("rb") + try: + import pickle + + return error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """Move this path to target.""" + if target.relto(self): + raise error.EINVAL(target, "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def fnmatch(self, pattern): + """Return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. 
+ """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """Return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, LocalPath)): + raise TypeError(f"{relpath!r}: not a string or path object") + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + # assert strrelpath[-1] == self.sep + # assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, "_name", None) == "nt": + if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)): + return strself[len(strrelpath) :] + elif strself.startswith(strrelpath): + return strself[len(strrelpath) :] + return "" + + def ensure_dir(self, *args): + """Ensure the path joined with args is a directory.""" + return self.ensure(*args, dir=True) + + def bestrelpath(self, dest): + """Return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + lst = [os.pardir] * n + if reldest: + lst.append(reldest) + target = dest.sep.join(lst) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """Return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + lst = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + lst.append(current) + if not reverse: + lst.reverse() + return lst + + def common(self, other): + """Return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """Return new path object with 'other' added to the basename""" + return self.new(basename=self.basename + str(other)) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """Yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. 
+ """ + yield from Visitor(fil, rec, ignore, bf, sort).gen(self) + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, "__call__"): + warnings.warn( + DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), + stacklevel=3, + ) + res.sort(sort) + else: + res.sort() + + def __fspath__(self): + return self.strpath + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = os.fspath(self) + try: + s2 = os.fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return os.fspath(self) < os.fspath(other) + + def __gt__(self, other): + return os.fspath(self) > os.fspath(other) + + def samefile(self, other): + """Return True if 'other' references the same file as 'self'.""" + other = os.fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return error.checked_call(os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """Remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + + error.checked_call( + shutil.rmtree, self.strpath, ignore_errors=ignore_errors + ) + else: + error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """Return hexdigest of hashvalue for this file.""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError(f"Don't know how to compute {hashtype!r} hash") + f = self.open("rb") + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """Create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename, ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext" + ) + if "basename" in kw: + if "purebasename" in kw or "ext" in kw: + raise ValueError(f"invalid specification {kw!r}") + else: + pb = kw.setdefault("purebasename", purebasename) + try: + ext = kw["ext"] + except KeyError: + pass + else: + if ext and not ext.startswith("."): + ext = "." 
+ ext + kw["basename"] = pb + ext + + if "dirname" in kw and not kw["dirname"]: + kw["dirname"] = drive + else: + kw.setdefault("dirname", dirname) + kw.setdefault("sep", self.sep) + obj.strpath = normpath("{dirname}{sep}{basename}".format(**kw)) + return obj + + def _getbyspec(self, spec: str) -> list[str]: + """See new for what 'spec' can be.""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(",")) + for name in args: + if name == "drive": + res.append(parts[0]) + elif name == "dirname": + res.append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == "basename": + res.append(basename) + else: + i = basename.rfind(".") + if i == -1: + purebasename, ext = basename, "" + else: + purebasename, ext = basename[:i], basename[i:] + if name == "purebasename": + res.append(purebasename) + elif name == "ext": + res.append(ext) + else: + raise ValueError(f"invalid part specification {name!r}") + return res + + def dirpath(self, *args, **kwargs): + """Return the directory path joined with any given path arguments.""" + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return self.new(basename="").join(*args, **kwargs) + + def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath: + """Return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [os.fspath(arg) for arg in args] + strpath = self.strpath + if abs: + newargs: list[str] = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip("/") + arg = arg.replace("/", sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode="r", ensure=False, encoding=None): + """Return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return error.checked_call( + io.open, + self.strpath, + mode, + encoding=encoding, + ) + return error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + """Check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. 
+ + valid checkers:: + + file = 1 # is a file + file = 0 # is not a file (may not even exist) + dir = 1 # is a dir + link = 1 # is a link + exists = 1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + if not kw: + kw = {"exists": 1} + return Checkers(self)._evaluate(kw) + + _patternchars = set("*?[" + os.sep) + + def listdir(self, fil=None, sort=None): + """List directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, str): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = FNMatcher(fil) + names = error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self) -> int: + """Return size of the underlying file object""" + return self.stat().size + + def mtime(self) -> float: + """Return last modification time of the path.""" + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """Copy path to target. + + If mode is True, will copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self != target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + + def rec(p): + return p.check(link=0) + + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """Rename this path to target.""" + target = os.fspath(target) + return error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """Pickle object into path location""" + f = self.open("wb") + import pickle + + try: + error.checked_call(pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """Create & return the directory joined with args.""" + p = self.join(*args) + error.checked_call(os.mkdir, os.fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """Write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("wb") as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """Write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("w", encoding=encoding) as f: + f.write(data) + + def write(self, data, mode="w", ensure=False): + """Write data into path. If ensure is True create + missing parent directories. 
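+
+        For example (hypothetical target path)::
+
+            local("/tmp/out/notes.txt").write("hello", ensure=True)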
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if "b" in mode: + if not isinstance(data, bytes): + raise ValueError("can only process bytes") + else: + if not isinstance(data, str): + if not isinstance(data, bytes): + data = str(data) + else: + data = data.decode(sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """Ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get("dir", 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open("wb").close() + return p + + @overload + def stat(self, raising: Literal[True] = ...) -> Stat: ... + + @overload + def stat(self, raising: Literal[False]) -> Stat | None: ... + + def stat(self, raising: bool = True) -> Stat | None: + """Return an os.stat() tuple.""" + if raising: + return Stat(self, error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self) -> Stat: + """Return an os.lstat() tuple.""" + return Stat(self, error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """Set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return error.checked_call(os.utime, self.strpath, mtime) + try: + return error.checked_call(os.utime, self.strpath, (-1, mtime)) + except error.EINVAL: + return error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """Change directory to self and return old current directory""" + try: + old = self.__class__() + except error.ENOENT: + old = None + error.checked_call(os.chdir, self.strpath) + return old + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. + """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """Return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """Return last access time of the path.""" + return self.stat().atime + + def __repr__(self): + return f"local({self.strpath!r})" + + def __str__(self): + """Return string representation of the Path.""" + return self.strpath + + def chmod(self, mode, rec=0): + """Change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. 
+ """ + if not isinstance(mode, int): + raise TypeError(f"mode {mode!r} must be an integer") + if rec: + for x in self.visit(rec=rec): + error.checked_call(os.chmod, str(x), mode) + error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath cannot be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join("__init__.py").exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """Return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. + """ + if not self.check(): + raise error.ENOENT(self) + + if ensuresyspath == "importlib": + if modname is None: + modname = self.purebasename + spec = importlib.util.spec_from_file_location(modname, str(self)) + if spec is None or spec.loader is None: + raise ImportError(f"Can't find module {modname} at location {self!s}") + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... 
too icky to check + modfile = mod.__file__ + assert modfile is not None + if modfile[-4:] in (".pyc", ".pyo"): + modfile = modfile[:-1] + elif modfile.endswith("$py.class"): + modfile = modfile[:-9] + ".py" + if modfile.endswith(os.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except error.ENOENT: + issame = False + if not issame: + ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH") + if ignore != "1": + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + import types + + mod = types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + with open(str(self), "rb") as f: + exec(f.read(), mod.__dict__) + except BaseException: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str: + """Return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import PIPE + from subprocess import Popen + + popen_opts.pop("stdout", None) + popen_opts.pop("stderr", None) + proc = Popen( + [str(self)] + [str(arg) for arg in argv], + **popen_opts, + stdout=PIPE, + stderr=PIPE, + ) + stdout: str | bytes + stdout, stderr = proc.communicate() + ret = proc.wait() + if isinstance(stdout, bytes): + stdout = stdout.decode(sys.getdefaultencoding()) + if ret != 0: + if isinstance(stderr, bytes): + stderr = stderr.decode(sys.getdefaultencoding()) + raise RuntimeError( + ret, + ret, + str(self), + stdout, + stderr, + ) + return stdout + + @classmethod + def sysfind(cls, name, checker=None, paths=None): + """Return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = os.environ["Path"].split(";") + if "" not in paths and "." not in paths: + paths.append(".") + try: + systemroot = os.environ["SYSTEMROOT"] + except KeyError: + pass + else: + paths = [ + path.replace("%SystemRoot%", systemroot) for path in paths + ] + else: + paths = os.environ["PATH"].split(":") + tryadd = [] + if iswin32: + tryadd += os.environ["PATHEXT"].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except error.EACCES: + pass + return None + + @classmethod + def _gethomedir(cls): + try: + x = os.environ["HOME"] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"] + except KeyError: + return None + return cls(x) + + # """ + # special class constructors for local filesystem paths + # """ + @classmethod + def get_temproot(cls): + """Return the system's temporary directory + (where tempfiles are usually created in) + """ + import tempfile + + return local(tempfile.gettempdir()) + + @classmethod + def mkdtemp(cls, rootdir=None): + """Return a Path object pointing to a fresh new temporary directory + (which we created ourselves). 
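+
+        A usage sketch (the file name is illustrative)::
+
+            tmp = local.mkdtemp()  # fresh directory under the temp root
+            tmp.join("data.txt").write("x", ensure=True)
+            tmp.remove(rec=1)      # clean up when done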
+ """ + import tempfile + + if rootdir is None: + rootdir = cls.get_temproot() + path = error.checked_call(tempfile.mkdtemp, dir=str(rootdir)) + return cls(path) + + @classmethod + def make_numbered_dir( + cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800 + ): # two days + """Return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + + def parse_num(path): + """Parse the number out of a path (if it matches the prefix)""" + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix) :]) + except ValueError: + pass + + def create_lockfile(path): + """Exclusively create lockfile. Throws when failed""" + mypid = os.getpid() + lockfile = path.join(".lock") + if hasattr(lockfile, "mksymlinkto"): + lockfile.mksymlinkto(str(mypid)) + else: + fd = error.checked_call( + os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644 + ) + with os.fdopen(fd, "w") as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """Ensure lockfile is removed at process exit""" + mypid = os.getpid() + + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except error.Error: + pass + + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum + 1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (error.EEXIST, error.ENOENT, error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """Read file modification time""" + try: + return path.lstat().mtime + except error.Error: + pass + + garbage_prefix = prefix + "garbage-" + + def is_garbage(path): + """Check if path denotes directory scheduled for removal""" + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (error.EEXIST, error.ENOENT, error.EBUSY): + path_time = get_mtime(path) + if not 
path_time: + # assume directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ["USER"] # linux, et al + except KeyError: + try: + username = os.environ["USERNAME"] # windows + except KeyError: + username = "current" + + src = str(udir) + dest = src[: src.rfind("-")] + "-" + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + + +def copymode(src, dest): + """Copy permission from src to dst.""" + import shutil + + shutil.copymode(src, dest) + + +def copystat(src, dest): + """Copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open("rb") + try: + fdest = dest.open("wb") + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == "_"): + name = name.replace("_", "") + return not name or name.isalnum() + + +local = LocalPath diff --git a/.venv/Lib/site-packages/_pytest/_version.py b/.venv/Lib/site-packages/_pytest/_version.py new file mode 100644 index 00000000..cf258c17 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '8.3.3' +__version_tuple__ = version_tuple = (8, 3, 3) diff --git a/.venv/Lib/site-packages/_pytest/assertion/__init__.py b/.venv/Lib/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 00000000..f2f1d029 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,192 @@ +# mypy: allow-untyped-defs +"""Support for presenting detailed information in failing assertions.""" + +from __future__ import annotations + +import sys +from typing import Any +from typing import Generator +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook. " + "Make sure to delete any previously generated pyc cache files.", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_ASSERTIONS, + help=( + "Specify a verbosity level for assertions, overriding the main level. " + "Higher levels will provide more detailed explanation when an assertion fails." + ), + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :param names: The module names to register. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + # TODO(typing): Add a protocol for mark_rewrite() and use it + # for importhook and for PytestPluginManager.rewrite_hook. 
+        importhook = DummyRewriteHook()  # type: ignore
+    importhook.mark_rewrite(*names)
+
+
+class DummyRewriteHook:
+    """A no-op import hook for when rewriting is disabled."""
+
+    def mark_rewrite(self, *names: str) -> None:
+        pass
+
+
+class AssertionState:
+    """State for the assertion plugin."""
+
+    def __init__(self, config: Config, mode) -> None:
+        self.mode = mode
+        self.trace = config.trace.root.get("assertion")
+        self.hook: rewrite.AssertionRewritingHook | None = None
+
+
+def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
+    """Try to install the rewrite hook, raise SystemError if it fails."""
+    config.stash[assertstate_key] = AssertionState(config, "rewrite")
+    config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
+    sys.meta_path.insert(0, hook)
+    config.stash[assertstate_key].trace("installed rewrite import hook")
+
+    def undo() -> None:
+        hook = config.stash[assertstate_key].hook
+        if hook is not None and hook in sys.meta_path:
+            sys.meta_path.remove(hook)
+
+    config.add_cleanup(undo)
+    return hook
+
+
+def pytest_collection(session: Session) -> None:
+    # This hook is only called when test modules are collected
+    # so for example not in the managing process of pytest-xdist
+    # (which does not collect test modules).
+    assertstate = session.config.stash.get(assertstate_key, None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(session)
+
+
+@hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]:
+    """Set up the pytest_assertrepr_compare and pytest_assertion_pass hooks.
+
+    The rewrite module will use util._reprcompare if it exists to use custom
+    reporting via the pytest_assertrepr_compare hook. This sets up this custom
+    comparison for the test.
+    """
+    ihook = item.ihook
+
+    def callbinrepr(op, left: object, right: object) -> str | None:
+        """Call the pytest_assertrepr_compare hook and prepare the result.
+
+        This uses the first result from the hook and then ensures the
+        following:
+        * Overly verbose explanations are truncated unless configured otherwise
+          (e.g. if running in verbose mode).
+        * Embedded newlines are escaped to help util.format_explanation()
+          later.
+        * If the rewrite mode is used, embedded %-characters are replaced
+          to protect later % formatting.
+
+        The result can be formatted by util.format_explanation() for
+        pretty printing.
+ """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + try: + return (yield) + finally: + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: Session) -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> list[str] | None: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/.venv/Lib/site-packages/_pytest/assertion/rewrite.py b/.venv/Lib/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 00000000..a7a92c0f --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1204 @@ +"""Rewrite assertion AST to produce nice error messages.""" + +from __future__ import annotations + +import ast +from collections import defaultdict +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +from pathlib import Path +from pathlib import PurePath +import struct +import sys +import tokenize +import types +from typing import Callable +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import Sequence +from typing import TYPE_CHECKING + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from _pytest._io.saferepr import saferepr +from _pytest._version import version +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + + +# fmt: off +from _pytest.assertion.util import format_explanation as _format_explanation # noqa:F401, isort:skip +# fmt:on + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + + +class Sentinel: + pass + + +assertstate_key = StashKey["AssertionState"]() + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT + +# Special marker that denotes we have just left a scope definition +_SCOPE_END_MARKER = Sentinel() + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Session | None = None + self._rewritten_names: dict[str, Path] = {} + self._must_rewrite: set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Session | None) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Sequence[str | bytes] | None = None, + target: types.ModuleType | None = None, + ) -> importlib.machinery.ModuleSpec | None: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace(f"find_module called for: {name}") + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + + if spec is None and path is not None: + # With --import-mode=importlib, PathFinder cannot find spec without modifying `sys.path`, + # causing inability to assert rewriting (#12659). + # At this point, try using the file path to find the module spec. + for _path_str in path: + spec = importlib.util.spec_from_file_location(name, _path_str) + if spec is not None: + break + + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> types.ModuleType | None: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. 
To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: AssertionState) -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. 
+ path = PurePath(*parts).with_suffix(".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: AssertionState) -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: AssertionState) -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + mod = sys.modules[name] + if not AssertionRewriter.is_rewrite_disabled( + mod.__doc__ or "" + ) and not isinstance(mod.__loader__, type(self)): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name: str) -> None: + from _pytest.warning_types import PytestAssertRewriteWarning + + self.config.issue_config_time_warning( + PytestAssertRewriteWarning( + f"Module already imported so cannot be rewritten: {name}" + ), + stacklevel=5, + ) + + def get_data(self, pathname: str | bytes) -> bytes: + """Optional PEP302 get_data API.""" + with open(pathname, "rb") as f: + return f.read() + + if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources + else: + from importlib.abc import TraversableResources + + def get_resource_reader(self, name: str) -> TraversableResources: + if sys.version_info < (3, 11): + from importlib.readers import FileReader + else: + from importlib.resources.readers import FileReader + + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) + + +def _write_pyc_fp( + fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType +) -> None: + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason to deviate. 
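+    # The 16-byte header written below mirrors CPython's unhashed pyc
+    # layout (PEP 552): 4-byte magic number, 4-byte zero flags field,
+    # then 32-bit mtime and source size.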
+    fp.write(importlib.util.MAGIC_NUMBER)
+    # https://www.python.org/dev/peps/pep-0552/
+    flags = b"\x00\x00\x00\x00"
+    fp.write(flags)
+    # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
+    mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
+    size = source_stat.st_size & 0xFFFFFFFF
+    # "<LL" stands for 2 unsigned longs, little-endian.
+    fp.write(struct.pack("<LL", mtime, size))
+    fp.write(marshal.dumps(co))
+
+
+def _write_pyc(
+    state: AssertionState,
+    co: types.CodeType,
+    source_stat: os.stat_result,
+    pyc: Path,
+) -> bool:
+    proc_pyc = f"{pyc}.{os.getpid()}"
+    try:
+        with open(proc_pyc, "wb") as fp:
+            _write_pyc_fp(fp, source_stat, co)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+        return False
+
+    try:
+        os.replace(proc_pyc, pyc)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {pyc}: {e}")
+        # we ignore any failure to write the cache file
+        # there are many reasons, permission-denied, pycache dir being a
+        # file etc.
+        return False
+    return True
+
+
+def _rewrite_test(fn: Path, config: Config) -> tuple[os.stat_result, types.CodeType]:
+    """Read and rewrite *fn* and return the code object."""
+    stat = os.stat(fn)
+    source = fn.read_bytes()
+    strfn = str(fn)
+    tree = ast.parse(source, filename=strfn)
+    rewrite_asserts(tree, source, strfn, config)
+    co = compile(tree, strfn, "exec", dont_inherit=True)
+    return stat, co
+
+
+def _read_pyc(
+    source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
+) -> types.CodeType | None:
+    """Possibly read a pytest pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+    """
+    try:
+        fp = open(pyc, "rb")
+    except OSError:
+        return None
+    with fp:
+        try:
+            stat_result = os.stat(source)
+            mtime = int(stat_result.st_mtime)
+            size = stat_result.st_size
+            data = fp.read(16)
+        except OSError as e:
+            trace(f"_read_pyc({source}): OSError {e}")
+            return None
+        # Check for invalid or out of date pyc file.
+        if len(data) != (16):
+            trace(f"_read_pyc({source}): invalid pyc (too short)")
+            return None
+        if data[:4] != importlib.util.MAGIC_NUMBER:
+            trace(f"_read_pyc({source}): invalid pyc (bad magic number)")
+            return None
+        if data[4:8] != b"\x00\x00\x00\x00":
+            trace(f"_read_pyc({source}): invalid pyc (unsupported flags)")
+            return None
+        mtime_data = data[8:12]
+        if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
+            trace(f"_read_pyc({source}): out of date")
+            return None
+        size_data = data[12:16]
+        if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
+            trace(f"_read_pyc({source}): invalid pyc (incorrect size)")
+            return None
+        try:
+            co = marshal.load(fp)
+        except Exception as e:
+            trace(f"_read_pyc({source}): marshal.load error {e}")
+            return None
+        if not isinstance(co, types.CodeType):
+            trace(f"_read_pyc({source}): not a code object")
+            return None
+        return co
+
+
+def rewrite_asserts(
+    mod: ast.Module,
+    source: bytes,
+    module_path: str | None = None,
+    config: Config | None = None,
+) -> None:
+    """Rewrite the assert statements in mod."""
+    AssertionRewriter(module_path, config, source).run(mod)
+
+
+def _saferepr(obj: object) -> str:
+    r"""Get a safe repr of an object for assertion error messages.
+
+    The assertion formatting (util.format_explanation()) requires
+    newlines to be escaped since they are a special character for it.
+    Normally assertion.util.format_explanation() does this but for a
+    custom repr it is possible to contain one of the special escape
+    sequences, especially '\n{' and '\n}' are likely to be present in
+    JSON reprs.
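+
+    For instance (a contrived object, illustrative only)::
+
+        class Multi:
+            def __repr__(self):
+                return "one\ntwo"
+
+        _saferepr(Multi())  # -> "one\\ntwo", the real newline gets escaped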
+ """ + if isinstance(obj, types.MethodType): + # for bound methods, skip redundant information + return obj.__name__ + + maxsize = _get_maxsize_for_saferepr(util._config) + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Config | None) -> int | None: + """Get `maxsize` configuration for saferepr based on the given config object.""" + if config is None: + verbosity = 0 + else: + verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. + replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj, _get_maxsize_for_saferepr(util._config)) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + return False + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + 
ret: dict[int, str] = {} + + depth = 0 + lines: list[str] = [] + assert_lineno: int | None = None + seen_lines: set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in seen_lines: + lines[-1] = lines[-1][:offset] + # multi line assert with escaped newline before message + else: + lines.append(line[:offset]) + _write_and_reset() + elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}: + _write_and_reset() + elif lines and lineno not in seen_lines: + lines.append(line) + seen_lines.add(lineno) + + return ret + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false and calls pytest_assertion_pass hook + if expression is true. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :expl_stmts: The AST statements which will be executed to get + data from the assertion. This is the code which will construct + the detailed assertion message that is used in the AssertionError + or for the pytest_assertion_pass hook. 
+ + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + :scope: A tuple containing the current scope used for variables_overwrite. + + :variables_overwrite: A dict filled with references to variables + that change value within an assert. This happens when a variable is + reassigned with the walrus operator + + This state, except the variables_overwrite, is reset on every new assert + statement visited and used by the other visitors. + """ + + def __init__( + self, module_path: str | None, config: Config | None, source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + self.scope: tuple[ast.AST, ...] = () + self.variables_overwrite: defaultdict[tuple[ast.AST, ...], dict[str, str]] = ( + defaultdict(dict) + ) + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + item = None + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Constant) + and isinstance(item.value.value, str) + ): + doc = item.value.value + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. 
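+        # Sketch of the effect of the insertion above: the module now
+        # effectively begins with
+        #
+        #   import builtins as @py_builtins
+        #   import _pytest.assertion.rewrite as @pytest_ar
+        #
+        # "@" is not a valid character in Python identifiers, so user code
+        # can never accidentally shadow these aliases.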
+ self.scope = (mod,) + nodes: list[ast.AST | Sentinel] = [mod] + while nodes: + node = nodes.pop() + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): + self.scope = tuple((*self.scope, node)) + nodes.append(_SCOPE_END_MARKER) + if node == _SCOPE_END_MARKER: + self.scope = self.scope[:-1] + continue + assert isinstance(node, ast.AST) + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: list[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. 
+ """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys: list[ast.expr | None] = [ast.Constant(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> list[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + import warnings + + from _pytest.warning_types import PytestAssertRewriteWarning + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: list[ast.stmt] = [] + self.variables: list[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: list[str] = [] + + self.stack: list[dict[str, ast.expr]] = [] + self.expl_stmts: list[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. 
+ top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Constant(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Constant("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Constant(assert_.lineno), + ast.Constant(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + [*self.expl_stmts, hook_call_pass], + [], + ) + statements_pass: list[ast.stmt] = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables: list[ast.expr] = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Constant("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). + for stmt in self.statements: + for node in traverse_node(stmt): + ast.copy_location(node, assert_) + return self.statements + + def visit_NamedExpr(self, name: ast.NamedExpr) -> tuple[ast.NamedExpr, str]: + # This method handles the 'walrus operator' repr of the target + # name if it's a local variable or _should_repr_global_name() + # thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + target_id = name.target.id + inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(target_id)) + return name, self.explanation_param(expr) + + def visit_Name(self, name: ast.Name) -> tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
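+        # Sketch of the generated display expression for a bare name `x`
+        # (illustrative):
+        #
+        #   @pytest_ar._saferepr(x) if ("x" in @py_builtins.locals()
+        #       or @pytest_ar._should_repr_global_name(x)) else "x"
+        #
+        # i.e. locals are shown via repr, while globals without a useful
+        # repr fall back to the plain name string.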
+ locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: list[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821 + self.expl_stmts = fail_inner + # Check if the left operand is a ast.NamedExpr and the value has already been visited + if ( + isinstance(v, ast.Compare) + and isinstance(v.left, ast.NamedExpr) + and v.left.target.id + in [ + ast_expr.id + for ast_expr in boolop.values[:i] + if hasattr(ast_expr, "id") + ] + ): + pytest_temp = self.variable() + self.variables_overwrite[self.scope][v.left.target.id] = v.left # type:ignore[assignment] + v.left.target.id = pytest_temp + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Constant(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: list[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( + self.scope, {} + ): + arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment] + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + if isinstance( + keyword.value, ast.Name + ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): + keyword.value = self.variables_overwrite[self.scope][keyword.value.id] # type:ignore[assignment] + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + 
if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> tuple[ast.Starred, str]: + # A Starred node can appear in a function call. + res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]: + self.push_format_context() + # We first check if we have overwritten a variable in the previous assert + if isinstance( + comp.left, ast.Name + ) and comp.left.id in self.variables_overwrite.get(self.scope, {}): + comp.left = self.variables_overwrite[self.scope][comp.left.id] # type:ignore[assignment] + if isinstance(comp.left, ast.NamedExpr): + self.variables_overwrite[self.scope][comp.left.target.id] = comp.left # type:ignore[assignment] + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls: list[ast.expr] = [] + syms: list[ast.expr] = [] + results = [left_res] + for i, op, next_operand in it: + if ( + isinstance(next_operand, ast.NamedExpr) + and isinstance(left_res, ast.Name) + and next_operand.target.id == left_res.id + ): + next_operand.target.id = self.variable() + self.variables_overwrite[self.scope][left_res.id] = next_operand # type:ignore[assignment] + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(ast.Constant(sym)) + expl = f"{left_expl} {sym} {next_expl}" + expls.append(ast.Constant(expl)) + res_expr = ast.Compare(left_res, [op], [next_res]) + self.statements.append(ast.Assign([store_names[i]], res_expr)) + left_res, left_expl = next_res, next_expl + # Use pytest.assertion.util._reprcompare if that's available. + expl_call = self.helper( + "_call_reprcompare", + ast.Tuple(syms, ast.Load()), + ast.Tuple(load_names, ast.Load()), + ast.Tuple(expls, ast.Load()), + ast.Tuple(results, ast.Load()), + ) + if len(comp.ops) > 1: + res: ast.expr = ast.BoolOp(ast.And(), load_names) + else: + res = load_names[0] + + return res, self.explanation_param(self.pop_format_context(expl_call)) + + +def try_makedirs(cache_dir: Path) -> bool: + """Attempt to create the given directory and sub-directories exist. + + Returns True if successful or if it already exists. 
+ """ + try: + os.makedirs(cache_dir, exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + # + # squashfuse_ll returns ENOSYS "OSError: [Errno 38] Function not + # implemented" for a read-only error + if e.errno in {errno.EROFS, errno.ENOSYS}: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/.venv/Lib/site-packages/_pytest/assertion/truncate.py b/.venv/Lib/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 00000000..b67f02cc --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,117 @@ +"""Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI. +""" + +from __future__ import annotations + +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required( + explanation: list[str], item: Item, max_length: int | None = None +) -> list[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item: Item) -> bool: + """Whether or not this test item is eligible for truncation.""" + verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + return verbose < 2 and not util.running_on_ci() + + +def _truncate_explanation( + input_lines: list[str], + max_lines: int | None = None, + max_chars: int | None = None, +) -> list[str]: + """Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first, taking the truncation explanation into account. The remaining lines + will be replaced by a usage message. + """ + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + # The length of the truncation explanation depends on the number of lines + # removed but is at least 68 characters: + # The real value is + # 64 (for the base message: + # '...\n...Full output truncated (1 line hidden), use '-vv' to show")' + # ) + # + 1 (for plural) + # + int(math.log10(len(input_lines) - max_lines)) (number of hidden line, at least 1) + # + 3 for the '...' added to the truncated line + # But if there's more than 100 lines it's very likely that we're going to + # truncate, so we don't need the exact value using log10. + tolerable_max_chars = ( + max_chars + 70 # 64 + 1 (for plural) + 2 (for '99') + 3 for '...' 
+ ) + # The truncation explanation add two lines to the output + tolerable_max_lines = max_lines + 2 + if ( + len(input_lines) <= tolerable_max_lines + and input_char_count <= tolerable_max_chars + ): + return input_lines + # Truncate first to max_lines, and then truncate to max_chars if necessary + truncated_explanation = input_lines[:max_lines] + truncated_char = True + # We reevaluate the need to truncate chars following removal of some lines + if len("".join(truncated_explanation)) > tolerable_max_chars: + truncated_explanation = _truncate_by_char_count( + truncated_explanation, max_chars + ) + else: + truncated_char = False + + truncated_line_count = len(input_lines) - len(truncated_explanation) + if truncated_explanation[-1]: + # Add ellipsis and take into account part-truncated final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + if truncated_char: + # It's possible that we did not remove any char from this line + truncated_line_count += 1 + else: + # Add proper ellipsis when we were able to fit a full line exactly + truncated_explanation[-1] = "..." + return [ + *truncated_explanation, + "", + f"...Full output truncated ({truncated_line_count} line" + f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}", + ] + + +def _truncate_by_char_count(input_lines: list[str], max_chars: int) -> list[str]: + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/.venv/Lib/site-packages/_pytest/assertion/util.py b/.venv/Lib/site-packages/_pytest/assertion/util.py new file mode 100644 index 00000000..4dc1af4a --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/assertion/util.py @@ -0,0 +1,609 @@ +# mypy: allow-untyped-defs +"""Utilities for assertion debugging.""" + +from __future__ import annotations + +import collections.abc +import os +import pprint +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import Iterable +from typing import Literal +from typing import Mapping +from typing import Protocol +from typing import Sequence +from unicodedata import normalize + +from _pytest import outcomes +import _pytest._code +from _pytest._io.pprint import PrettyPrinter +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest.config import Config + + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Callable[[str, object, object], str | None] | None = None + +# Works similarly as _reprcompare attribute. Is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Callable[[int, str, str], None] | None = None + +# Config object which is assigned during pytest_runtest_protocol. 
+_config: Config | None = None
+
+
+class _HighlightFunc(Protocol):
+    def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str:
+        """Apply highlighting to the given source."""
+
+
+def format_explanation(explanation: str) -> str:
+    r"""Format an explanation.
+
+    Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~. The first two are intended to
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()). The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    lines = _split_explanation(explanation)
+    result = _format_lines(lines)
+    return "\n".join(result)
+
+
+def _split_explanation(explanation: str) -> list[str]:
+    r"""Return a list of individual lines in the explanation.
+
+    This will return a list of lines split on '\n{', '\n}' and '\n~'.
+    Any other newlines will be escaped and appear in the line as the
+    literal '\n' characters.
+    """
+    raw_lines = (explanation or "").split("\n")
+    lines = [raw_lines[0]]
+    for values in raw_lines[1:]:
+        if values and values[0] in ["{", "}", "~", ">"]:
+            lines.append(values)
+        else:
+            lines[-1] += "\\n" + values
+    return lines
+
+
+def _format_lines(lines: Sequence[str]) -> list[str]:
+    """Format the individual lines.
+
+    This will replace the '{', '}' and '~' characters of our mini formatting
+    language with the proper 'where ...', 'and ...' and ' + ...' text, taking
+    care of indentation along the way.
+
+    Return a list of formatted lines.
+    """
+    result = list(lines[:1])
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith("{"):
+            if stackcnt[-1]:
+                s = "and   "
+            else:
+                s = "where "
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(" +" + "  " * (len(stack) - 1) + s + line[1:])
+        elif line.startswith("}"):
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line[0] in ["~", ">"]
+            stack[-1] += 1
+            indent = len(stack) if line.startswith("~") else len(stack) - 1
+            result.append("  " * indent + line[1:])
+    assert len(stack) == 1
+    return result
+
+
+def issequence(x: Any) -> bool:
+    return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
+
+
+def istext(x: Any) -> bool:
+    return isinstance(x, str)
+
+
+def isdict(x: Any) -> bool:
+    return isinstance(x, dict)
+
+
+def isset(x: Any) -> bool:
+    return isinstance(x, (set, frozenset))
+
+
+def isnamedtuple(obj: Any) -> bool:
+    return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
+def isdatacls(obj: Any) -> bool:
+    return getattr(obj, "__dataclass_fields__", None) is not None
+
+
+def isattrs(obj: Any) -> bool:
+    return getattr(obj, "__attrs_attrs__", None) is not None
+
+
+def isiterable(obj: Any) -> bool:
+    try:
+        iter(obj)
+        return not istext(obj)
+    except Exception:
+        return False
+
+
+def has_default_eq(
+    obj: object,
+) -> bool:
+    """Check if an instance of an object contains the default eq.
+
+    First, we check if the object's __eq__ attribute has __code__;
+    if so, we check the equality of the method's code filename
+    (__code__.co_filename) against the default ones generated by the
+    dataclass and attr modules: for dataclasses the default co_filename
+    is "<string>", while for attrs classes it should contain
+    "attrs generated eq".
+    """
+    # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
+    if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
+        code_filename = obj.__eq__.__code__.co_filename
+
+        if isattrs(obj):
+            return "attrs generated eq" in code_filename
+
+        return code_filename == "<string>"  # data class
+    return True
+
+
+def assertrepr_compare(
+    config, op: str, left: Any, right: Any, use_ascii: bool = False
+) -> list[str] | None:
+    """Return specialised explanations for some operators/operands."""
+    verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS)
+
+    # Strings which normalize equal are often hard to distinguish when
+    # printed; use ascii() to make this easier. See issue #3246.
+    use_ascii = (
+        isinstance(left, str)
+        and isinstance(right, str)
+        and normalize("NFD", left) == normalize("NFD", right)
+    )
+
+    if verbose > 1:
+        left_repr = saferepr_unlimited(left, use_ascii=use_ascii)
+        right_repr = saferepr_unlimited(right, use_ascii=use_ascii)
+    else:
+        # XXX: "15 chars indentation" is wrong
+        #      ("E       AssertionError: assert "); should use term width.
+        maxsize = (
+            80 - 15 - len(op) - 2
+        ) // 2  # 15 chars indentation, 1 space around op
+
+        left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii)
+        right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii)
+
+    summary = f"{left_repr} {op} {right_repr}"
+    highlighter = config.get_terminal_writer()._highlight
+
+    explanation = None
+    try:
+        if op == "==":
+            explanation = _compare_eq_any(left, right, highlighter, verbose)
+        elif op == "not in":
+            if istext(left) and istext(right):
+                explanation = _notin_text(left, right, verbose)
+        elif op == "!=":
+            if isset(left) and isset(right):
+                explanation = ["Both sets are equal"]
+        elif op == ">=":
+            if isset(left) and isset(right):
+                explanation = _compare_gte_set(left, right, highlighter, verbose)
+        elif op == "<=":
+            if isset(left) and isset(right):
+                explanation = _compare_lte_set(left, right, highlighter, verbose)
+        elif op == ">":
+            if isset(left) and isset(right):
+                explanation = _compare_gt_set(left, right, highlighter, verbose)
+        elif op == "<":
+            if isset(left) and isset(right):
+                explanation = _compare_lt_set(left, right, highlighter, verbose)
+
+    except outcomes.Exit:
+        raise
+    except Exception:
+        repr_crash = _pytest._code.ExceptionInfo.from_current()._getreprcrash()
+        explanation = [
+            f"(pytest_assertion plugin: representation of details failed: {repr_crash}.",
+            " Probably an object has a faulty __repr__.)",
+        ]
+
+    if not explanation:
+        return None
+
+    if explanation[0] != "":
+        explanation = ["", *explanation]
+    return [summary, *explanation]
+
+
+def _compare_eq_any(
+    left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0
+) -> list[str]:
+    explanation = []
+    if istext(left) and istext(right):
+        explanation = _diff_text(left, right, verbose)
+    else:
+        from _pytest.python_api import ApproxBase
+
+        if isinstance(left, ApproxBase) or isinstance(right, ApproxBase):
+            # Although the common order should be obtained == expected, this ensures both ways
+            approx_side = left if isinstance(left, ApproxBase) else right
+            other_side = right if isinstance(left, ApproxBase) else left
+
+            explanation = approx_side._repr_compare(other_side)
+        elif type(left) is type(right) and (
+            isdatacls(left) or isattrs(left) or isnamedtuple(left)
+        ):
+            # Note: unlike dataclasses/attrs, namedtuples compare only the
+            # field values, not the type or field names.
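+            # Doctest-style illustration of why the same-type check above
+            # matters: namedtuples of different types compare equal whenever
+            # their values do, because they are plain tuples.
+            #
+            #   >>> from collections import namedtuple
+            #   >>> Point = namedtuple("Point", "x y")
+            #   >>> Coord = namedtuple("Coord", "x y")
+            #   >>> Point(1, 2) == Coord(1, 2)
+            #   True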
+ explanation = _compare_eq_cls(left, right, highlighter, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, highlighter, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, highlighter, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, highlighter, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, highlighter, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text(left: str, right: str, verbose: int = 0) -> list[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + """ + from difflib import ndiff + + explanation: list[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + f"Skipping {i} identical leading characters in diff, use -v to show" + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + f"Skipping {i} identical trailing " + "characters in diff, use -v to show" + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation += [ + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ] + return explanation + + +def _compare_eq_iterable( + left: Iterable[Any], + right: Iterable[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = PrettyPrinter().pformat(left).splitlines() + right_formatting = PrettyPrinter().pformat(right).splitlines() + + explanation = ["", "Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.rstrip() + for line in difflib.ndiff(right_formatting, left_formatting) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], + right: Sequence[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: list[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + explanation.append( + f"At index {i} diff:" + f" {highlighter(repr(left_value))} != 
{highlighter(repr(right_value))}" + ) + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [ + f"{dir_with_more} contains one more item: {highlighter(extra)}" + ] + else: + explanation += [ + "%s contains %d more items, first extra item: %s" + % (dir_with_more, len_diff, highlighter(extra)) + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = [] + explanation.extend(_set_one_sided_diff("left", left, right, highlighter)) + explanation.extend(_set_one_sided_diff("right", right, left, highlighter)) + return explanation + + +def _compare_gt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_gte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_lt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_lte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_gte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("right", right, left, highlighter) + + +def _compare_lte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("left", left, right, highlighter) + + +def _set_one_sided_diff( + posn: str, + set1: AbstractSet[Any], + set2: AbstractSet[Any], + highlighter: _HighlightFunc, +) -> list[str]: + explanation = [] + diff = set1 - set2 + if diff: + explanation.append(f"Extra items in the {posn} set:") + for item in diff: + explanation.append(highlighter(saferepr(item))) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], + right: Mapping[Any, Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation: list[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [f"Omitting {len(same)} identical items, use -vv to show"] + elif same: + explanation += ["Common items:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [ + highlighter(saferepr({k: left[k]})) + + " != " + + highlighter(saferepr({k: right[k]})) + ] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + "Left contains %d more item%s:" + % (len_extra_left, "" if len_extra_left == 1 else "s") + ) + explanation.extend( + highlighter(pprint.pformat({k: left[k] for k in extra_left})).splitlines() + ) + extra_right = set_right - set_left + len_extra_right = len(extra_right) + if 
len_extra_right: + explanation.append( + "Right contains %d more item%s:" + % (len_extra_right, "" if len_extra_right == 1 else "s") + ) + explanation.extend( + highlighter(pprint.pformat({k: right[k] for k in extra_right})).splitlines() + ) + return explanation + + +def _compare_eq_cls( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int +) -> list[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append(f"Omitting {len(same)} identical items, use -vv to show") + elif same: + explanation += ["Matching attributes:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += highlighter(pprint.pformat(diff)).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + f"Drill down into differing attribute {field}:", + f"{indent}{field}: {highlighter(repr(field_left))} != {highlighter(repr(field_right))}", + ] + explanation += [ + indent + line + for line in _compare_eq_any( + field_left, field_right, highlighter, verbose + ) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> list[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, verbose) + newdiff = [f"{saferepr(term, maxsize=42)} is contained here:"] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/.venv/Lib/site-packages/_pytest/cacheprovider.py b/.venv/Lib/site-packages/_pytest/cacheprovider.py new file mode 100644 index 00000000..1b236efd --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,626 @@ +# mypy: allow-untyped-defs +"""Implementation of the cache provider.""" + +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. 
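+# On-disk layout sketch (illustrative; names derive from the constants and
+# key strings used below):
+#
+#   .pytest_cache/
+#       CACHEDIR.TAG            # marks the directory for backup tools
+#       README.md, .gitignore   # created automatically
+#       v/                      # JSON values stored via Cache.set()
+#           cache/lastfailed    # e.g. {"test_mod.py::test_a": true}
+#           cache/nodeids       # node IDs recorded for the --nf plugin
+#       d/                      # directories created via Cache.mkdir()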
+from __future__ import annotations + +import dataclasses +import errno +import json +import os +from pathlib import Path +import tempfile +from typing import final +from typing import Generator +from typing import Iterable + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.nodes import Directory +from _pytest.nodes import File +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html +""" + + +@final +@dataclasses.dataclass +class Cache: + """Instance of the `cache` fixture.""" + + _cachedir: Path = dataclasses.field(repr=False) + _config: Config = dataclasses.field(repr=False) + + # Sub-directory under cache-dir for directories created by `mkdir()`. + _CACHE_PREFIX_DIRS = "d" + + # Sub-directory under cache-dir for values created by `set()`. + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache: + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. 
+ + :meta private: + """ + check_ispytest(_ispytest) + import warnings + + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def _mkdir(self, path: Path) -> None: + self._ensure_cache_dir_and_supporting_files() + path.mkdir(exist_ok=True, parents=True) + + def mkdir(self, name: str) -> Path: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + .. versionadded:: 7.0 + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + self._mkdir(res) + return res + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r", encoding="UTF-8") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + self._mkdir(path.parent) + except OSError as exc: + self.warn( + f"could not create cache path {path}: {exc}", + _ispytest=True, + ) + return + data = json.dumps(value, ensure_ascii=False, indent=2) + try: + f = path.open("w", encoding="UTF-8") + except OSError as exc: + self.warn( + f"cache could not write path {path}: {exc}", + _ispytest=True, + ) + else: + with f: + f.write(data) + + def _ensure_cache_dir_and_supporting_files(self) -> None: + """Create the cache dir and its supporting files.""" + if self._cachedir.is_dir(): + return + + self._cachedir.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory( + prefix="pytest-cache-files-", + dir=self._cachedir.parent, + ) as newpath: + path = Path(newpath) + + # Reset permissions to the default, see #12308. + # Note: there's no way to get the current umask atomically, eek. + umask = os.umask(0o022) + os.umask(umask) + path.chmod(0o777 - umask) + + with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f: + f.write(README_CONTENT) + with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f: + f.write("# Created by pytest automatically.\n*\n") + with open(path.joinpath("CACHEDIR.TAG"), "xb") as f: + f.write(CACHEDIR_TAG_CONTENT) + + try: + path.rename(self._cachedir) + except OSError as e: + # If 2 concurrent pytests both race to the rename, the loser + # gets "Directory not empty" from the rename. 
In this case, + # everything is handled so just continue (while letting the + # temporary directory be cleaned up). + # On Windows, the error is a FileExistsError which translates to EEXIST. + if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): + raise + else: + # Create a directory in place of the one we just moved so that + # `TemporaryDirectory`'s cleanup doesn't complain. + # + # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10. + # See https://github.com/python/cpython/issues/74168. Note that passing + # delete=False would do the wrong thing in case of errors and isn't supported + # until python 3.12. + path.mkdir() + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Generator[None, CollectReport, CollectReport]: + res = yield + if isinstance(collector, (Session, Directory)): + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + + # Use stable sort to prioritize last failed. + def sort_key(node: nodes.Item | nodes.Collector) -> bool: + return node.path in lf_paths + + res.result = sorted( + res.result, + key=sort_key, + reverse=True, + ) + + elif isinstance(collector, File): + if collector.path in self.lfplugin._last_failed_paths: + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. + if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return res + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. 
+ or isinstance(x, nodes.Collector) + ] + + return res + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> CollectReport | None: + if isinstance(collector, File): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: int | None = None + self._report_status: str | None = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> set[Path]: + """Return a set with all Paths of the previously failed nodeids and + their parents.""" + rootpath = self.config.rootpath + result = set() + for nodeid in self.lastfailed: + path = rootpath / nodeid.split("::")[0] + result.add(path) + result.update(path.parents) + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> str | None: + if self.active and self.config.get_verbosity() >= 0: + return f"run-last-failure: {self._report_status}" + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> Generator[None]: + res = yield + + if not self.active: + return res + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. 
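+                # E.g. running "pytest test_a.py" when the failures recorded
+                # last run are all in test_b.py (file names illustrative).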
+ self._report_status = "%d known failures not in selected tests" % ( + len(self.lastfailed), + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = ( + f"rerun previous {self._previously_failed_count} {noun}{suffix}" + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += f" (skipped {self._skipped_files} {files_noun})" + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + return res + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]: + res = yield + + if self.active: + new_items: dict[str, nodes.Item] = {} + other_items: dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + return res + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="Rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="Run all tests, but run the last failures first. 
" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="Run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "Show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="Remove all cache contents at start of test run", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="With ``--lf``, determines whether to execute tests when there " + "are no previously (known) failures or when no " + "cached ``lastfailed`` data was found. " + "``all`` (the default) runs the full test suite again. " + "``none`` just emits a message about no known failures and exits successfully.", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.cacheshow and not config.option.help: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> str | None: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", f"cache values for {glob!r}") + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line(f"{key} contains unreadable content, will be ignored") + else: + tw.line(f"{key} contains:") + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", f"cache directories for {glob!r}") + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size:d}") + return 0 diff --git a/.venv/Lib/site-packages/_pytest/capture.py b/.venv/Lib/site-packages/_pytest/capture.py new file mode 100644 index 00000000..506c0b3d --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/capture.py @@ -0,0 +1,1087 @@ +# mypy: allow-untyped-defs +"""Per-test stdout/stderr capturing mechanism.""" + +from __future__ import annotations + +import abc +import collections +import contextlib +import io +from io import UnsupportedOperation +import os +import sys +from tempfile import TemporaryFile +from types import TracebackType +from typing import Any +from typing import AnyStr +from typing import BinaryIO +from typing import Final +from typing import final +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import NamedTuple +from typing import TextIO +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from typing_extensions import Self + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item +from _pytest.reports import CollectReport + + +_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="Per-test capturing method: one of fd|sys|no|tee-sys", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="Shortcut for --capture=no", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. 
+ """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable,unused-ignore] + return + + raw_stdout = stream.buffer.raw if hasattr(stream.buffer, "raw") else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined,unused-ignore] + return + + def _reopen_stdio(f, mode): + if not hasattr(stream.buffer, "raw") and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(wrapper=True) +def pytest_load_initial_conftests(early_config: Config) -> Generator[None]: + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + try: + try: + yield + finally: + capman.suspend_global_capture() + except BaseException: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + raise + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. 
+ return self.buffer.mode.replace("b", "") + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput(TextIO): + @property + def encoding(self) -> str: + assert sys.__stdin__ is not None + return sys.__stdin__.encoding + + def read(self, size: int = -1) -> str: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + + def __next__(self) -> str: + return self.readline() + + def readlines(self, hint: int | None = -1) -> list[str]: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + def __iter__(self) -> Iterator[str]: + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def flush(self) -> None: + raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + def readable(self) -> bool: + return False + + def seek(self, offset: int, whence: int = 0) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)") + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()") + + def truncate(self, size: int | None = None) -> int: + raise UnsupportedOperation("cannot truncate stdin") + + def write(self, data: str) -> int: + raise UnsupportedOperation("cannot write to stdin") + + def writelines(self, lines: Iterable[str]) -> None: + raise UnsupportedOperation("Cannot write to stdin") + + def writable(self) -> bool: + return False + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + pass + + @property + def buffer(self) -> BinaryIO: + # The str/bytes doesn't actually matter in this type, so OK to fake. + return self # type: ignore[return-value] + + +# Capture classes. 
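+#
+# Every implementation below follows the lifecycle defined by CaptureBase:
+# start() -> snap()/suspend()/resume() -> done(). An illustrative sketch of
+# driving one capture object directly (illustrative only, not upstream code):
+#
+#     cap = SysCapture(1)   # patch sys.stdout with an in-memory CaptureIO
+#     cap.start()
+#     print("hello")
+#     assert cap.snap() == "hello\n"
+#     cap.done()            # restore the original sys.stdout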
+ + +class CaptureBase(abc.ABC, Generic[AnyStr]): + EMPTY_BUFFER: AnyStr + + @abc.abstractmethod + def __init__(self, fd: int) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def done(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def suspend(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def resume(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def writeorg(self, data: AnyStr) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def snap(self) -> AnyStr: + raise NotImplementedError() + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture(CaptureBase[str]): + EMPTY_BUFFER = "" + + def __init__(self, fd: int) -> None: + pass + + def start(self) -> None: + pass + + def done(self) -> None: + pass + + def suspend(self) -> None: + pass + + def resume(self) -> None: + pass + + def snap(self) -> str: + return "" + + def writeorg(self, data: str) -> None: + pass + + +class SysCaptureBase(CaptureBase[AnyStr]): + def __init__( + self, fd: int, tmpfile: TextIO | None = None, *, tee: bool = False + ) -> None: + name = patchsysdict[fd] + self._old: TextIO = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + +class SysCaptureBinary(SysCaptureBase[bytes]): + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBase[str]): + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + assert isinstance(self.tmpfile, CaptureIO) + res = 
self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBase(CaptureBase[AnyStr]): + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: int | None = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull, encoding="utf-8") + self.syscapture: CaptureBase[str] = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture(targetfd) + + self._state = "initialized" + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.targetfd} oldfd={self.targetfd_save} " + f"_state={self._state!r} tmpfile={self.tmpfile!r}>" + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + +class FDCaptureBinary(FDCaptureBase[bytes]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. 
+ """ + + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBase[str]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces text. + """ + + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + # XXX use encoding of original stream + os.write(self.targetfd_save, data.encode("utf-8")) + + +# MultiCapture + + +# Generic NamedTuple only supported since Python 3.11. +if sys.version_info >= (3, 11) or TYPE_CHECKING: + + @final + class CaptureResult(NamedTuple, Generic[AnyStr]): + """The result of :method:`caplog.readouterr() `.""" + + out: AnyStr + err: AnyStr + +else: + + class CaptureResult( + collections.namedtuple("CaptureResult", ["out", "err"]), # noqa: PYI024 + Generic[AnyStr], + ): + """The result of :method:`caplog.readouterr() `.""" + + __slots__ = () + + +class MultiCapture(Generic[AnyStr]): + _state = None + _in_suspended = False + + def __init__( + self, + in_: CaptureBase[AnyStr] | None, + out: CaptureBase[AnyStr] | None, + err: CaptureBase[AnyStr] | None, + ) -> None: + self.in_: CaptureBase[AnyStr] | None = in_ + self.out: CaptureBase[AnyStr] | None = out + self.err: CaptureBase[AnyStr] | None = err + + def __repr__(self) -> str: + return ( + f"" + ) + + def start_capturing(self) -> None: + self._state = "started" + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self) -> tuple[AnyStr, AnyStr]: + """Pop current snapshot out/err capture and flush to orig streams.""" + out, err = self.readouterr() + if out: + assert self.out is not None + self.out.writeorg(out) + if err: + assert self.err is not None + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_: bool = False) -> None: + self._state = "suspended" + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self) -> None: + self._state = "started" + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if self._in_suspended: + assert self.in_ is not None + self.in_.resume() + self._in_suspended = False + + def stop_capturing(self) -> None: + """Stop capturing and reset capturing streams.""" + if self._state == "stopped": + raise ValueError("was already stopped") + self._state = "stopped" + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def is_started(self) -> bool: + """Whether actively capturing -- not suspended or stopped.""" + return self._state == "started" + + def readouterr(self) -> CaptureResult[AnyStr]: + out = self.out.snap() if self.out else "" + err = self.err.snap() if self.err else "" + # TODO: This type error is real, need to fix. 
+        return CaptureResult(out, err)  # type: ignore[arg-type]
+
+
+def _get_multicapture(method: _CaptureMethod) -> MultiCapture[str]:
+    if method == "fd":
+        return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2))
+    elif method == "sys":
+        return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2))
+    elif method == "no":
+        return MultiCapture(in_=None, out=None, err=None)
+    elif method == "tee-sys":
+        return MultiCapture(
+            in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
+        )
+    raise ValueError(f"unknown capturing method: {method!r}")
+
+
+# CaptureManager and CaptureFixture
+
+
+class CaptureManager:
+    """The capture plugin.
+
+    Makes sure the appropriate capture method is enabled/disabled during
+    collection and each test phase (setup, call, teardown). After each of
+    those points, the captured output is obtained and attached to the
+    collection/runtest report.
+
+    There are two levels of capture:
+
+    * global: enabled by default and can be suppressed by the ``-s``
+      option. This is always enabled/disabled during collection and each test
+      phase.
+
+    * fixture: when a test function or one of its fixtures depends on the
+      ``capsys`` or ``capfd`` fixtures. In this case special handling is
+      needed to ensure the fixtures take precedence over the global capture.
+    """
+
+    def __init__(self, method: _CaptureMethod) -> None:
+        self._method: Final = method
+        self._global_capturing: MultiCapture[str] | None = None
+        self._capture_fixture: CaptureFixture[Any] | None = None
+
+    def __repr__(self) -> str:
+        return (
+            f"<CaptureManager _method={self._method!r} _global_capturing={self._global_capturing!r} "
+            f"_capture_fixture={self._capture_fixture!r}>"
+        )
+
+    def is_capturing(self) -> str | bool:
+        if self.is_globally_capturing():
+            return "global"
+        if self._capture_fixture:
+            return f"fixture {self._capture_fixture.request.fixturename}"
+        return False
+
+    # Global capturing control
+
+    def is_globally_capturing(self) -> bool:
+        return self._method != "no"
+
+    def start_global_capturing(self) -> None:
+        assert self._global_capturing is None
+        self._global_capturing = _get_multicapture(self._method)
+        self._global_capturing.start_capturing()
+
+    def stop_global_capturing(self) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.pop_outerr_to_orig()
+            self._global_capturing.stop_capturing()
+            self._global_capturing = None
+
+    def resume_global_capture(self) -> None:
+        # During teardown of the python process, and on rare occasions, capture
+        # attributes can be `None` while trying to resume global capture.
+        if self._global_capturing is not None:
+            self._global_capturing.resume_capturing()
+
+    def suspend_global_capture(self, in_: bool = False) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.suspend_capturing(in_=in_)
+
+    def suspend(self, in_: bool = False) -> None:
+        # Need to undo local capsys-et-al if it exists before disabling global capture.
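+        # The fixture capture is layered on top of the global capture, so it
+        # is suspended first; resume() below restores them in reverse order.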
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: CaptureFixture[Any]) -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + f"cannot use {requested_fixture} and {current_fixture} at the same time" + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: Collector + ) -> Generator[None, CollectReport, CollectReport]: + if isinstance(collector, File): + self.resume_global_capture() + try: + rep = yield + finally: + self.suspend_global_capture() + out, err = self.read_global_capture() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + rep = yield + return rep + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None]: + with self.item_capture("setup", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None]: + with self.item_capture("call", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None]: + with self.item_capture("teardown", item): + return (yield) + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the :fixture:`capsys`, 
:fixture:`capsysbinary`,
+    :fixture:`capfd` and :fixture:`capfdbinary` fixtures."""
+
+    def __init__(
+        self,
+        captureclass: type[CaptureBase[AnyStr]],
+        request: SubRequest,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self.captureclass: type[CaptureBase[AnyStr]] = captureclass
+        self.request = request
+        self._capture: MultiCapture[AnyStr] | None = None
+        self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER
+        self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER
+
+    def _start(self) -> None:
+        if self._capture is None:
+            self._capture = MultiCapture(
+                in_=None,
+                out=self.captureclass(1),
+                err=self.captureclass(2),
+            )
+            self._capture.start_capturing()
+
+    def close(self) -> None:
+        if self._capture is not None:
+            out, err = self._capture.pop_outerr_to_orig()
+            self._captured_out += out
+            self._captured_err += err
+            self._capture.stop_capturing()
+            self._capture = None
+
+    def readouterr(self) -> CaptureResult[AnyStr]:
+        """Read and return the captured output so far, resetting the internal
+        buffer.
+
+        :returns:
+            The captured content as a namedtuple with ``out`` and ``err``
+            string attributes.
+        """
+        captured_out, captured_err = self._captured_out, self._captured_err
+        if self._capture is not None:
+            out, err = self._capture.readouterr()
+            captured_out += out
+            captured_err += err
+        self._captured_out = self.captureclass.EMPTY_BUFFER
+        self._captured_err = self.captureclass.EMPTY_BUFFER
+        return CaptureResult(captured_out, captured_err)
+
+    def _suspend(self) -> None:
+        """Suspend this fixture's own capturing temporarily."""
+        if self._capture is not None:
+            self._capture.suspend_capturing()
+
+    def _resume(self) -> None:
+        """Resume this fixture's own capturing temporarily."""
+        if self._capture is not None:
+            self._capture.resume_capturing()
+
+    def _is_started(self) -> bool:
+        """Whether actively capturing -- not disabled or closed."""
+        if self._capture is not None:
+            return self._capture.is_started()
+        return False
+
+    @contextlib.contextmanager
+    def disabled(self) -> Generator[None]:
+        """Temporarily disable capturing while inside the ``with`` block."""
+        capmanager: CaptureManager = self.request.config.pluginmanager.getplugin(
+            "capturemanager"
+        )
+        with capmanager.global_and_fixture_disabled():
+            yield
+
+
+# The fixtures.
+
+
+@fixture
+def capsys(request: SubRequest) -> Generator[CaptureFixture[str]]:
+    r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+
+    The captured output is made available via ``capsys.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``text`` objects.
+
+    Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_output(capsys):
+            print("hello")
+            captured = capsys.readouterr()
+            assert captured.out == "hello\n"
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(SysCapture, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
+
+
+@fixture
+def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]:
+    r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+
+    The captured output is made available via ``capsysbinary.readouterr()``
+    method calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``bytes`` objects.
+
+    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_output(capsysbinary):
+            print("hello")
+            captured = capsysbinary.readouterr()
+            assert captured.out == b"hello\n"
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(SysCaptureBinary, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
+
+
+@fixture
+def capfd(request: SubRequest) -> Generator[CaptureFixture[str]]:
+    r"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfd.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``text`` objects.
+
+    Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfd):
+            os.system('echo "hello"')
+            captured = capfd.readouterr()
+            assert captured.out == "hello\n"
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(FDCapture, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
+
+
+@fixture
+def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]:
+    r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfdbinary.readouterr()``
+    method calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``bytes`` objects.
+
+    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfdbinary):
+            os.system('echo "hello"')
+            captured = capfdbinary.readouterr()
+            assert captured.out == b"hello\n"
+
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
diff --git a/.venv/Lib/site-packages/_pytest/compat.py b/.venv/Lib/site-packages/_pytest/compat.py
new file mode 100644
index 00000000..614848e0
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/compat.py
@@ -0,0 +1,351 @@
+# mypy: allow-untyped-defs
+"""Python version compatibility code."""
+
+from __future__ import annotations
+
+import dataclasses
+import enum
+import functools
+import inspect
+from inspect import Parameter
+from inspect import signature
+import os
+from pathlib import Path
+import sys
+from typing import Any
+from typing import Callable
+from typing import Final
+from typing import NoReturn
+
+import py
+
+
+#: constant to prepare valuing pylib path replacements/lazy proxies later on
+# intended for removal in pytest 8.0 or 9.0
+
+# fmt: off
+# intentional space to create a fake difference for the verification
+LEGACY_PATH = py.path.
local +# fmt: on + + +def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: Final = NotSetType.token +# fmt: on + + +def is_generator(func: object) -> bool: + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin coroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return "%s:%d" % (relfn, lineno + 1) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., object], + *, + name: str = "", + cls: type | None = None, +) -> tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The cls arguments indicate that the function should be treated as a bound + method even though it's not unless the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. 
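+    # For example, "def f(a, b=1, *, c): ..." yields ("a", "c"); "b" is
+    # skipped because it has a default.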
+ try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def ascii_escaped(val: bytes | str) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes strings into a sequence of escaped unicode ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = val.decode("ascii", "backslashreplace") + else: + ret = val.encode("unicode_escape").decode("ascii") + return ret.translate(_non_printable_ascii_translate_table) + + +@dataclasses.dataclass +class _PytestWrapper: + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object when we are + creating fixtures, because we wrap the function object ourselves with a + decorator to issue warnings when the fixture function is called directly. 
+ """ + + obj: Any + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial.""" + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + from _pytest._io.saferepr import saferepr + + raise ValueError( + f"could not find real function of {saferepr(start_obj)}\nstopped at {saferepr(obj)}" + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """Attempt to obtain the real function object that might be wrapping + ``obj``, while at the same time returning a bound method to ``holder`` if + the original object was a bound method.""" + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: # pragma: no cover + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +def get_user_id() -> int | None: + """Return the current process's real user id or None if it could not be + determined. + + :return: The user id or None if it could not be determined. + """ + # mypy follows the version and platform checking expectation of PEP 484: + # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks + # Containment checks are too complex for mypy v1.5.0 and cause failure. + if sys.platform == "win32" or sys.platform == "emscripten": + # win32 does not have a getuid() function. + # Emscripten has a return 0 stub. + return None + else: + # On other platforms, a return value of -1 is assumed to indicate that + # the current process's real user id could not be determined. + ERROR = -1 + uid = os.getuid() + return uid if uid != ERROR else None + + +# Perform exhaustiveness checking. +# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int { +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember ourselves to go and update the handle +# function to handle the new variant. 
+# +# With `assert_never` we can do better: +# +# // raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also work for Enums (if you use `is` to compare) and Literals. +def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" diff --git a/.venv/Lib/site-packages/_pytest/config/__init__.py b/.venv/Lib/site-packages/_pytest/config/__init__.py new file mode 100644 index 00000000..3cca1479 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1972 @@ +# mypy: allow-untyped-defs +"""Command line options, ini-file and conftest.py processing.""" + +from __future__ import annotations + +import argparse +import collections.abc +import copy +import dataclasses +import enum +from functools import lru_cache +import glob +import importlib.metadata +import inspect +import os +import pathlib +import re +import shlex +import sys +from textwrap import dedent +import types +from types import FunctionType +from typing import Any +from typing import Callable +from typing import cast +from typing import Final +from typing import final +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import Sequence +from typing import TextIO +from typing import Type +from typing import TYPE_CHECKING +import warnings + +import pluggy +from pluggy import HookimplMarker +from pluggy import HookimplOpts +from pluggy import HookspecMarker +from pluggy import HookspecOpts +from pluggy import PluginManager + +from .compat import PathAwareHookProxy +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest import __version__ +import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._code.code import TracebackStyle +from _pytest._io import TerminalWriter +from _pytest.config.argparsing import Argument +from _pytest.config.argparsing import Parser +import _pytest.deprecated +import _pytest.hookspec +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.pathlib import safe_exists +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import warn_explicit_for + + +if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + from _pytest.terminal import TerminalReporter + + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. 
+ INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: pathlib.Path, + *, + cause: Exception, + ) -> None: + self.path = path + self.cause = cause + + def __str__(self) -> str: + return f"{type(self.cause).__name__}: {self.cause} (from {self.path})" + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> int | ExitCode: + """Perform an in-process test run. + + :param args: + List of command line arguments. If `None` or not given, defaults to reading + arguments directly from the process command line (:data:`sys.argv`). + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + old_pytest_version = os.environ.get("PYTEST_VERSION") + try: + os.environ["PYTEST_VERSION"] = __version__ + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo.from_exception(e.cause) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + finally: + if old_pytest_version is None: + os.environ.pop("PYTEST_VERSION", None) + else: + os.environ["PYTEST_VERSION"] = old_pytest_version + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. 
+ """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = ( + *essential_plugins, + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", + "python_path", + "unraisableexception", + "threadexception", + "faulthandler", +) + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") +builtin_plugins.add("pytester_assertions") + + +def get_config( + args: list[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=pathlib.Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> PytestPluginManager: + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + if args is None: + args = sys.argv[1:] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = ( # type:ignore[unreachable] + "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + ) + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +def _get_directory(path: pathlib.Path) -> pathlib.Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +def _get_legacy_hook_marks( + method: Any, + hook_type: str, + opt_names: tuple[str, ...], +) -> dict[str, bool]: + if TYPE_CHECKING: + # abuse typeguard from importlib to avoid massive method type union that's lacking an alias + assert inspect.isroutine(method) + known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: list[str] = [] + opts: dict[str, bool] = {} + for opt_name in opt_names: + opt_attr = getattr(method, opt_name, AttributeError) + if opt_attr is not AttributeError: + must_warn.append(f"{opt_name}={opt_attr}") + opts[opt_name] = True + elif opt_name in known_marks: + must_warn.append(f"{opt_name}=True") + opts[opt_name] = True + else: + opts[opt_name] = False + if 
must_warn: + hook_opts = ", ".join(must_warn) + message = _pytest.deprecated.HOOK_LEGACY_MARKING.format( + type=hook_type, + fullname=method.__qualname__, + hook_opts=hook_opts, + ) + warn_explicit_for(cast(FunctionType, method), message) + return opts + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + import _pytest.assertion + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: set[types.ModuleType] = set() + # All conftest modules applicable for a directory. + # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: dict[pathlib.Path, list[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: pathlib.Path | None = None + # If set, conftest loading is skipped. + self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: list[tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts( + self, plugin: _PluggyPlugin, name: str + ) -> HookimplOpts | None: + """:meta private:""" + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return None + # Ignore names which cannot be hooks. + if name == "pytest_plugins": + return None + + opts = super().parse_hookimpl_opts(plugin, name) + if opts is not None: + return opts + + method = getattr(plugin, name) + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return None + # Collect unmarked hooks as long as they have the `pytest_' prefix. 
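+        # Illustration (hypothetical plugin code, not part of pytest): the
+        # legacy attribute style that _get_legacy_hook_marks detects, e.g.
+        #
+        #     def pytest_collection_modifyitems(items):
+        #         ...
+        #     pytest_collection_modifyitems.tryfirst = True
+        #
+        # is deprecated; the modern equivalent uses the hookimpl decorator:
+        #
+        #     import pytest
+        #
+        #     @pytest.hookimpl(tryfirst=True)
+        #     def pytest_collection_modifyitems(items):
+        #         ...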
+ return _get_legacy_hook_marks( # type: ignore[return-value] + method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") + ) + + def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: + """:meta private:""" + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = _get_legacy_hook_marks( # type: ignore[assignment] + method, + "spec", + ("firstresult", "historic"), + ) + return opts + + def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + plugin_name = super().register(plugin, name) + if plugin_name is not None: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict( + plugin=plugin, + plugin_name=plugin_name, + manager=self, + ) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return plugin_name + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. + plugin: _PluggyPlugin | None = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: Config) -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible. " + "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible. " + "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, + args: Sequence[str | pathlib.Path], + pyargs: bool, + noconftest: bool, + rootpath: pathlib.Path, + confcutdir: pathlib.Path | None, + invocation_dir: pathlib.Path, + importmode: ImportMode | str, + *, + consider_namespace_packages: bool, + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + self._confcutdir = ( + absolutepath(invocation_dir / confcutdir) if confcutdir else None + ) + self._noconftest = noconftest + self._using_pyargs = pyargs + foundanchor = False + for initial_path in args: + path = str(initial_path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(invocation_dir / path) + + # Ensure we do not break if what appears to be an anchor + # is in fact a very long option (#10169, #11394). 
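+            # Example (hypothetical layout): for an initial argument such as
+            # "tests/unit/test_foo.py::TestBar::test_baz", the node-id suffix
+            # was stripped above, and conftests are then loaded for the
+            # anchor's directory and each parent up to the confcutdir:
+            #
+            #     conftest.py             <- loaded (rootdir)
+            #     tests/conftest.py       <- loaded
+            #     tests/unit/conftest.py  <- loaded
+            #     tests/unit/test_foo.py  <- the anchor itself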
+ if safe_exists(anchor): + self._try_load_conftest( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + foundanchor = True + if not foundanchor: + self._try_load_conftest( + invocation_dir, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _is_in_confcutdir(self, path: pathlib.Path) -> bool: + """Whether to consider the given path to load conftests from.""" + if self._confcutdir is None: + return True + # The semantics here are literally: + # Do not load a conftest if it is found upwards from confcut dir. + # But this is *not* the same as: + # Load only conftests from confcutdir or below. + # At first glance they might seem the same thing, however we do support use cases where + # we want to load conftests that are not found in confcutdir or below, but are found + # in completely different directory hierarchies like packages installed + # in out-of-source trees. + # (see #9767 for a regression where the logic was inverted). + return path not in self._confcutdir.parents + + def _try_load_conftest( + self, + anchor: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + self._loadconftestmodules( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._loadconftestmodules( + x, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _loadconftestmodules( + self, + path: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + if self._noconftest: + return + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + if directory in self._dirpath2confmods: + return + + clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest( + conftestpath, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + clist.append(mod) + self._dirpath2confmods[directory] = clist + + def _getconftestmodules(self, path: pathlib.Path) -> Sequence[types.ModuleType]: + directory = self._get_directory(path) + return self._dirpath2confmods.get(directory, ()) + + def _rget_with_confmod( + self, + name: str, + path: pathlib.Path, + ) -> tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, + conftestpath: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> types.ModuleType: + conftestpath_plugin_name = str(conftestpath) + existing = self.get_plugin(conftestpath_plugin_name) + if existing is not None: + return cast(types.ModuleType, existing) + + # conftest.py files there are not in a Python package all have module + # name "conftest", and thus conflict with each other. Clear the existing + # before loading the new one, otherwise the existing one will be + # returned from the module cache. 
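+        # Sketch of the clash handled below (hypothetical layout): two
+        # conftest files that live outside any package would both import
+        # under the module name "conftest":
+        #
+        #     tests_a/conftest.py  -> sys.modules["conftest"]
+        #     tests_b/conftest.py  -> sys.modules["conftest"]  (shadowed!)
+        #
+        # hence the sys.modules eviction below before importing the new one.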
+ pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + try: + del sys.modules[conftestpath.stem] + except KeyError: + pass + + try: + mod = import_path( + conftestpath, + mode=importmode, + root=rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + except Exception as e: + assert e.__traceback__ is not None + raise ConftestImportFailure(conftestpath, cause=e) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + if mod in mods: + raise AssertionError( + f"While trying to load conftest path {conftestpath!s}, " + f"found that the module {mod} is already loaded with path {mod.__file__}. " + "This is not supposed to happen. Please report this issue to pytest." + ) + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod, registration_name=conftestpath_plugin_name) + return mod + + def _check_non_top_pytest_plugins( + self, + mod: types.ModuleType, + conftestpath: pathlib.Path, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + """:meta private:""" + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + parg = parg.strip() + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError(f"plugin {name} cannot be disabled") + + # PR #4304: remove stepwise if cacheprovider is blocked. + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. 
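+            # Example invocations that end up in this method via
+            # consider_preparse (for illustration; the plugin names are
+            # placeholders):
+            #
+            #     pytest -p no:cacheprovider   # block a plugin (also blocks
+            #                                  # stepwise, see PR #4304 above)
+            #     pytest -p xdist              # request a plugin by name
+            #     pytest -pno:warnings         # the glued "-pNAME" form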
+            self.unblock(name)
+            if not name.startswith("pytest_"):
+                self.unblock("pytest_" + name)
+            self.import_plugin(arg, consider_entry_points=True)
+
+    def consider_conftest(
+        self, conftestmodule: types.ModuleType, registration_name: str
+    ) -> None:
+        """:meta private:"""
+        self.register(conftestmodule, name=registration_name)
+
+    def consider_env(self) -> None:
+        """:meta private:"""
+        self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
+
+    def consider_module(self, mod: types.ModuleType) -> None:
+        """:meta private:"""
+        self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
+
+    def _import_plugin_specs(
+        self, spec: None | types.ModuleType | str | Sequence[str]
+    ) -> None:
+        plugins = _get_plugin_specs_as_list(spec)
+        for import_spec in plugins:
+            self.import_plugin(import_spec)
+
+    def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None:
+        """Import a plugin with ``modname``.
+
+        If ``consider_entry_points`` is True, entry point names are also
+        considered to find a plugin.
+        """
+        # Most often modname refers to builtin modules, e.g. "pytester",
+        # "terminal" or "capture". Those plugins are registered under their
+        # basename for historic purposes but must be imported with the
+        # _pytest prefix.
+        assert isinstance(
+            modname, str
+        ), f"module name as text required, got {modname!r}"
+        if self.is_blocked(modname) or self.get_plugin(modname) is not None:
+            return
+
+        importspec = "_pytest." + modname if modname in builtin_plugins else modname
+        self.rewrite_hook.mark_rewrite(importspec)
+
+        if consider_entry_points:
+            loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
+            if loaded:
+                return
+
+        try:
+            __import__(importspec)
+        except ImportError as e:
+            raise ImportError(
+                f'Error importing plugin "{modname}": {e.args[0]}'
+            ).with_traceback(e.__traceback__) from e
+
+        except Skipped as e:
+            self.skipped_plugins.append((modname, e.msg or ""))
+        else:
+            mod = sys.modules[importspec]
+            self.register(mod, modname)
+
+
+def _get_plugin_specs_as_list(
+    specs: None | types.ModuleType | str | Sequence[str],
+) -> list[str]:
+    """Parse a plugins specification into a list of plugin names."""
+    # None means empty.
+    if specs is None:
+        return []
+    # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
+    if isinstance(specs, types.ModuleType):
+        return []
+    # Comma-separated list.
+    if isinstance(specs, str):
+        return specs.split(",") if specs else []
+    # Direct specification.
+    if isinstance(specs, collections.abc.Sequence):
+        return list(specs)
+    raise UsageError(
+        f"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: {specs!r}"
+    )
+
+
+class Notset:
+    def __repr__(self):
+        return "<NOTSET>"
+
+
+notset = Notset()
+
+
+def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
+    """Given an iterable of file names in a source distribution, return the "names" that should
+    be marked for assertion rewrite.
+
+    For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in
+    the assertion rewrite mechanism.
+
+    This function has to deal with dist-info based distributions and egg based distributions
+    (which are still very much in use for "editable" installs).
+ + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + # as well as editable installation finder modules made by setuptools + if module_name != "setup" and not module_name.startswith("__editable__"): + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @dataclasses.dataclass(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: tuple[str, ...] + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Sequence[str | _PluggyPlugin] | None + """Extra plugins, might be `None`.""" + dir: pathlib.Path + """The directory from which :func:`pytest.main` was invoked. :type: pathlib.Path""" + + def __init__( + self, + *, + args: Iterable[str], + plugins: Sequence[str | _PluggyPlugin] | None, + dir: pathlib.Path, + ) -> None: + object.__setattr__(self, "args", tuple(args)) + object.__setattr__(self, "plugins", plugins) + object.__setattr__(self, "dir", dir) + + class ArgsSource(enum.Enum): + """Indicates the source of the test arguments. + + .. versionadded:: 7.2 + """ + + #: Command line arguments. + ARGS = enum.auto() + #: Invocation directory. + INVOCATION_DIR = enum.auto() + INCOVATION_DIR = INVOCATION_DIR # backwards compatibility alias + #: 'testpaths' configuration value. + TESTPATHS = enum.auto() + + # Set by cacheprovider plugin. 
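+    # Illustration (hypothetical plugin code): a plugin can check where the
+    # test arguments came from, e.g. to report a fallback to the invocation
+    # directory:
+    #
+    #     def pytest_configure(config):
+    #         if config.args_source is config.ArgsSource.INVOCATION_DIR:
+    #             print("no args or testpaths given; collecting from cwd")
+    #
+    # (The `cache` attribute below is the one set by the cacheprovider
+    # plugin, as noted above.)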
+    cache: Cache
+
+    def __init__(
+        self,
+        pluginmanager: PytestPluginManager,
+        *,
+        invocation_params: InvocationParams | None = None,
+    ) -> None:
+        from .argparsing import FILE_OR_DIR
+        from .argparsing import Parser
+
+        if invocation_params is None:
+            invocation_params = self.InvocationParams(
+                args=(), plugins=None, dir=pathlib.Path.cwd()
+            )
+
+        self.option = argparse.Namespace()
+        """Access to command line option as attributes.
+
+        :type: argparse.Namespace
+        """
+
+        self.invocation_params = invocation_params
+        """The parameters with which pytest was invoked.
+
+        :type: InvocationParams
+        """
+
+        _a = FILE_OR_DIR
+        self._parser = Parser(
+            usage=f"%(prog)s [options] [{_a}] [{_a}] [...]",
+            processopt=self._processopt,
+            _ispytest=True,
+        )
+        self.pluginmanager = pluginmanager
+        """The plugin manager handles plugin registration and hook invocation.
+
+        :type: PytestPluginManager
+        """
+
+        self.stash = Stash()
+        """A place where plugins can store information on the config for their
+        own use.
+
+        :type: Stash
+        """
+        # Deprecated alias. Was never public. Can be removed in a few releases.
+        self._store = self.stash
+
+        self.trace = self.pluginmanager.trace.root.get("config")
+        self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook)  # type: ignore[assignment]
+        self._inicache: dict[str, Any] = {}
+        self._override_ini: Sequence[str] = ()
+        self._opt2dest: dict[str, str] = {}
+        self._cleanup: list[Callable[[], None]] = []
+        self.pluginmanager.register(self, "pytestconfig")
+        self._configured = False
+        self.hook.pytest_addoption.call_historic(
+            kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager)
+        )
+        self.args_source = Config.ArgsSource.ARGS
+        self.args: list[str] = []
+
+    @property
+    def rootpath(self) -> pathlib.Path:
+        """The path to the :ref:`rootdir <rootdir>`.
+
+        :type: pathlib.Path
+
+        .. versionadded:: 6.1
+        """
+        return self._rootpath
+
+    @property
+    def inipath(self) -> pathlib.Path | None:
+        """The path to the :ref:`configfile <configfiles>`.
+
+        .. versionadded:: 6.1
+        """
+        return self._inipath
+
+    def add_cleanup(self, func: Callable[[], None]) -> None:
+        """Add a function to be called when the config object gets out of
+        use (usually coinciding with pytest_unconfigure)."""
+        self._cleanup.append(func)
+
+    def _do_configure(self) -> None:
+        assert not self._configured
+        self._configured = True
+        with warnings.catch_warnings():
+            warnings.simplefilter("default")
+            self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
+
+    def _ensure_unconfigure(self) -> None:
+        if self._configured:
+            self._configured = False
+            self.hook.pytest_unconfigure(config=self)
+            self.hook.pytest_configure._call_history = []
+        while self._cleanup:
+            fin = self._cleanup.pop()
+            fin()
+
+    def get_terminal_writer(self) -> TerminalWriter:
+        terminalreporter: TerminalReporter | None = self.pluginmanager.get_plugin(
+            "terminalreporter"
+        )
+        assert terminalreporter is not None
+        return terminalreporter._tw
+
+    def pytest_cmdline_parse(
+        self, pluginmanager: PytestPluginManager, args: list[str]
+    ) -> Config:
+        try:
+            self.parse(args)
+        except UsageError:
+            # Handle --version and --help here in a minimal fashion.
+            # This gets done via helpconfig normally, but its
+            # pytest_cmdline_main is not called in case of errors.
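+            # For reference (illustrative fixture code, assuming the project
+            # keeps a pyproject.toml at its root): the rootpath/inipath
+            # properties above are the supported way to locate the project
+            # from a test or plugin:
+            #
+            #     def test_paths(pytestconfig):
+            #         assert (pytestconfig.rootpath / "pyproject.toml").exists()
+            #         print(pytestconfig.inipath)  # may be None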
+            if getattr(self.option, "version", False) or "--version" in args:
+                from _pytest.helpconfig import showversion
+
+                showversion(self)
+            elif (
+                getattr(self.option, "help", False) or "--help" in args or "-h" in args
+            ):
+                self._parser._getparser().print_help()
+                sys.stdout.write(
+                    "\nNOTE: displaying only minimal help due to UsageError.\n\n"
+                )
+
+            raise
+
+        return self
+
+    def notify_exception(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        option: argparse.Namespace | None = None,
+    ) -> None:
+        if option and getattr(option, "fulltrace", False):
+            style: TracebackStyle = "long"
+        else:
+            style = "native"
+        excrepr = excinfo.getrepr(
+            funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
+        )
+        res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
+        if not any(res):
+            for line in str(excrepr).split("\n"):
+                sys.stderr.write(f"INTERNALERROR> {line}\n")
+            sys.stderr.flush()
+
+    def cwd_relative_nodeid(self, nodeid: str) -> str:
+        # nodeid's are relative to the rootpath, compute relative to cwd.
+        if self.invocation_params.dir != self.rootpath:
+            base_path_part, *nodeid_part = nodeid.split("::")
+            # Only process path part
+            fullpath = self.rootpath / base_path_part
+            relative_path = bestrelpath(self.invocation_params.dir, fullpath)
+
+            nodeid = "::".join([relative_path, *nodeid_part])
+        return nodeid
+
+    @classmethod
+    def fromdictargs(cls, option_dict, args) -> Config:
+        """Constructor usable for subprocesses."""
+        config = get_config(args)
+        config.option.__dict__.update(option_dict)
+        config.parse(args, addopts=False)
+        for x in config.option.plugins:
+            config.pluginmanager.consider_pluginarg(x)
+        return config
+
+    def _processopt(self, opt: Argument) -> None:
+        for name in opt._short_opts + opt._long_opts:
+            self._opt2dest[name] = opt.dest
+
+        if hasattr(opt, "default"):
+            if not hasattr(self.option, opt.dest):
+                setattr(self.option, opt.dest, opt.default)
+
+    @hookimpl(trylast=True)
+    def pytest_load_initial_conftests(self, early_config: Config) -> None:
+        # We haven't fully parsed the command line arguments yet, so
+        # early_config.args is not set yet. But we need it for
+        # discovering the initial conftests. So "pre-run" the logic here.
+        # It will be done for real in `parse()`.
+ args, args_source = early_config._decide_args( + args=early_config.known_args_namespace.file_or_dir, + pyargs=early_config.known_args_namespace.pyargs, + testpaths=early_config.getini("testpaths"), + invocation_dir=early_config.invocation_params.dir, + rootpath=early_config.rootpath, + warn=False, + ) + self.pluginmanager._set_initial_conftests( + args=args, + pyargs=early_config.known_args_namespace.pyargs, + noconftest=early_config.known_args_namespace.noconftest, + rootpath=early_config.rootpath, + confcutdir=early_config.known_args_namespace.confcutdir, + invocation_dir=early_config.invocation_params.dir, + importmode=early_config.known_args_namespace.importmode, + consider_namespace_packages=early_config.getini( + "consider_namespace_packages" + ), + ) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + inifile=ns.inifilename, + args=ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + invocation_dir=self.invocation_params.dir, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from distribution package entry points, + # no need to continue. + return + + package_files = ( + str(file) + for dist in importlib.metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args: list[str], via: str) -> list[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _decide_args( + self, + *, + args: list[str], + pyargs: bool, + testpaths: list[str], + invocation_dir: pathlib.Path, + rootpath: pathlib.Path, + warn: bool, + ) -> tuple[list[str], ArgsSource]: + """Decide the args (initial paths/nodeids) to use given the relevant inputs. 
+
+        :param warn: Whether we can issue warnings.
+
+        :returns: The args and the args source. Guaranteed to be non-empty.
+        """
+        if args:
+            source = Config.ArgsSource.ARGS
+            result = args
+        else:
+            if invocation_dir == rootpath:
+                source = Config.ArgsSource.TESTPATHS
+                if pyargs:
+                    result = testpaths
+                else:
+                    result = []
+                    for path in testpaths:
+                        result.extend(sorted(glob.iglob(path, recursive=True)))
+                    if testpaths and not result:
+                        if warn:
+                            warning_text = (
+                                "No files were found in testpaths; "
+                                "consider removing or adjusting your testpaths configuration. "
+                                "Searching recursively from the current directory instead."
+                            )
+                            self.issue_config_time_warning(
+                                PytestConfigWarning(warning_text), stacklevel=3
+                            )
+            else:
+                result = []
+            if not result:
+                source = Config.ArgsSource.INVOCATION_DIR
+                result = [str(invocation_dir)]
+        return result, source
+
+    def _preparse(self, args: list[str], addopts: bool = True) -> None:
+        if addopts:
+            env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
+            if len(env_addopts):
+                args[:] = (
+                    self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
+                    + args
+                )
+        self._initini(args)
+        if addopts:
+            args[:] = (
+                self._validate_args(self.getini("addopts"), "via addopts config") + args
+            )
+
+        self.known_args_namespace = self._parser.parse_known_args(
+            args, namespace=copy.copy(self.option)
+        )
+        self._checkversion()
+        self._consider_importhook(args)
+        self.pluginmanager.consider_preparse(args, exclude_only=False)
+        if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
+            # Autoload plugins from entry points, unless the user disabled it
+            # via PYTEST_DISABLE_PLUGIN_AUTOLOAD (in which case only
+            # explicitly specified plugins are loaded).
+            self.pluginmanager.load_setuptools_entrypoints("pytest11")
+        self.pluginmanager.consider_env()
+
+        self.known_args_namespace = self._parser.parse_known_args(
+            args, namespace=copy.copy(self.known_args_namespace)
+        )
+
+        self._validate_plugins()
+        self._warn_about_skipped_plugins()
+
+        if self.known_args_namespace.confcutdir is None:
+            if self.inipath is not None:
+                confcutdir = str(self.inipath.parent)
+            else:
+                confcutdir = str(self.rootpath)
+            self.known_args_namespace.confcutdir = confcutdir
+        try:
+            self.hook.pytest_load_initial_conftests(
+                early_config=self, args=args, parser=self._parser
+            )
+        except ConftestImportFailure as e:
+            if self.known_args_namespace.help or self.known_args_namespace.version:
+                # we don't want to prevent --help/--version from working,
+                # so just let it pass and print a warning at the end
+                self.issue_config_time_warning(
+                    PytestConfigWarning(f"could not load initial conftests: {e.path}"),
+                    stacklevel=2,
+                )
+            else:
+                raise
+
+    @hookimpl(wrapper=True)
+    def pytest_collection(self) -> Generator[None, object, object]:
+        # Validate invalid ini keys after collection is done so we take into
+        # account options added by late-loading conftest files.
+        try:
+            return (yield)
+        finally:
+            self._validate_config_options()
+
+    def _checkversion(self) -> None:
+        import pytest
+
+        minver = self.inicfg.get("minversion", None)
+        if minver:
+            # Imported lazily to improve start-up time.
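+            # Example of what _decide_args above implements (hypothetical
+            # configuration):
+            #
+            #     # pytest.ini
+            #     [pytest]
+            #     testpaths = tests integration
+            #
+            # Running a bare `pytest` from the rootdir expands those globs;
+            # if they match nothing, a PytestConfigWarning is issued and
+            # collection falls back to the invocation directory.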
+            from packaging.version import Version
+
+            if not isinstance(minver, str):
+                raise pytest.UsageError(
+                    f"{self.inipath}: 'minversion' must be a single value"
+                )
+
+            if Version(minver) > Version(pytest.__version__):
+                raise pytest.UsageError(
+                    f"{self.inipath}: 'minversion' requires pytest-{minver}, actual pytest-{pytest.__version__}"
+                )
+
+    def _validate_config_options(self) -> None:
+        for key in sorted(self._get_unknown_ini_keys()):
+            self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
+
+    def _validate_plugins(self) -> None:
+        required_plugins = sorted(self.getini("required_plugins"))
+        if not required_plugins:
+            return
+
+        # Imported lazily to improve start-up time.
+        from packaging.requirements import InvalidRequirement
+        from packaging.requirements import Requirement
+        from packaging.version import Version
+
+        plugin_info = self.pluginmanager.list_plugin_distinfo()
+        plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}
+
+        missing_plugins = []
+        for required_plugin in required_plugins:
+            try:
+                req = Requirement(required_plugin)
+            except InvalidRequirement:
+                missing_plugins.append(required_plugin)
+                continue
+
+            if req.name not in plugin_dist_info:
+                missing_plugins.append(required_plugin)
+            elif not req.specifier.contains(
+                Version(plugin_dist_info[req.name]), prereleases=True
+            ):
+                missing_plugins.append(required_plugin)
+
+        if missing_plugins:
+            raise UsageError(
+                "Missing required plugins: {}".format(", ".join(missing_plugins)),
+            )
+
+    def _warn_or_fail_if_strict(self, message: str) -> None:
+        if self.known_args_namespace.strict_config:
+            raise UsageError(message)
+
+        self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
+
+    def _get_unknown_ini_keys(self) -> list[str]:
+        parser_inicfg = self._parser._inidict
+        return [name for name in self.inicfg if name not in parser_inicfg]
+
+    def parse(self, args: list[str], addopts: bool = True) -> None:
+        # Parse given cmdline arguments into this config object.
+        assert (
+            self.args == []
+        ), "can only parse cmdline args at most once per Config object"
+        self.hook.pytest_addhooks.call_historic(
+            kwargs=dict(pluginmanager=self.pluginmanager)
+        )
+        self._preparse(args, addopts=addopts)
+        self._parser.after_preparse = True  # type: ignore
+        try:
+            args = self._parser.parse_setoption(
+                args, self.option, namespace=self.option
+            )
+            self.args, self.args_source = self._decide_args(
+                args=args,
+                pyargs=self.known_args_namespace.pyargs,
+                testpaths=self.getini("testpaths"),
+                invocation_dir=self.invocation_params.dir,
+                rootpath=self.rootpath,
+                warn=True,
+            )
+        except PrintHelp:
+            pass
+
+    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
+        """Issue and handle a warning during the "configure" stage.
+
+        During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item``
+        function because it is not possible to have hook wrappers around ``pytest_configure``.
+
+        This function is mainly intended for plugins that need to issue warnings during
+        ``pytest_configure`` (or similar stages).
+
+        :param warning: The warning instance.
+        :param stacklevel: stacklevel forwarded to warnings.warn.
+ """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str): + """Return configuration value from an :ref:`ini file `. + + If a configuration value is not defined in an + :ref:`ini file `, then the ``default`` value provided while + registering the configuration through + :func:`parser.addini ` will be returned. + Please note that you can even provide ``None`` as a valid + default value. + + If ``default`` is not provided while registering using + :func:`parser.addini `, then a default value + based on the ``type`` parameter passed to + :func:`parser.addini ` will be returned. + The default values based on ``type`` are: + ``paths``, ``pathlist``, ``args`` and ``linelist`` : empty list ``[]`` + ``bool`` : ``False`` + ``string`` : empty string ``""`` + + If neither the ``default`` nor the ``type`` parameter is passed + while registering the configuration through + :func:`parser.addini `, then the configuration + is treated as a string and a default empty string '' is returned. + + If the specified name hasn't been registered through a prior + :func:`parser.addini ` call (usually from a + plugin), a ValueError is raised. + """ + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: str | list[str]): + msg = f"unknown configuration type: {type}" + raise ValueError(msg, value) # pragma: no cover + + def _getini(self, name: str): + try: + description, type, default = self._parser._inidict[name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + override_value = self._get_override_ini_value(name) + if override_value is None: + try: + value = self.inicfg[name] + except KeyError: + return default + else: + value = override_value + # Coerce the values based on types. + # + # Note: some coercions are only required if we are reading from .ini files, because + # the file format doesn't contain type information, but when reading from toml we will + # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). + # For example: + # + # ini: + # a_line_list = "tests acceptance" + # in this case, we need to split the string to obtain a list of strings. + # + # toml: + # a_line_list = ["tests", "acceptance"] + # in this case, we already have a list ready to use. 
+ # + if type == "paths": + dp = ( + self.inipath.parent + if self.inipath is not None + else self.invocation_params.dir + ) + input_values = shlex.split(value) if isinstance(value, str) else value + return [dp / x for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + elif type == "string": + return value + elif type is None: + return value + else: + return self._getini_unknown_type(name, type, value) + + def _getconftest_pathlist( + self, name: str, path: pathlib.Path + ) -> list[pathlib.Path] | None: + try: + mod, relroots = self.pluginmanager._rget_with_confmod(name, path) + except KeyError: + return None + assert mod.__file__ is not None + modpath = pathlib.Path(mod.__file__).parent + values: list[pathlib.Path] = [] + for relroot in relroots: + if isinstance(relroot, os.PathLike): + relroot = pathlib.Path(relroot) + else: + relroot = relroot.replace("/", os.sep) + relroot = absolutepath(modpath / relroot) + values.append(relroot) + return values + + def _get_override_ini_value(self, name: str) -> str | None: + value = None + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + f"-o/--override-ini expects option=value style (got: {ini_config!r})." + ) from e + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name: str, default=notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Default value if no option of that name exists. + :param skip: If True, raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`). + VERBOSITY_ASSERTIONS: Final = "assertions" + #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`). + VERBOSITY_TEST_CASES: Final = "test_cases" + _VERBOSITY_INI_DEFAULT: Final = "auto" + + def get_verbosity(self, verbosity_type: str | None = None) -> int: + r"""Retrieve the verbosity level for a fine-grained verbosity type. + + :param verbosity_type: Verbosity type to get level for. If a level is + configured for the given type, that value will be returned. If the + given type is not a known verbosity type, the global verbosity + level will be returned. 
If the given type is None (default), the + global verbosity level will be returned. + + To configure a level for a fine-grained verbosity type, the + configuration file should have a setting for the configuration name + and a numeric value for the verbosity level. A special value of "auto" + can be used to explicitly use the global verbosity level. + + Example: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + verbosity_assertions = 2 + + .. code-block:: console + + pytest -v + + .. code-block:: python + + print(config.get_verbosity()) # 1 + print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS)) # 2 + """ + global_level = self.getoption("verbose", default=0) + assert isinstance(global_level, int) + if verbosity_type is None: + return global_level + + ini_name = Config._verbosity_ini_name(verbosity_type) + if ini_name not in self._parser._inidict: + return global_level + + level = self.getini(ini_name) + if level == Config._VERBOSITY_INI_DEFAULT: + return global_level + + return int(level) + + @staticmethod + def _verbosity_ini_name(verbosity_type: str) -> str: + return f"verbosity_{verbosity_type}" + + @staticmethod + def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None: + """Add a output verbosity configuration option for the given output type. + + :param parser: Parser for command line arguments and ini-file values. + :param verbosity_type: Fine-grained verbosity category. + :param help: Description of the output this type controls. + + The value should be retrieved via a call to + :py:func:`config.get_verbosity(type) `. + """ + parser.addini( + Config._verbosity_ini_name(verbosity_type), + help=help, + type="string", + default=Config._VERBOSITY_INI_DEFAULT, + ) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" + ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), + stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: TextIO | None = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. + + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> tuple[warnings._ActionKind, str, type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. + """ + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + + parts = arg.split(":") + if len(parts) > 5: + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: warnings._ActionKind = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) from None + try: + category: type[Warning] = _resolve_warning_category(category_) + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) from None + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) from None + else: + lineno = 0 + return action, message, category, module, lineno + + +def _resolve_warning_category(category: str) -> type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(Type[Warning], cat) + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. 
+    for arg in config_filters:
+        warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
+
+    for arg in cmdline_filters:
+        warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
diff --git a/.venv/Lib/site-packages/_pytest/config/argparsing.py b/.venv/Lib/site-packages/_pytest/config/argparsing.py
new file mode 100644
index 00000000..85aa4632
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/config/argparsing.py
@@ -0,0 +1,551 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import argparse
+from gettext import gettext
+import os
+import sys
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import final
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import NoReturn
+from typing import Sequence
+
+import _pytest._io
+from _pytest.config.exceptions import UsageError
+from _pytest.deprecated import check_ispytest
+
+
+FILE_OR_DIR = "file_or_dir"
+
+
+class NotSet:
+    def __repr__(self) -> str:
+        return "<notset>"
+
+
+NOT_SET = NotSet()
+
+
+@final
+class Parser:
+    """Parser for command line arguments and ini-file values.
+
+    :ivar extra_info: Dict of generic param -> value to display in case
+        there's an error processing the command line arguments.
+    """
+
+    prog: str | None = None
+
+    def __init__(
+        self,
+        usage: str | None = None,
+        processopt: Callable[[Argument], None] | None = None,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True)
+        self._groups: list[OptionGroup] = []
+        self._processopt = processopt
+        self._usage = usage
+        self._inidict: dict[str, tuple[str, str | None, Any]] = {}
+        self._ininames: list[str] = []
+        self.extra_info: dict[str, Any] = {}
+
+    def processoption(self, option: Argument) -> None:
+        if self._processopt:
+            if option.dest:
+                self._processopt(option)
+
+    def getgroup(
+        self, name: str, description: str = "", after: str | None = None
+    ) -> OptionGroup:
+        """Get (or create) a named option Group.
+
+        :param name: Name of the option group.
+        :param description: Long description for --help output.
+        :param after: Name of another group, used for ordering --help output.
+        :returns: The option group.
+
+        The returned group object has an ``addoption`` method with the same
+        signature as :func:`parser.addoption <pytest.Parser.addoption>` but
+        will be shown in the respective group in the output of
+        ``pytest --help``.
+        """
+        for group in self._groups:
+            if group.name == name:
+                return group
+        group = OptionGroup(name, description, parser=self, _ispytest=True)
+        i = 0
+        for i, grp in enumerate(self._groups):
+            if grp.name == after:
+                break
+        self._groups.insert(i + 1, group)
+        return group
+
+    def addoption(self, *opts: str, **attrs: Any) -> None:
+        """Register a command line option.
+
+        :param opts:
+            Option names, can be short or long options.
+        :param attrs:
+            Same attributes as the argparse library's :meth:`add_argument()
+            <argparse.ArgumentParser.add_argument>` function accepts.
+
+        After command line parsing, options are available on the pytest config
+        object via ``config.option.NAME`` where ``NAME`` is usually set
+        by passing a ``dest`` attribute, for example
+        ``addoption("--long", dest="NAME", ...)``.
+ """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + strargs = [os.fspath(x) for x in args] + return self.optparser.parse_args(strargs, namespace=namespace) + + def _getparser(self) -> MyOptionParser: + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = [*self._groups, self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") + # bash like autocompletion for dirs (appending '/') + # Type ignored because typeshed doesn't know about argcomplete. + file_or_dir_arg.completer = filescompleter # type: ignore + return optparser + + def parse_setoption( + self, + args: Sequence[str | os.PathLike[str]], + option: argparse.Namespace, + namespace: argparse.Namespace | None = None, + ) -> list[str]: + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return cast(List[str], getattr(parsedoption, FILE_OR_DIR)) + + def parse_known_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Parse the known arguments at this point. + + :returns: An argparse namespace object. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[str | os.PathLike[str]], + namespace: argparse.Namespace | None = None, + ) -> tuple[argparse.Namespace, list[str]]: + """Parse the known arguments at this point, and also return the + remaining unknown arguments. + + :returns: + A tuple containing an argparse namespace object for the known + arguments, and a list of the unknown arguments. + """ + optparser = self._getparser() + strargs = [os.fspath(x) for x in args] + return optparser.parse_known_args(strargs, namespace=namespace) + + def addini( + self, + name: str, + help: str, + type: Literal["string", "paths", "pathlist", "args", "linelist", "bool"] + | None = None, + default: Any = NOT_SET, + ) -> None: + """Register an ini-file option. + + :param name: + Name of the ini-variable. + :param type: + Type of the variable. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + + For ``paths`` and ``pathlist`` types, they are considered relative to the ini-file. + In case the execution is happening without an ini-file defined, + they will be considered relative to the current working directory (for example with ``--override-ini``). + + .. versionadded:: 7.0 + The ``paths`` variable type. + + .. versionadded:: 8.1 + Use the current working directory to resolve ``paths`` and ``pathlist`` in the absence of an ini-file. + + Defaults to ``string`` if ``None`` or not passed. 
+        :param default:
+            Default value if no ini-file option exists but is queried.
+
+        The value of ini-variables can be retrieved via a call to
+        :py:func:`config.getini(name) <pytest.Config.getini>`.
+        """
+        assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool")
+        if default is NOT_SET:
+            default = get_ini_default_for_type(type)
+
+        self._inidict[name] = (help, type, default)
+        self._ininames.append(name)
+
+
+def get_ini_default_for_type(
+    type: Literal["string", "paths", "pathlist", "args", "linelist", "bool"] | None,
+) -> Any:
+    """
+    Used by addini to get the default value for a given ini-option type, when
+    default is not supplied.
+    """
+    if type is None:
+        return ""
+    elif type in ("paths", "pathlist", "args", "linelist"):
+        return []
+    elif type == "bool":
+        return False
+    else:
+        return ""
+
+
+class ArgumentError(Exception):
+    """Raised if an Argument instance is created with invalid or
+    inconsistent arguments."""

+    def __init__(self, msg: str, option: Argument | str) -> None:
+        self.msg = msg
+        self.option_id = str(option)
+
+    def __str__(self) -> str:
+        if self.option_id:
+            return f"option {self.option_id}: {self.msg}"
+        else:
+            return self.msg
+
+
+class Argument:
+    """Class that mimics the necessary behaviour of optparse.Option.
+
+    It's currently a least-effort implementation which ignores choices
+    and integer prefixes.
+
+    https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
+    """
+
+    def __init__(self, *names: str, **attrs: Any) -> None:
+        """Store params in private vars for use in add_argument."""
+        self._attrs = attrs
+        self._short_opts: list[str] = []
+        self._long_opts: list[str] = []
+        try:
+            self.type = attrs["type"]
+        except KeyError:
+            pass
+        try:
+            # Attribute existence is tested in Config._processopt.
+            self.default = attrs["default"]
+        except KeyError:
+            pass
+        self._set_opt_strings(names)
+        dest: str | None = attrs.get("dest")
+        if dest:
+            self.dest = dest
+        elif self._long_opts:
+            self.dest = self._long_opts[0][2:].replace("-", "_")
+        else:
+            try:
+                self.dest = self._short_opts[0][1:]
+            except IndexError as e:
+                self.dest = "???"  # Needed for the error repr.
+                raise ArgumentError("need a long or short option", self) from e
+
+    def names(self) -> list[str]:
+        return self._short_opts + self._long_opts
+
+    def attrs(self) -> Mapping[str, Any]:
+        # Update any attributes set by processopt.
+        attrs = "default dest help".split()
+        attrs.append(self.dest)
+        for attr in attrs:
+            try:
+                self._attrs[attr] = getattr(self, attr)
+            except AttributeError:
+                pass
+        return self._attrs
+
+    def _set_opt_strings(self, opts: Sequence[str]) -> None:
+        """Directly from optparse.
+
+        Might not be necessary as this is passed to argparse later on.
+ """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + f"invalid option string {opt!r}: " + "must be at least two characters long", + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + f"invalid short option string {opt!r}: " + "must be of the form -x, (x any non-dash char)", + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + f"invalid long option string {opt!r}: " + "must start with --, followed by non-dash", + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: list[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + """A group of options shown in its own section.""" + + def __init__( + self, + name: str, + description: str = "", + parser: Parser | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = name + self.description = description + self.options: list[Argument] = [] + self.parser = parser + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :meth:`add_argument() + ` function accepts. + """ + conflict = set(opts).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError(f"option names {conflict} already added") + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *opts: str, **attrs: Any) -> None: + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: Argument, shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + extra_info: dict[str, Any] | None = None, + prog: str | None = None, + ) -> None: + self._parser = parser + super().__init__( + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + fromfile_prefix_chars="@", + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. 
+ self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> NoReturn: + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + msg = f"{msg} ({self._parser._config_source_hint})" + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. + def parse_args( # type: ignore + self, + args: Sequence[str] | None = None, + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = [ + "unrecognized arguments: {}".format(" ".join(unrecognized)) + ] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + if sys.version_info < (3, 9): # pragma: no cover + # Backport of https://github.com/python/cpython/pull/14316 so we can + # disable long --argument abbreviations without breaking short flags. + def _parse_optional( + self, arg_string: str + ) -> tuple[argparse.Action | None, str, str | None] | None: + if not arg_string: + return None + if arg_string[0] not in self.prefix_chars: + return None + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + if len(arg_string) == 1: + return None + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + if self.allow_abbrev or not arg_string.startswith("--"): + option_tuples = self._get_option_tuples(arg_string) + if len(option_tuples) > 1: + msg = gettext( + "ambiguous option: %(option)s could match %(matches)s" + ) + options = ", ".join(option for _, option, _ in option_tuples) + self.error(msg % {"option": arg_string, "matches": options}) + elif len(option_tuples) == 1: + (option_tuple,) = option_tuples + return option_tuple + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + if " " in arg_string: + return None + return None, arg_string, None + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. 
+ if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = super()._format_action_invocation(action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: str | None = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + f'long optional argument without "--": [{option}]', option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. + """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines diff --git a/.venv/Lib/site-packages/_pytest/config/compat.py b/.venv/Lib/site-packages/_pytest/config/compat.py new file mode 100644 index 00000000..2856d85d --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/config/compat.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +import functools +from pathlib import Path +from typing import Any +from typing import Mapping +import warnings + +import pluggy + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG + + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks: Mapping[str, tuple[str, str]] = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_relay: pluggy.HookRelay) -> None: + self._hook_relay = hook_relay + + def __dir__(self) -> list[str]: + return dir(self._hook_relay) + + def __getattr__(self, key: str) -> pluggy.HookCaller: + hook: pluggy.HookCaller = getattr(self._hook_relay, key) + 
if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @functools.wraps(hook) + def fixed_hook(**kw: Any) -> Any: + path_value: Path | None = kw.pop(path_var, None) + fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None) + if fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.name = hook.name # type: ignore[attr-defined] + fixed_hook.spec = hook.spec # type: ignore[attr-defined] + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook # type: ignore[return-value] diff --git a/.venv/Lib/site-packages/_pytest/config/exceptions.py b/.venv/Lib/site-packages/_pytest/config/exceptions.py new file mode 100644 index 00000000..90108eca --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from typing import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/.venv/Lib/site-packages/_pytest/config/findpaths.py b/.venv/Lib/site-packages/_pytest/config/findpaths.py new file mode 100644 index 00000000..ce4c990b --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,228 @@ +from __future__ import annotations + +import os +from pathlib import Path +import sys +from typing import Iterable +from typing import Sequence + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.pathlib import safe_exists + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. + """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> dict[str, str | list[str]] | None: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return dict(iniconfig["pytest"].items()) + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name == "pytest.ini": + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. 
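+    # For example, a setup.cfg carrying pytest settings looks like
+    # (``addopts`` is just an illustrative option):
+    #
+    #   [tool:pytest]
+    #   addopts = -ra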
+ elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return dict(iniconfig["tool:pytest"].items()) + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. + elif filepath.suffix == ".toml": + if sys.version_info >= (3, 11): + import tomllib + else: + import tomli as tomllib + + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as exc: + raise UsageError(f"{filepath}: {exc}") from exc + + result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) + if result is not None: + # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), + # however we need to convert all scalar values to str for compatibility with the rest + # of the configuration system, which expects strings only. + def make_scalar(v: object) -> str | list[str]: + return v if isinstance(v, list) else str(v) + + return {k: make_scalar(v) for k, v in result.items()} + + return None + + +def locate_config( + invocation_dir: Path, + args: Iterable[Path], +) -> tuple[Path | None, Path | None, dict[str, str | list[str]]]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict).""" + config_names = [ + "pytest.ini", + ".pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [invocation_dir] + found_pyproject_toml: Path | None = None + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + if p.name == "pyproject.toml" and found_pyproject_toml is None: + found_pyproject_toml = p + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + return base, p, ini_config + if found_pyproject_toml is not None: + return found_pyproject_toml.parent, found_pyproject_toml, {} + return None, None, {} + + +def get_common_ancestor( + invocation_dir: Path, + paths: Iterable[Path], +) -> Path: + common_ancestor: Path | None = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = invocation_dir + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> list[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if 
safe_exists(path)] + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." + + +def determine_setup( + *, + inifile: str | None, + args: Sequence[str], + rootdir_cmd_arg: str | None, + invocation_dir: Path, +) -> tuple[Path, Path | None, dict[str, str | list[str]]]: + """Determine the rootdir, inifile and ini configuration values from the + command line arguments. + + :param inifile: + The `--inifile` command line argument, if given. + :param args: + The free command line arguments. + :param rootdir_cmd_arg: + The `--rootdir` command line argument, if given. + :param invocation_dir: + The working directory when pytest was invoked. + """ + rootdir = None + dirs = get_dirs_from_args(args) + if inifile: + inipath_ = absolutepath(inifile) + inipath: Path | None = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = inipath_.parent + else: + ancestor = get_common_ancestor(invocation_dir, dirs) + rootdir, inipath, inicfg = locate_config(invocation_dir, [ancestor]) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg = locate_config(invocation_dir, dirs) + if rootdir is None: + rootdir = get_common_ancestor( + invocation_dir, [invocation_dir, ancestor] + ) + if is_fs_root(rootdir): + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + f"Directory '{rootdir}' not found. Check your '--rootdir' option." + ) + assert rootdir is not None + return rootdir, inipath, inicfg or {} + + +def is_fs_root(p: Path) -> bool: + r""" + Return True if the given path is pointing to the root of the + file system ("/" on Unix and "C:\\" on Windows for example). 
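+
+    For example (illustrative)::
+
+        is_fs_root(Path("/"))       # True on POSIX
+        is_fs_root(Path("C:\\"))    # True on Windows
+        is_fs_root(Path("/home"))   # False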
+ """ + return os.path.splitdrive(str(p))[1] == os.sep diff --git a/.venv/Lib/site-packages/_pytest/debugging.py b/.venv/Lib/site-packages/_pytest/debugging.py new file mode 100644 index 00000000..2dfe321e --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/debugging.py @@ -0,0 +1,385 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Interactive debugging with PDB, the Python Debugger.""" + +from __future__ import annotations + +import argparse +import functools +import sys +import types +from typing import Any +from typing import Callable +from typing import Generator +import unittest + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.capture import CaptureManager +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="Start the interactive Python debugger on errors or KeyboardInterrupt", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="Specify a custom interactive Python debugger for use with --pdb." + "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config.add_cleanup(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: PytestPluginManager | None = None + _config: Config | None = None + _saved: list[ + tuple[Callable[..., None], PytestPluginManager | None, Config | None] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: tuple[type[Any], type[Any]] | None = None + + @classmethod + def _is_capturing(cls, capman: CaptureManager | None) -> str | bool: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: CaptureManager | None): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. 
+ return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: CaptureManager | None): + import _pytest.config + + class PytestPdbWrapper(pdb_cls): + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + f"PDB continue (IO-capturing resumed for {capturing})", + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def do_quit(self, arg): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: CaptureManager | None = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + f"PDB {method} (IO-capturing turned off for {capturing})", + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: CallInfo[Any], report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + + if not isinstance(call.excinfo.value, unittest.SkipTest): + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace: + @hookimpl(wrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, object, object]: + wrap_pytest_function_for_tracing(pyfuncitem) + return (yield) + + +def wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs) -> None: + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we reuse the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType: + from doctest import UnexpectedException + + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + assert excinfo.value.cause.__traceback__ is not None + return excinfo.value.cause.__traceback__ + else: + assert excinfo._excinfo is not None + return excinfo._excinfo[2] + + +def post_mortem(t: types.TracebackType) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/.venv/Lib/site-packages/_pytest/deprecated.py b/.venv/Lib/site-packages/_pytest/deprecated.py new file mode 100644 index 00000000..a605c24e --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/deprecated.py @@ -0,0 +1,91 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" + +from __future__ import annotations + +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import UnformattedWarning + + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + + +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +# This deprecation is never really meant to be removed. +PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. 
" + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +HOOK_LEGACY_MARKING = UnformattedWarning( + PytestDeprecationWarning, + "The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n" + "Please use the pytest.hook{type}({hook_opts}) decorator instead\n" + " to configure the hooks.\n" + " See https://docs.pytest.org/en/latest/deprecations.html" + "#configuring-hook-specs-impls-using-markers", +) + +MARKED_FIXTURE = PytestRemovedIn9Warning( + "Marks applied to fixtures have no effect\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). + + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/.venv/Lib/site-packages/_pytest/doctest.py b/.venv/Lib/site-packages/_pytest/doctest.py new file mode 100644 index 00000000..384dea97 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/doctest.py @@ -0,0 +1,755 @@ +# mypy: allow-untyped-defs +"""Discover and run doctests in modules and test files.""" + +from __future__ import annotations + +import bdb +from contextlib import contextmanager +import functools +import inspect +import os +from pathlib import Path +import platform +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import Pattern +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import fixture +from _pytest.fixtures import TopRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.python import Module +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import doctest + + from typing_extensions import Self + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: type[doctest.OutputChecker] | None = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + 
"doctest_optionflags", + "Option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "Encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="Run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="Choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="Doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="Ignore doctest collection errors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="For a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + file_path: Path, + parent: Collector, +) -> DoctestModule | DoctestTextfile | None: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + return DoctestModule.from_parent(parent, path=file_path) + elif _is_doctest(config, file_path, parent): + return DoctestTextfile.from_parent(parent, path=file_path) + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> type[doctest.DocTestRunner]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. 
+ """ + + def __init__( + self, + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + exc_info: tuple[type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> doctest.DocTestRunner: + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(Item): + def __init__( + self, + name: str, + parent: DoctestTextfile | DoctestModule, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + + # Stuff needed for fixture support. + self.obj = None + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: DoctestTextfile | DoctestModule, + *, + name: str, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> Self: + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] + + def setup(self) -> None: + self._request._fillfixtures() + globs = dict(getfixture=self._request.getfixturevalue) + for name, value in self._request.getfixturevalue("doctest_namespace").items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: list[doctest.DocTestFailure] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. 
Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + import doctest + + failures: ( + Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None + ) = None + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." 
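+            # Below, append either the want/got diff (for plain doctest
+            # failures) or the formatted traceback (for unexpected exceptions).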
+ if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + return self.path, self.dtest.lineno, f"[doctest] {self.name}" + + +def _get_flag_lookup() -> dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(config: Config) -> int: + optionflags_str = config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config: Config) -> bool: + continue_on_failure: bool = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
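+        # Parse the whole file as a single doctest and create one item for it
+        # if it contains any examples.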
+ encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self.config) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: doctest.DocTest) -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Callable[[Any], Any] | None = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + f"Got {e!r} when unwrapping {func!r}. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080", + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + py_ver_info_minor = sys.version_info[:2] + is_find_lineno_broken = ( + py_ver_info_minor < (3, 11) + or (py_ver_info_minor == (3, 11) and sys.version_info.micro < 9) + or (py_ver_info_minor == (3, 12) and sys.version_info.micro < 3) + ) + if is_find_lineno_broken: + + def _find_lineno(self, obj, source_lines): + """On older Pythons, doctest code does not take into account + `@property`. https://github.com/python/cpython/issues/61648 + + Moreover, wrapped Doctests need to be unwrapped so the correct + line number is returned. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + if sys.version_info < (3, 10): + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + """Override _find to work around issue in stdlib. + + https://github.com/pytest-dev/pytest/issues/3456 + https://github.com/python/cpython/issues/69718 + """ + if _is_mocked(obj): + return # pragma: no cover + with _patch_unwrap_mock_aware(): + # Type ignored because this is a private function. 
+                    super()._find(  # type:ignore[misc]
+                        tests, obj, name, module, source_lines, globs, seen
+                    )
+
+        if sys.version_info < (3, 13):
+
+            def _from_module(self, module, object):
+                """`cached_property` objects are never considered a part
+                of the 'current module'. As such they are skipped by doctest.
+                Here we override `_from_module` to check the underlying
+                function instead. https://github.com/python/cpython/issues/107995
+                """
+                if isinstance(object, functools.cached_property):
+                    object = object.func
+
+                # Type ignored because this is a private function.
+                return super()._from_module(module, object)  # type: ignore[misc]
+
+        try:
+            module = self.obj
+        except Collector.CollectError:
+            if self.config.getvalue("doctest_ignore_import_errors"):
+                skip(f"unable to import module {self.path!r}")
+            else:
+                raise
+
+        # While doctests currently don't support fixtures directly, we still
+        # need to pick up autouse fixtures.
+        self.session._fixturemanager.parsefactories(self)
+
+        # Uses internal doctest module parsing mechanism.
+        finder = MockAwareDocTestFinder()
+        optionflags = get_optionflags(self.config)
+        runner = _get_runner(
+            verbose=False,
+            optionflags=optionflags,
+            checker=_get_checker(),
+            continue_on_failure=_get_continue_on_failure(self.config),
+        )
+
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem.from_parent(
+                    self, name=test.name, runner=runner, dtest=test
+                )
+
+
+def _init_checker_class() -> type[doctest.OutputChecker]:
+    import doctest
+    import re
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        # Based on doctest_nose_plugin.py from the nltk project
+        # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )
+
+        def check_output(self, want: str, got: str, optionflags: int) -> bool:
+            if super().check_output(want, got, optionflags):
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            allow_number = optionflags & _get_number_flag()
+
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False
+
+            def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+                return re.sub(regex, r"\1\2", txt)
+
+            if allow_unicode:
+                want = remove_prefixes(self._unicode_literal_re, want)
+                got = remove_prefixes(self._unicode_literal_re, got)
+
+            if allow_bytes:
+                want = remove_prefixes(self._bytes_literal_re, want)
+                got = remove_prefixes(self._bytes_literal_re, got)
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return super().check_output(want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want: str, got: str) -> str:
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction: str | None = w.group("fraction")
+                exponent: str | None = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                precision = 0 if fraction is None else len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got
+
+    return LiteralsOutputChecker
+
+
+def _get_checker() -> doctest.OutputChecker:
+    """Return a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    global CHECKER_CLASS
+    if CHECKER_CLASS is None:
+        CHECKER_CLASS = _init_checker_class()
+    return CHECKER_CLASS()
+
+
+def _get_allow_unicode_flag() -> int:
+    """Register and return the ALLOW_UNICODE flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+    """Register and return the ALLOW_BYTES flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+    """Register and return the NUMBER flag."""
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+    """Return the actual `doctest` module flag value.
+
+    We want to do it as late as possible to avoid importing `doctest` and all
+    its dependencies when parsing options, as it adds overhead and breaks tests.
+ """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@fixture(scope="session") +def doctest_namespace() -> dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests. + + Usually this fixture is used in conjunction with another ``autouse`` fixture: + + .. code-block:: python + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace["np"] = numpy + + For more details: :ref:`doctest_namespace`. + """ + return dict() diff --git a/.venv/Lib/site-packages/_pytest/faulthandler.py b/.venv/Lib/site-packages/_pytest/faulthandler.py new file mode 100644 index 00000000..d16aea1e --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/faulthandler.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import os +import sys +from typing import Generator + +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey +import pytest + + +fault_handler_original_stderr_fd_key = StashKey[int]() +fault_handler_stderr_fd_key = StashKey[int]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish" + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + # at teardown we want to restore the original faulthandler fileno + # but faulthandler has no api to return the original fileno + # so here we stash the stderr fileno to be used at teardown + # sys.stderr and sys.__stderr__ may be closed or patched during the session + # so we can't rely on their values being good at that point (#11572). + stderr_fileno = get_stderr_fileno() + if faulthandler.is_enabled(): + config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno + config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno) + faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_fd_key in config.stash: + os.close(config.stash[fault_handler_stderr_fd_key]) + del config.stash[fault_handler_stderr_fd_key] + # Re-enable the faulthandler if it was originally enabled. + if fault_handler_original_stderr_fd_key in config.stash: + faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key]) + del config.stash[fault_handler_original_stderr_fd_key] + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, ValueError): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
+ assert sys.__stderr__ is not None + return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + timeout = get_timeout_config_value(item.config) + if timeout > 0: + import faulthandler + + stderr = item.config.stash[fault_handler_stderr_fd_key] + faulthandler.dump_traceback_later(timeout, file=stderr) + try: + return (yield) + finally: + faulthandler.cancel_dump_traceback_later() + else: + return (yield) + + +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/.venv/Lib/site-packages/_pytest/fixtures.py b/.venv/Lib/site-packages/_pytest/fixtures.py new file mode 100644 index 00000000..6b882fa3 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/fixtures.py @@ -0,0 +1,1932 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections import defaultdict +from collections import deque +import dataclasses +import functools +import inspect +import os +from pathlib import Path +import sys +import types +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Final +from typing import final +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Mapping +from typing import MutableMapping +from typing import NoReturn +from typing import Optional +from typing import OrderedDict +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import warnings + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code import Source +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import _PytestWrapper +from _pytest.compat import assert_never +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.main import Session +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import 
absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.scope import _ScopeName +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +if TYPE_CHECKING: + from _pytest.python import CallSpec2 + from _pytest.python import Function + from _pytest.python import Metafunc + + +# The value of the fixture -- return/yield of the fixture function (type variable). +FixtureValue = TypeVar("FixtureValue") +# The type of the fixture function (type variable). +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Union[ + Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]] +] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = Union[ + Tuple[ + # The result. + FixtureValue, + # Cache key. + object, + None, + ], + Tuple[ + None, + # Cache key. + object, + # The exception and the original traceback. + Tuple[BaseException, Optional[types.TracebackType]], + ], +] + + +@dataclasses.dataclass(frozen=True) +class PseudoFixtureDef(Generic[FixtureValue]): + cached_result: _FixtureCachedResult[FixtureValue] + _scope: Scope + + +def pytest_sessionstart(session: Session) -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package( + node: nodes.Item, + fixturedef: FixtureDef[object], +) -> nodes.Node | None: + from _pytest.python import Package + + for parent in node.iter_parents(): + if isinstance(parent, Package) and parent.nodeid == fixturedef.baseid: + return parent + return node.session + + +def get_scope_node(node: nodes.Node, scope: Scope) -> nodes.Node | None: + import _pytest.python + + if scope is Scope.Function: + # Type ignored because this is actually safe, see: + # https://github.com/python/mypy/issues/4717 + return node.getparent(nodes.Item) # type: ignore[type-abstract] + elif scope is Scope.Class: + return node.getparent(_pytest.python.Class) + elif scope is Scope.Module: + return node.getparent(_pytest.python.Module) + elif scope is Scope.Package: + return node.getparent(_pytest.python.Package) + elif scope is Scope.Session: + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +def getfixturemarker(obj: object) -> FixtureFunctionMarker | None: + """Return fixturemarker or None if it doesn't exist or raised + exceptions.""" + return cast( + Optional[FixtureFunctionMarker], + safe_getattr(obj, "_pytestfixturefunction", None), + ) + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +@dataclasses.dataclass(frozen=True) +class FixtureArgKey: + argname: str + param_index: int + scoped_item_path: Path | None + item_cls: type | None + + +_V = TypeVar("_V") +OrderedSet = Dict[_V, None] + + +def get_parametrized_fixture_argkeys( + item: nodes.Item, scope: Scope +) -> Iterator[FixtureArgKey]: + """Return list of keys for all parametrized arguments which match + the specified scope.""" + assert scope is not Scope.Function + + try: + callspec: CallSpec2 = item.callspec # type: ignore[attr-defined] + except AttributeError: + return + + item_cls = None + if scope is Scope.Session: + scoped_item_path = None + elif scope is Scope.Package: + # Package key = module's directory. 
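+        # (so every test module within the same package shares one key)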
+ scoped_item_path = item.path.parent + elif scope is Scope.Module: + scoped_item_path = item.path + elif scope is Scope.Class: + scoped_item_path = item.path + item_cls = item.cls # type: ignore[attr-defined] + else: + assert_never(scope) + + for argname in callspec.indices: + if callspec._arg2scope[argname] != scope: + continue + param_index = callspec.indices[argname] + yield FixtureArgKey(argname, param_index, scoped_item_path, item_cls) + + +def reorder_items(items: Sequence[nodes.Item]) -> list[nodes.Item]: + argkeys_by_item: dict[Scope, dict[nodes.Item, OrderedSet[FixtureArgKey]]] = {} + items_by_argkey: dict[ + Scope, dict[FixtureArgKey, OrderedDict[nodes.Item, None]] + ] = {} + for scope in HIGH_SCOPES: + scoped_argkeys_by_item = argkeys_by_item[scope] = {} + scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(OrderedDict) + for item in items: + argkeys = dict.fromkeys(get_parametrized_fixture_argkeys(item, scope)) + if argkeys: + scoped_argkeys_by_item[item] = argkeys + for argkey in argkeys: + scoped_items_by_argkey[argkey][item] = None + + items_set = dict.fromkeys(items) + return list( + reorder_items_atscope( + items_set, argkeys_by_item, items_by_argkey, Scope.Session + ) + ) + + +def reorder_items_atscope( + items: OrderedSet[nodes.Item], + argkeys_by_item: Mapping[Scope, Mapping[nodes.Item, OrderedSet[FixtureArgKey]]], + items_by_argkey: Mapping[ + Scope, Mapping[FixtureArgKey, OrderedDict[nodes.Item, None]] + ], + scope: Scope, +) -> OrderedSet[nodes.Item]: + if scope is Scope.Function or len(items) < 3: + return items + + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_by_item = argkeys_by_item[scope] + + ignore: set[FixtureArgKey] = set() + items_deque = deque(items) + items_done: OrderedSet[nodes.Item] = {} + while items_deque: + no_argkey_items: OrderedSet[nodes.Item] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_items: + continue + argkeys = dict.fromkeys( + k for k in scoped_argkeys_by_item.get(item, ()) if k not in ignore + ) + if not argkeys: + no_argkey_items[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + items_deque.appendleft(i) + # Fix items_by_argkey order. + for other_scope in HIGH_SCOPES: + other_scoped_items_by_argkey = items_by_argkey[other_scope] + for argkey in argkeys_by_item[other_scope].get(i, ()): + other_scoped_items_by_argkey[argkey][i] = None + other_scoped_items_by_argkey[argkey].move_to_end( + i, last=False + ) + break + if no_argkey_items: + reordered_no_argkey_items = reorder_items_atscope( + no_argkey_items, argkeys_by_item, items_by_argkey, scope.next_lower() + ) + items_done.update(reordered_no_argkey_items) + if slicing_argkey is not None: + ignore.add(slicing_argkey) + return items_done + + +@dataclasses.dataclass(frozen=True) +class FuncFixtureInfo: + """Fixture-related information for a fixture-requesting item (e.g. test + function). + + This is used to examine the fixtures which an item requests statically + (known during collection). This includes autouse fixtures, fixtures + requested by the `usefixtures` marker, fixtures requested in the function + parameters, and the transitive closure of these. 
+
+    An item may also request fixtures dynamically (using `request.getfixturevalue`);
+    these are not reflected here.
+    """
+
+    __slots__ = ("argnames", "initialnames", "names_closure", "name2fixturedefs")
+
+    # Fixture names that the item requests directly by function parameters.
+    argnames: tuple[str, ...]
+    # Fixture names that the item immediately requires. These include
+    # argnames + fixture names specified via usefixtures and via autouse=True in
+    # fixture definitions.
+    initialnames: tuple[str, ...]
+    # The transitive closure of the fixture names that the item requires.
+    # Note: can't include dynamic dependencies (`request.getfixturevalue` calls).
+    names_closure: list[str]
+    # A map from a fixture name in the transitive closure to the FixtureDefs
+    # matching the name which are applicable to this function.
+    # There may be multiple overriding fixtures with the same name. The
+    # sequence is ordered from furthest to closest to the function.
+    name2fixturedefs: dict[str, Sequence[FixtureDef[Any]]]
+
+    def prune_dependency_tree(self) -> None:
+        """Recompute names_closure from initialnames and name2fixturedefs.
+
+        Can only reduce names_closure, which means that the new closure will
+        always be a subset of the old one. The order is preserved.
+
+        This method is needed because direct parametrization may shadow some
+        of the fixtures that were included in the originally built dependency
+        tree. In this way the dependency tree can get pruned, and the closure
+        of argnames may get reduced.
+        """
+        closure: set[str] = set()
+        working_set = set(self.initialnames)
+        while working_set:
+            argname = working_set.pop()
+            # Argname may be something not included in the original names_closure,
+            # in which case we ignore it. This currently happens with pseudo
+            # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
+            # So they introduce the new dependency 'request' which might have
+            # been missing in the original tree (closure).
+            if argname not in closure and argname in self.names_closure:
+                closure.add(argname)
+                if argname in self.name2fixturedefs:
+                    working_set.update(self.name2fixturedefs[argname][-1].argnames)
+
+        self.names_closure[:] = sorted(closure, key=self.names_closure.index)
+
+
+class FixtureRequest(abc.ABC):
+    """The type of the ``request`` fixture.
+
+    A request object gives access to the requesting test context and has a
+    ``param`` attribute in case the fixture is parametrized.
+    """
+
+    def __init__(
+        self,
+        pyfuncitem: Function,
+        fixturename: str | None,
+        arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]],
+        fixture_defs: dict[str, FixtureDef[Any]],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        #: Fixture for which this request is being performed.
+        self.fixturename: Final = fixturename
+        self._pyfuncitem: Final = pyfuncitem
+        # The FixtureDefs for each fixture name requested by this item.
+        # Starts from the statically-known fixturedefs resolved during
+        # collection. Dynamically requested fixtures (using
+        # `request.getfixturevalue("foo")`) are added dynamically.
+        self._arg2fixturedefs: Final = arg2fixturedefs
+        # The evaluated argnames so far, mapping to the FixtureDef they resolved
+        # to.
+        self._fixture_defs: Final = fixture_defs
+        # Notes on the type of `param`:
+        # - `request.param` is only defined in parametrized fixtures, and will raise
+        #   AttributeError otherwise. Python typing has no notion of "undefined", so
+        #   this cannot be reflected in the type.
+ # - Technically `param` is only (possibly) defined on SubRequest, not + # FixtureRequest, but the typing of that is still in flux so this cheats. + # - In the future we might consider using a generic for the param type, but + # for now just using Any. + self.param: Any + + @property + def _fixturemanager(self) -> FixtureManager: + return self._pyfuncitem.session._fixturemanager + + @property + @abc.abstractmethod + def _scope(self) -> Scope: + raise NotImplementedError() + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @abc.abstractmethod + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + raise NotImplementedError() + + @property + def fixturenames(self) -> list[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem.fixturenames) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + @abc.abstractmethod + def node(self): + """Underlying collection node (depends on current request scope).""" + raise NotImplementedError() + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + if self.scope != "function": + return None + return getattr(self._pyfuncitem, "instance", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + mod = self._pyfuncitem.getparent(_pytest.python.Module) + assert mod is not None + return mod.obj + + @property + def path(self) -> Path: + """Path where the test function was collected.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + return self._pyfuncitem.path + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> Session: + """Pytest session object.""" + return self._pyfuncitem.session + + @abc.abstractmethod + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called without arguments after + the last test within the requesting test context finished execution.""" + raise NotImplementedError() + + def applymarker(self, marker: str | MarkDecorator) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. 
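+
+        For example, to mark a single invocation as an expected failure (any
+        marker created via ``pytest.mark`` can be applied the same way)::
+
+            request.applymarker(pytest.mark.xfail(reason="known issue"))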
+
+        :param marker:
+            An object created by a call to ``pytest.mark.NAME(...)``.
+        """
+        self.node.add_marker(marker)
+
+    def raiseerror(self, msg: str | None) -> NoReturn:
+        """Raise a FixtureLookupError exception.
+
+        :param msg:
+            An optional custom error message.
+        """
+        raise FixtureLookupError(None, self, msg)
+
+    def getfixturevalue(self, argname: str) -> Any:
+        """Dynamically run a named fixture function.
+
+        Declaring fixtures via function argument is recommended where possible.
+        But if you can only decide whether to use another fixture at test
+        setup time, you may use this function to retrieve it inside a fixture
+        or test function body.
+
+        This method can be used during the test setup phase or the test run
+        phase, but during the test teardown phase a fixture's value may not
+        be available.
+
+        :param argname:
+            The fixture name.
+        :raises pytest.FixtureLookupError:
+            If the given fixture could not be found.
+        """
+        # Note that in addition to the use case described in the docstring,
+        # getfixturevalue() is also called by pytest itself during item and fixture
+        # setup to evaluate the fixtures that are requested statically
+        # (using function parameters, autouse, etc).
+
+        fixturedef = self._get_active_fixturedef(argname)
+        assert fixturedef.cached_result is not None, (
+            f'The fixture value for "{argname}" is not available. '
+            "This can happen when the fixture has already been torn down."
+        )
+        return fixturedef.cached_result[0]
+
+    def _iter_chain(self) -> Iterator[SubRequest]:
+        """Yield all SubRequests in the chain, from self up.
+
+        Note: does *not* yield the TopRequest.
+        """
+        current = self
+        while isinstance(current, SubRequest):
+            yield current
+            current = current._parent_request
+
+    def _get_active_fixturedef(
+        self, argname: str
+    ) -> FixtureDef[object] | PseudoFixtureDef[object]:
+        if argname == "request":
+            cached_result = (self, [0], None)
+            return PseudoFixtureDef(cached_result, Scope.Function)
+
+        # If we already finished computing a fixture by this name in this item,
+        # return it.
+        fixturedef = self._fixture_defs.get(argname)
+        if fixturedef is not None:
+            self._check_scope(fixturedef, fixturedef._scope)
+            return fixturedef
+
+        # Find the appropriate fixturedef.
+        fixturedefs = self._arg2fixturedefs.get(argname, None)
+        if fixturedefs is None:
+            # We arrive here because of a dynamic call to
+            # getfixturevalue(argname) which was naturally
+            # not known at parsing/collection time.
+            fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem)
+            if fixturedefs is not None:
+                self._arg2fixturedefs[argname] = fixturedefs
+        # No fixtures defined with this name.
+        if fixturedefs is None:
+            raise FixtureLookupError(argname, self)
+        # There are no fixtures with this name applicable for the function.
+        if not fixturedefs:
+            raise FixtureLookupError(argname, self)
+        # A fixture may override another fixture with the same name, e.g. a
+        # fixture in a module can override a fixture in a conftest, a fixture in
+        # a class can override a fixture in the module, and so on.
+        # An overriding fixture can request its own name (possibly indirectly);
+        # in this case it gets the value of the fixture it overrides, one level
+        # up.
+        # Check how many `argname`s deep we are, and take the next one.
+        # `fixturedefs` is sorted from furthest to closest, so use negative
+        # indexing to go in reverse.
+        index = -1
+        for request in self._iter_chain():
+            if request.fixturename == argname:
+                index -= 1
+        # If already consumed all of the available levels, fail.
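+        # For example, with fixturedefs == [conftest 'foo', module 'foo'] and
+        # one enclosing request for 'foo' already in the chain, index becomes
+        # -2 and resolves to the conftest fixture the module fixture overrides.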
+ if -index > len(fixturedefs): + raise FixtureLookupError(argname, self) + fixturedef = fixturedefs[index] + + # Prepare a SubRequest object for calling the fixture. + try: + callspec = self._pyfuncitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # The parametrize invocation scope overrides the fixture's scope. + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + scope = fixturedef._scope + self._check_fixturedef_without_param(fixturedef) + self._check_scope(fixturedef, scope) + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Make sure the fixture value is cached, running it if it isn't + fixturedef.execute(request=subrequest) + + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None: + """Check that this request is allowed to execute this fixturedef without + a param.""" + funcitem = self._pyfuncitem + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n" + f"Node id: {funcitem.nodeid}\n" + f"Function type: {type(funcitem).__name__}" + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str(source_path.relative_to(funcitem.config.rootpath)) + except ValueError: + source_path_str = str(source_path) + location = getlocation(fixturedef.func, funcitem.config.rootpath) + msg = ( + "The requested fixture has no parameter defined for test:\n" + f" {funcitem.nodeid}\n\n" + f"Requested fixture '{fixturedef.argname}' defined in:\n" + f"{location}\n\n" + f"Requested here:\n" + f"{source_path_str}:{source_lineno}" + ) + fail(msg, pytrace=False) + + def _get_fixturestack(self) -> list[FixtureDef[Any]]: + values = [request._fixturedef for request in self._iter_chain()] + values.reverse() + return values + + +@final +class TopRequest(FixtureRequest): + """The type of the ``request`` fixture in a test function.""" + + def __init__(self, pyfuncitem: Function, *, _ispytest: bool = False) -> None: + super().__init__( + fixturename=None, + pyfuncitem=pyfuncitem, + arg2fixturedefs=pyfuncitem._fixtureinfo.name2fixturedefs.copy(), + fixture_defs={}, + _ispytest=_ispytest, + ) + + @property + def _scope(self) -> Scope: + return Scope.Function + + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + # TopRequest always has function scope so always valid. 
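+        # Function is the narrowest scope, so a function-scoped request may
+        # legitimately use fixtures of every scope.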
+        pass
+
+    @property
+    def node(self):
+        return self._pyfuncitem
+
+    def __repr__(self) -> str:
+        return f"<FixtureRequest for {self.node!r}>"
+
+    def _fillfixtures(self) -> None:
+        item = self._pyfuncitem
+        for argname in item.fixturenames:
+            if argname not in item.funcargs:
+                item.funcargs[argname] = self.getfixturevalue(argname)
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self.node.addfinalizer(finalizer)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """The type of the ``request`` fixture in a fixture function requested
+    (transitively) by a test function."""
+
+    def __init__(
+        self,
+        request: FixtureRequest,
+        scope: Scope,
+        param: Any,
+        param_index: int,
+        fixturedef: FixtureDef[object],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        super().__init__(
+            pyfuncitem=request._pyfuncitem,
+            fixturename=fixturedef.argname,
+            fixture_defs=request._fixture_defs,
+            arg2fixturedefs=request._arg2fixturedefs,
+            _ispytest=_ispytest,
+        )
+        self._parent_request: Final[FixtureRequest] = request
+        self._scope_field: Final = scope
+        self._fixturedef: Final[FixtureDef[object]] = fixturedef
+        if param is not NOTSET:
+            self.param = param
+        self.param_index: Final = param_index
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    @property
+    def _scope(self) -> Scope:
+        return self._scope_field
+
+    @property
+    def node(self):
+        scope = self._scope
+        if scope is Scope.Function:
+            # This might also be a non-function Item despite its attribute name.
+            node: nodes.Node | None = self._pyfuncitem
+        elif scope is Scope.Package:
+            node = get_scope_package(self._pyfuncitem, self._fixturedef)
+        else:
+            node = get_scope_node(self._pyfuncitem, scope)
+            if node is None and scope is Scope.Class:
+                # Fallback to function item itself.
+                node = self._pyfuncitem
+        assert node, f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}'
+        return node
+
+    def _check_scope(
+        self,
+        requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object],
+        requested_scope: Scope,
+    ) -> None:
+        if isinstance(requested_fixturedef, PseudoFixtureDef):
+            return
+        if self._scope > requested_scope:
+            # Try to report something helpful.
+            argname = requested_fixturedef.argname
+            fixture_stack = "\n".join(
+                self._format_fixturedef_line(fixturedef)
+                for fixturedef in self._get_fixturestack()
+            )
+            requested_fixture = self._format_fixturedef_line(requested_fixturedef)
+            fail(
+                f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
+                f"fixture {argname} with a {self._scope.value} scoped request object. "
+                f"Requesting fixture stack:\n{fixture_stack}\n"
+                f"Requested fixture:\n{requested_fixture}",
+                pytrace=False,
+            )
+
+    def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str:
+        factory = fixturedef.func
+        path, lineno = getfslineno(factory)
+        if isinstance(path, Path):
+            path = bestrelpath(self._pyfuncitem.session.path, path)
+        signature = inspect.signature(factory)
+        return f"{path}:{lineno + 1}: def {factory.__name__}{signature}"
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self._fixturedef.addfinalizer(finalizer)
+
+
+@final
+class FixtureLookupError(LookupError):
+    """Could not return a requested fixture (missing or invalid)."""
+
+    def __init__(
+        self, argname: str | None, request: FixtureRequest, msg: str | None = None
+    ) -> None:
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self) -> FixtureLookupErrorRepr:
+        tblines: list[str] = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # The last fixture raised an error, let's present
+            # it at the requesting side.
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (OSError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno + 1))
+            else:
+                addline(f"file {fspath}, line {lineno + 1}")
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith("def"):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = set()
+            parent = self.request._pyfuncitem.parent
+            assert parent is not None
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parent))
+                if faclist:
+                    available.add(name)
+            if self.argname in available:
+                msg = (
+                    f" recursive dependency involving fixture '{self.argname}' detected"
+                )
+            else:
+                msg = f"fixture '{self.argname}' not found"
+            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: str | os.PathLike[str], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: str | None, + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, + ) + tw.line() + tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1)) + + +def call_fixture_func( + fixturefunc: _FixtureFunc[FixtureValue], request: FixtureRequest, kwargs +) -> FixtureValue: + if is_generator(fixturefunc): + fixturefunc = cast( + Callable[..., Generator[FixtureValue, None, None]], fixturefunc + ) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fs, lineno = getfslineno(fixturefunc) + fail( + f"fixture function has more than one 'yield':\n\n" + f"{Source(fixturefunc).indent()}\n" + f"{fs}:{lineno + 1}", + pytrace=False, + ) + + +def _eval_scope_callable( + scope_callable: Callable[[str, Config], _ScopeName], + fixture_name: str, + config: Config, +) -> _ScopeName: + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. + result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + f"Error evaluating {scope_callable} while defining fixture '{fixture_name}'.\n" + "Expected a function with the signature (*, fixture_name, config)" + ) from e + if not isinstance(result, str): + fail( + f"Expected {scope_callable} to return a 'str' while defining fixture '{fixture_name}', but it returned:\n" + f"{result!r}", + pytrace=False, + ) + return result + + +@final +class FixtureDef(Generic[FixtureValue]): + """A container for a fixture definition. + + Note: At this time, only explicitly documented fields and methods are + considered public stable API. + """ + + def __init__( + self, + config: Config, + baseid: str | None, + argname: str, + func: _FixtureFunc[FixtureValue], + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None, + params: Sequence[object] | None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + # The "base" node ID for the fixture. + # + # This is a node ID prefix. 
A fixture is only available to a node (e.g. + # a `Function` item) if the fixture's baseid is a nodeid of a parent of + # node. + # + # For a fixture found in a Collector's object (e.g. a `Module`s module, + # a `Class`'s class), the baseid is the Collector's nodeid. + # + # For a fixture found in a conftest plugin, the baseid is the conftest's + # directory path relative to the rootdir. + # + # For other plugins, the baseid is the empty string (always matches). + self.baseid: Final = baseid or "" + # Whether the fixture was found from a node or a conftest in the + # collection tree. Will be false for fixtures defined in non-conftest + # plugins. + self.has_location: Final = baseid is not None + # The fixture factory function. + self.func: Final = func + # The name by which the fixture may be requested. + self.argname: Final = argname + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, config) + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope: Final = scope + # If the fixture is directly parametrized, the parameter values. + self.params: Final = params + # If the fixture is directly parametrized, a tuple of explicit IDs to + # assign to the parameter values, or a callable to generate an ID given + # a parameter value. + self.ids: Final = ids + # The names requested by the fixtures. + self.argnames: Final = getfuncargnames(func, name=argname) + # If the fixture was executed, the current value of the fixture. + # Can change if the fixture is executed with different parameters. + self.cached_result: _FixtureCachedResult[FixtureValue] | None = None + self._finalizers: Final[list[Callable[[], object]]] = [] + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._finalizers.append(finalizer) + + def finish(self, request: SubRequest) -> None: + exceptions: list[BaseException] = [] + while self._finalizers: + fin = self._finalizers.pop() + try: + fin() + except BaseException as e: + exceptions.append(e) + node = request.node + node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # Even if finalization fails, we invalidate the cached fixture + # value and remove all finalizers because they may be bound methods + # which will keep instances alive. + self.cached_result = None + self._finalizers.clear() + if len(exceptions) == 1: + raise exceptions[0] + elif len(exceptions) > 1: + msg = f'errors while tearing down fixture "{self.argname}" of {node}' + raise BaseExceptionGroup(msg, exceptions[::-1]) + + def execute(self, request: SubRequest) -> FixtureValue: + """Return the value of this fixture, executing it if not cached.""" + # Ensure that the dependent fixtures requested by this fixture are loaded. + # This needs to be done before checking if we have a cached value, since + # if a dependent fixture has their cache invalidated, e.g. due to + # parametrization, they finalize themselves and fixtures depending on it + # (which will likely include this fixture) setting `self.cached_result = None`. 
+        # See #4871
+        requested_fixtures_that_should_finalize_us = []
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            # Saves requested fixtures in a list so we later can add our finalizer
+            # to them, ensuring that if a requested fixture gets torn down we get torn
+            # down first. This is generally handled by SetupState, but still currently
+            # needed when this fixture is not parametrized but depends on a parametrized
+            # fixture.
+            if not isinstance(fixturedef, PseudoFixtureDef):
+                requested_fixtures_that_should_finalize_us.append(fixturedef)
+
+        # Check for (and return) cached value/exception.
+        if self.cached_result is not None:
+            request_cache_key = self.cache_key(request)
+            cache_key = self.cached_result[1]
+            try:
+                # Attempt to make a normal == check: this might fail for objects
+                # which do not implement the standard comparison (like numpy arrays -- #6497).
+                cache_hit = bool(request_cache_key == cache_key)
+            except (ValueError, RuntimeError):
+                # If the comparison raises, use 'is' as fallback.
+                cache_hit = request_cache_key is cache_key
+
+            if cache_hit:
+                if self.cached_result[2] is not None:
+                    exc, exc_tb = self.cached_result[2]
+                    raise exc.with_traceback(exc_tb)
+                else:
+                    result = self.cached_result[0]
+                    return result
+            # We have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one.
+            self.finish(request)
+            assert self.cached_result is None
+
+        # Add finalizer to requested fixtures we saved previously.
+        # We make sure to do this after checking for cached value to avoid
+        # adding our finalizer multiple times. (#12135)
+        finalizer = functools.partial(self.finish, request=request)
+        for parent_fixture in requested_fixtures_that_should_finalize_us:
+            parent_fixture.addfinalizer(finalizer)
+
+        ihook = request.node.ihook
+        try:
+            # Setup the fixture, run the code in it, and cache the value
+            # in self.cached_result
+            result = ihook.pytest_fixture_setup(fixturedef=self, request=request)
+        finally:
+            # schedule our finalizer, even if the setup failed
+            request.node.addfinalizer(finalizer)
+
+        return result
+
+    def cache_key(self, request: SubRequest) -> object:
+        return getattr(request, "param", None)
+
+    def __repr__(self) -> str:
+        return f"<FixtureDef argname={self.argname!r} scope={self.scope!r} baseid={self.baseid!r}>"
+
+
+def resolve_fixture_function(
+    fixturedef: FixtureDef[FixtureValue], request: FixtureRequest
+) -> _FixtureFunc[FixtureValue]:
+    """Get the actual callable that can be called to obtain the fixture
+    value."""
+    fixturefunc = fixturedef.func
+    # The fixture function needs to be bound to the actual
+    # request.instance so that code working with "fixturedef" behaves
+    # as expected.
+    instance = request.instance
+    if instance is not None:
+        # Handle the case where fixture is defined not in a test class, but some other class
+        # (for example a plugin class with a fixture), see #2270.
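+        # In that case the method stays bound to its own instance and is
+        # returned unchanged below.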
+ if hasattr(fixturefunc, "__self__") and not isinstance( + instance, + fixturefunc.__self__.__class__, + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(instance) + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + kwargs[argname] = request.getfixturevalue(argname) + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME as e: + if isinstance(e, skip.Exception): + # The test requested a fixture which caused a skip. + # Don't show the fixture as the skip location, as then the user + # wouldn't know which test skipped. + e._use_item_location = True + fixturedef.cached_result = (None, my_cache_key, (e, e.__traceback__)) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def wrap_function_to_error_out_if_called_directly( + function: FixtureFunction, + fixture_marker: FixtureFunctionMarker, +) -> FixtureFunction: + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function.""" + name = fixture_marker.name or function.__name__ + message = ( + f'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." + ) + + @functools.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # Keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774). + result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] + + return cast(FixtureFunction, result) + + +@final +@dataclasses.dataclass(frozen=True) +class FixtureFunctionMarker: + scope: _ScopeName | Callable[[str, Config], _ScopeName] + params: tuple[object, ...] | None + autouse: bool = False + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None + name: str | None = None + + _ispytest: dataclasses.InitVar[bool] = False + + def __post_init__(self, _ispytest: bool) -> None: + check_ispytest(_ispytest) + + def __call__(self, function: FixtureFunction) -> FixtureFunction: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + f"@pytest.fixture is being applied more than once to the same function {function.__name__!r}" + ) + + if hasattr(function, "pytestmark"): + warnings.warn(MARKED_FIXTURE, stacklevel=2) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + f"'request' is a reserved word for fixtures, use another name:\n {location}", + pytrace=False, + ) + + # Type ignored because https://github.com/python/mypy/issues/2087. 
+    function._pytestfixturefunction = self  # type: ignore[attr-defined]
+    return function
+
+
+@overload
+def fixture(
+    fixture_function: FixtureFunction,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = ...,
+) -> FixtureFunction: ...
+
+
+@overload
+def fixture(
+    fixture_function: None = ...,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = None,
+) -> FixtureFunctionMarker: ...
+
+
+def fixture(
+    fixture_function: FixtureFunction | None = None,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function",
+    params: Iterable[object] | None = None,
+    autouse: bool = False,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = None,
+    name: str | None = None,
+) -> FixtureFunctionMarker | FixtureFunction:
+    """Decorator to mark a fixture factory function.
+
+    This decorator can be used, with or without parameters, to define a
+    fixture function.
+
+    The name of the fixture function can later be referenced to cause its
+    invocation ahead of running tests: test modules or classes can use the
+    ``pytest.mark.usefixtures(fixturename)`` marker.
+
+    Test functions can directly use fixture names as input arguments in which
+    case the fixture instance returned from the fixture function will be
+    injected.
+
+    Fixtures can provide their values to test functions using ``return`` or
+    ``yield`` statements. When using ``yield`` the code block after the
+    ``yield`` statement is executed as teardown code regardless of the test
+    outcome, and must yield exactly once.
+
+    :param scope:
+        The scope for which this fixture is shared; one of ``"function"``
+        (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
+
+        This parameter may also be a callable which receives ``(fixture_name, config)``
+        as parameters, and must return a ``str`` with one of the values mentioned above.
+
+        See :ref:`dynamic scope` in the docs for more information.
+
+    :param params:
+        An optional list of parameters which will cause multiple invocations
+        of the fixture function and all of the tests using it. The current
+        parameter is available in ``request.param``.
+
+    :param autouse:
+        If True, the fixture func is activated for all tests that can see it.
+        If False (the default), an explicit reference is needed to activate
+        the fixture.
+
+    :param ids:
+        Sequence of ids each corresponding to the params so that they are
+        part of the test id. If no ids are provided they will be generated
+        automatically from the params.
+
+    :param name:
+        The name of the fixture. This defaults to the name of the decorated
+        function. If a fixture is used in the same module in which it is
+        defined, the function name of the fixture will be shadowed by the
+        function arg that requests the fixture; one way to resolve this is to
+        name the decorated function ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    fixture_marker = FixtureFunctionMarker(
+        scope=scope,
+        params=tuple(params) if params is not None else None,
+        autouse=autouse,
+        ids=None if ids is None else ids if callable(ids) else tuple(ids),
+        name=name,
+        _ispytest=True,
+    )
+
+    # Direct decoration.
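+    # Covers both spellings:
+    #   @pytest.fixture          -> fixture_function is the decorated function
+    #   @pytest.fixture(...)     -> fixture_function is None; the marker is
+    #                               returned and applied in a second call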
+ if fixture_function: + return fixture_marker(fixture_function) + + return fixture_marker + + +def yield_fixture( + fixture_function=None, + *args, + scope="function", + params=None, + autouse=False, + ids=None, + name=None, +): + """(Return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + warnings.warn(YIELD_FIXTURE, stacklevel=2) + return fixture( + fixture_function, + *args, + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, + ) + + +@fixture(scope="session") +def pytestconfig(request: FixtureRequest) -> Config: + """Session-scoped fixture that returns the session's :class:`pytest.Config` + object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.get_verbosity() > 0: + ... + + """ + return request.config + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "usefixtures", + type="args", + default=[], + help="List of default fixtures to be used with this project", + ) + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="Show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="Show fixtures per test", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def _get_direct_parametrize_args(node: nodes.Node) -> set[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: set[str] = set() + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.update(p_argnames) + return parametrize_argnames + + +def deduplicate_names(*seqs: Iterable[str]) -> tuple[str, ...]: + """De-duplicate the sequence of names while keeping the original order.""" + # Ideally we would use a set, but it does not preserve insertion order. + return tuple(dict.fromkeys(name for seq in seqs for name in seq)) + + +class FixtureManager: + """pytest fixture definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. 
An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + def __init__(self, session: Session) -> None: + self.session = session + self.config: Config = session.config + # Maps a fixture name (argname) to all of the FixtureDefs in the test + # suite/plugins defined with this name. Populated by parsefactories(). + # TODO: The order of the FixtureDefs list of each arg is significant, + # explain. + self._arg2fixturedefs: Final[dict[str, list[FixtureDef[Any]]]] = {} + self._holderobjseen: Final[set[object]] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Final[dict[str, list[str]]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo( + self, + node: nodes.Item, + func: Callable[..., object] | None, + cls: type | None, + ) -> FuncFixtureInfo: + """Calculate the :class:`FuncFixtureInfo` for an item. + + If ``func`` is None, or if the item sets an attribute + ``nofuncargs = True``, then ``func`` is not examined at all. + + :param node: + The item requesting the fixtures. + :param func: + The item's function. + :param cls: + If the function is a method, the method's class. + """ + if func is not None and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + usefixturesnames = self._getusefixturesnames(node) + autousenames = self._getautousenames(node) + initialnames = deduplicate_names(autousenames, usefixturesnames, argnames) + + direct_parametrize_args = _get_direct_parametrize_args(node) + + names_closure, arg2fixturedefs = self.getfixtureclosure( + parentnode=node, + initialnames=initialnames, + ignore_args=direct_parametrize_args, + ) + + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None: + # Fixtures defined in conftest plugins are only visible to within the + # conftest's directory. This is unlike fixtures in non-conftest plugins + # which have global visibility. So for conftests, construct the base + # nodeid from the plugin name (which is the conftest path). + if plugin_name and plugin_name.endswith("conftest.py"): + # Note: we explicitly do *not* use `plugin.__file__` here -- The + # difference is that plugin_name has the correct capitalization on + # case-insensitive systems (Windows) and other normalization issues + # (issue #11816). 
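+            # For example, a plugin registered as "tests/unit/conftest.py"
+            # yields the nodeid "tests/unit", limiting its fixtures to that
+            # subtree of the collection tree.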
+ conftestpath = absolutepath(plugin_name) + try: + nodeid = str(conftestpath.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + else: + nodeid = None + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, node: nodes.Node) -> Iterator[str]: + """Return the names of autouse fixtures applicable to node.""" + for parentnode in node.listchain(): + basenames = self._nodeid_autousenames.get(parentnode.nodeid) + if basenames: + yield from basenames + + def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]: + """Return the names of usefixtures fixtures applicable to node.""" + for mark in node.iter_markers(name="usefixtures"): + yield from mark.args + + def getfixtureclosure( + self, + parentnode: nodes.Node, + initialnames: tuple[str, ...], + ignore_args: AbstractSet[str], + ) -> tuple[list[str], dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). + + fixturenames_closure = list(initialnames) + + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in ignore_args: + continue + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentnode) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + for arg in fixturedefs[-1].argnames: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + def sort_by_scope(arg_name: str) -> Scope: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return Scope.Function + else: + return fixturedefs[-1]._scope + + fixturenames_closure.sort(key=sort_by_scope, reverse=True) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: Metafunc) -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). + for fixturedef in reversed(fixture_defs): + # Fixture is parametrized, apply it and stop. 
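+                # (indirect=True routes each parameter through the fixture
+                # itself rather than injecting it as a plain test argument)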
+                if fixturedef.params is not None:
+                    metafunc.parametrize(
+                        argname,
+                        fixturedef.params,
+                        indirect=True,
+                        scope=fixturedef.scope,
+                        ids=fixturedef.ids,
+                    )
+                    break
+
+                # Not requesting the overridden super fixture, stop.
+                if argname not in fixturedef.argnames:
+                    break
+
+                # Try next super fixture, if any.
+
+    def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> None:
+        # Separate parametrized setups.
+        items[:] = reorder_items(items)
+
+    def _register_fixture(
+        self,
+        *,
+        name: str,
+        func: _FixtureFunc[object],
+        nodeid: str | None,
+        scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] = "function",
+        params: Sequence[object] | None = None,
+        ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None,
+        autouse: bool = False,
+    ) -> None:
+        """Register a fixture.
+
+        :param name:
+            The fixture's name.
+        :param func:
+            The fixture's implementation function.
+        :param nodeid:
+            The visibility of the fixture. The fixture will be available to the
+            node with this nodeid and its children in the collection tree.
+            None means that the fixture is visible to the entire collection tree,
+            e.g. a fixture defined for general use in a plugin.
+        :param scope:
+            The fixture's scope.
+        :param params:
+            The fixture's parametrization params.
+        :param ids:
+            The fixture's IDs.
+        :param autouse:
+            Whether this is an autouse fixture.
+        """
+        fixture_def = FixtureDef(
+            config=self.config,
+            baseid=nodeid,
+            argname=name,
+            func=func,
+            scope=scope,
+            params=params,
+            ids=ids,
+            _ispytest=True,
+        )
+
+        faclist = self._arg2fixturedefs.setdefault(name, [])
+        if fixture_def.has_location:
+            faclist.append(fixture_def)
+        else:
+            # FixtureDefs with no location are at the front, so this inserts
+            # the current fixturedef after the existing fixturedefs from
+            # external plugins but before the fixturedefs provided in
+            # conftests.
+            i = len([f for f in faclist if not f.has_location])
+            faclist.insert(i, fixture_def)
+        if autouse:
+            self._nodeid_autousenames.setdefault(nodeid or "", []).append(name)
+
+    @overload
+    def parsefactories(
+        self,
+        node_or_obj: nodes.Node,
+    ) -> None:
+        raise NotImplementedError()
+
+    @overload
+    def parsefactories(
+        self,
+        node_or_obj: object,
+        nodeid: str | None,
+    ) -> None:
+        raise NotImplementedError()
+
+    def parsefactories(
+        self,
+        node_or_obj: nodes.Node | object,
+        nodeid: str | NotSetType | None = NOTSET,
+    ) -> None:
+        """Collect fixtures from a collection node or object.
+
+        Found fixtures are parsed into `FixtureDef`s and saved.
+
+        If `node_or_obj` is a collection node (with an underlying Python
+        object), the node's object is traversed and the node's nodeid is used to
+        determine the fixtures' visibility. `nodeid` must not be specified in
+        this case.
+
+        If `node_or_obj` is an object (e.g. a plugin), the object is
+        traversed and the given `nodeid` is used to determine the fixtures'
+        visibility. `nodeid` must be specified in this case; None and "" mean
+        total visibility.
+        """
+        if nodeid is not NOTSET:
+            holderobj = node_or_obj
+        else:
+            assert isinstance(node_or_obj, nodes.Node)
+            holderobj = cast(object, node_or_obj.obj)  # type: ignore[attr-defined]
+            assert isinstance(node_or_obj.nodeid, str)
+            nodeid = node_or_obj.nodeid
+        if holderobj in self._holderobjseen:
+            return
+
+        # Avoid accessing `@property` (and other descriptors) when iterating fixtures.
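+        # For example (hypothetical holder): a test class defining
+        #     @property
+        #     def expensive(self): ...
+        # must not have `expensive` evaluated by the dir() scan below, so the
+        # fixture-marker lookup is done on the type rather than the instance.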
+ if not safe_isclass(holderobj) and not isinstance(holderobj, types.ModuleType): + holderobj_tp: object = type(holderobj) + else: + holderobj_tp = holderobj + + self._holderobjseen.add(holderobj) + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getattr() ignores such exceptions. + obj_ub = safe_getattr(holderobj_tp, name, None) + marker = getfixturemarker(obj_ub) + if not isinstance(marker, FixtureFunctionMarker): + # Magic globals with __getattr__ might have got us a wrong + # fixture attribute. + continue + + # OK we know it is a fixture -- now safe to look up on the _instance_. + obj = getattr(holderobj, name) + + if marker.name: + name = marker.name + + # During fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in + # order to not emit the warning when pytest itself calls the + # fixture function. + func = get_real_method(obj, holderobj) + + self._register_fixture( + name=name, + nodeid=nodeid, + func=func, + scope=marker.scope, + params=marker.params, + ids=marker.ids, + autouse=marker.autouse, + ) + + def getfixturedefs( + self, argname: str, node: nodes.Node + ) -> Sequence[FixtureDef[Any]] | None: + """Get FixtureDefs for a fixture name which are applicable + to a given node. + + Returns None if there are no fixtures at all defined with the given + name. (This is different from the case in which there are fixtures + with the given name, but none applicable to the node. In this case, + an empty result is returned). + + :param argname: Name of the fixture to search for. + :param node: The requesting Node. + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, node)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], node: nodes.Node + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = {n.nodeid for n in node.iter_parents()} + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef + + +def show_fixtures_per_test(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def _pretty_fixture_path(invocation_dir: Path, func) -> str: + loc = Path(getlocation(func, invocation_dir)) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(invocation_dir, loc) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + def get_best_relpath(func) -> str: + loc = getlocation(func, invocation_dir) + return bestrelpath(invocation_dir, Path(loc)) + + def write_fixture(fixture_def: FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(invocation_dir, fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, + fixture_doc.split("\n\n", maxsplit=1)[0] + if verbose <= 0 + else fixture_doc, + ) + else: + tw.line(" no docstring available", red=True) + + def 
write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. + info: FuncFixtureInfo | None = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + fm = session._fixturemanager + + available = [] + seen: set[tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, invocation_dir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + _pretty_fixture_path(invocation_dir, fixturedef.func), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, prettypath, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname.startswith("_"): + continue + tw.write(f"{argname}", green=True) + if fixturedef.scope != "function": + tw.write(f" [{fixturedef.scope} scope]", cyan=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring( + tw, doc.split("\n\n", maxsplit=1)[0] if verbose <= 0 else doc + ) + else: + tw.line(" no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) diff --git a/.venv/Lib/site-packages/_pytest/freeze_support.py b/.venv/Lib/site-packages/_pytest/freeze_support.py new file mode 100644 index 00000000..2ba6f9b8 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" + +from __future__ import annotations + +import types +from typing import Iterator + + +def freeze_includes() -> list[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: str | types.ModuleType, + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. 
+ + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ + path, prefix = package_path[0], package.__name__ + "." + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/.venv/Lib/site-packages/_pytest/helpconfig.py b/.venv/Lib/site-packages/_pytest/helpconfig.py new file mode 100644 index 00000000..1886d5c9 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/helpconfig.py @@ -0,0 +1,276 @@ +# mypy: allow-untyped-defs +"""Version info, help messages, tracing configuration.""" + +from __future__ import annotations + +from argparse import Action +import os +import sys +from typing import Generator + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser +from _pytest.terminal import TerminalReporter +import pytest + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="Display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="Show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="Early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="Trace considerations of conftest.py files", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="Store internal tracing debug information in this log file. " + "This file is opened with 'w' and truncated as a result, care advised. " + "Default: pytestdebug.log.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='Override ini option with "option=value" style, ' + "e.g. 
`-o xfail_strict=True -o cache_dir=cache`.", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_cmdline_parse() -> Generator[None, Config, Config]: + config = yield + + if config.option.debug: + # --debug | --debug was provided. + path = config.option.debug + debugfile = open(path, "w", encoding="utf-8") + debugfile.write( + "versions pytest-{}, " + "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format( + pytest.__version__, + ".".join(map(str, sys.version_info)), + config.invocation_params.dir, + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write(f"writing pytest debug information to {path}\n") + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n") + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + return config + + +def showversion(config: Config) -> None: + if config.option.version > 1: + sys.stdout.write( + f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") + else: + sys.stdout.write(f"pytest {pytest.__version__}\n") + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.version > 0: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter: TerminalReporter | None = config.pluginmanager.get_plugin( + "terminalreporter" + ) + assert reporter is not None + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] ini-options in the first " + "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(f" {spec}") + spec_len = len(spec) + if spec_len > (indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. 
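+            # Worked example (real ini name, illustrative numbers): for the spec
+            # "cache_dir (string):", spec_len is 19 <= 21, so 24 - 19 - 2 = 3
+            # padding spaces place the help text at column 24 (2 + 19 + 3).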
+ tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("Environment variables:") + vars = [ + ( + "CI", + "When set (regardless of value), pytest knows it is running in a " + "CI process and does not truncate summary info", + ), + ("BUILD_NUMBER", "Equivalent to CI"), + ("PYTEST_ADDOPTS", "Extra command line options"), + ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> list[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("registered third-party plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> list[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/.venv/Lib/site-packages/_pytest/hookspec.py b/.venv/Lib/site-packages/_pytest/hookspec.py new file mode 100644 index 00000000..0a41b0ac --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/hookspec.py @@ -0,0 +1,1333 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from typing import Mapping +from typing import Sequence +from typing import TYPE_CHECKING + +from pluggy import HookspecMarker + +from .deprecated import HOOK_LEGACY_PATH_ARG + + +if TYPE_CHECKING: + import pdb + from typing import Literal + import warnings + + from _pytest._code.code import ExceptionInfo + from _pytest._code.code import ExceptionRepr + from _pytest.compat import LEGACY_PATH + from _pytest.config import _PluggyPlugin + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item + from _pytest.outcomes import Exit + from 
_pytest.python import Class + from _pytest.python import Function + from _pytest.python import Metafunc + from _pytest.python import Module + from _pytest.reports import CollectReport + from _pytest.reports import TestReport + from _pytest.runner import CallInfo + from _pytest.terminal import TerminalReporter + from _pytest.terminal import TestShortLogReport + + +hookspec = HookspecMarker("pytest") + +# ------------------------------------------------------------------------- +# Initialization hooks called for every plugin +# ------------------------------------------------------------------------- + + +@hookspec(historic=True) +def pytest_addhooks(pluginmanager: PytestPluginManager) -> None: + """Called at plugin registration time to allow adding new hooks via a call to + :func:`pluginmanager.add_hookspecs(module_or_class, prefix) `. + + :param pluginmanager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + """ + + +@hookspec(historic=True) +def pytest_plugin_registered( + plugin: _PluggyPlugin, + plugin_name: str, + manager: PytestPluginManager, +) -> None: + """A new pytest plugin got registered. + + :param plugin: The plugin module or instance. + :param plugin_name: The name by which the plugin is registered. + :param manager: The pytest plugin manager. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered, once for each plugin registered thus far + (including itself!), and for all plugins thereafter when they are + registered. + """ + + +@hookspec(historic=True) +def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None: + """Register argparse-style options and ini-style config values, + called once at the beginning of a test run. + + :param parser: + To add command line options, call + :py:func:`parser.addoption(...) `. + To add ini-file values call :py:func:`parser.addini(...) + `. + + :param pluginmanager: + The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s + or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks + to change how command line options are added. + + Options can later be accessed through the + :py:class:`config ` object, respectively: + + - :py:func:`config.getoption(name) ` to + retrieve the value of a command line option. + + - :py:func:`config.getini(name) ` to retrieve + a value read from an ini-style file. + + The config object is passed around on many internal objects via the ``.config`` + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with hook wrappers. + + Use in conftest plugins + ======================= + + If a conftest plugin implements this hook, it will be called immediately + when the conftest is registered. + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec(historic=True) +def pytest_configure(config: Config) -> None: + """Allow plugins and conftest files to perform initial configuration. + + .. note:: + This hook is incompatible with hook wrappers. + + :param config: The pytest config object. 
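+
+    For example, a conftest plugin might register a custom marker here
+    (illustrative; the marker name is arbitrary):
+
+    .. code-block:: python
+
+        def pytest_configure(config):
+            config.addinivalue_line("markers", "slow: mark a test as slow to run")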
+ + Use in conftest plugins + ======================= + + This hook is called for every :ref:`initial conftest ` file + after command line options have been parsed. After that, the hook is called + for other conftest files as they are registered. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: PytestPluginManager, args: list[str] +) -> Config | None: + """Return an initialized :class:`~pytest.Config`, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook is only called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pluginmanager: The pytest plugin manager. + :param args: List of arguments passed on the command line. + :returns: A pytest config object. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +def pytest_load_initial_conftests( + early_config: Config, parser: Parser, args: list[str] +) -> None: + """Called to implement the loading of :ref:`initial conftest files + ` ahead of command line option parsing. + + :param early_config: The pytest config object. + :param args: Arguments passed on the command line. + :param parser: To add command line options. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: Config) -> ExitCode | int | None: + """Called for performing the main command line action. + + The default implementation will invoke the configure hooks and + :hook:`pytest_runtestloop`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :returns: The exit code. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: Session) -> object | None: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. 
Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: list[Item] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + When items are deselected (filtered out from ``items``), + the hook :hook:`pytest_deselected` must be called explicitly + with the deselected items to properly notify other plugins, + e.g. with ``config.hook.pytest_deselected(deselected_items)``. + + :param session: The pytest session object. + :param config: The pytest config object. + :param items: List of item objects. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_collection_finish(session: Session) -> None: + """Called after collection has been performed and modified. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="collection_path" + ), + }, +) +def pytest_ignore_collect( + collection_path: Path, path: LEGACY_PATH, config: Config +) -> bool | None: + """Return ``True`` to ignore this path for collection. + + Return ``None`` to let other plugins ignore the path for collection. + + Returning ``False`` will forcefully *not* ignore this path for collection, + without giving a chance for other plugins to ignore this path. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collection_path: The path to analyze. + :type collection_path: pathlib.Path + :param path: The path to analyze (deprecated). + :param config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot ignore itself!). + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path: Path, parent: Collector) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given directory, or None if + not relevant. + + .. versionadded:: 8.0 + + For best results, the returned collector should be a subclass of + :class:`~pytest.Directory`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + Stops at first non-None result, see :ref:`firstresult`. + + :param path: The path to analyze. + :type path: pathlib.Path + + See :ref:`custom directory collectors` for a simple example of use of this + hook. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot collect itself!). + """ + + +@hookspec( + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="file_path" + ), + }, +) +def pytest_collect_file( + file_path: Path, path: LEGACY_PATH, parent: Collector +) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + + For best results, the returned collector should be a subclass of + :class:`~pytest.File`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + :param file_path: The path to analyze. + :type file_path: pathlib.Path + :param path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given file path, only + conftest files in parent directories of the file path are consulted. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: Collector) -> None: + """Collector starts collecting. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_itemcollected(item: Item) -> None: + """We just collected a test item. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_collectreport(report: CollectReport) -> None: + """Collector finished collecting. + + :param report: + The collect report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_deselected(items: Sequence[Item]) -> None: + """Called for deselected test items, e.g. by keyword. + + Note that this hook has two integration aspects for plugins: + + - it can be *implemented* to be notified of deselected items + - it must be *called* from :hook:`pytest_collection_modifyitems` + implementations when items are deselected (to properly notify other plugins). + + May be called multiple times. + + :param items: + The items. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: Collector) -> CollectReport | None: + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. 
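+
+    An illustrative hook wrapper that inspects the generated report (a sketch,
+    not part of the default implementation):
+
+    .. code-block:: python
+
+        import pytest
+
+        @pytest.hookimpl(wrapper=True)
+        def pytest_make_collect_report(collector):
+            report = yield
+            print(f"{collector.nodeid} collected {len(report.result)} nodes")
+            return report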
+ """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="module_path" + ), + }, +) +def pytest_pycollect_makemodule( + module_path: Path, path: LEGACY_PATH, parent +) -> Module | None: + """Return a :class:`pytest.Module` collector or None for the given path. + + This hook will be called for each matching test module path. + The :hook:`pytest_collect_file` hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param module_path: The path of the module to collect. + :type module_path: pathlib.Path + :param path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given parent collector, + only conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | Item | Collector | list[Item | Collector]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The module/class collector. + :param name: + The name of the object in the module/class. + :param obj: + The object. + :returns: + The created items/collectors. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pyfuncitem: + The function item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only + conftest files in the item's directory and its parent directories + are consulted. + """ + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + """Generate (multiple) parametrized calls to a test function. + + :param metafunc: + The :class:`~pytest.Metafunc` helper for the test function. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given function definition, + only conftest files in the functions's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config: Config, val: object, argname: str) -> str | None: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :param val: The parametrized value. 
+ :param argname: The automatic parameter name produced by pytest. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: Session) -> object | None: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> object | None: + """Perform the runtest protocol for a single test item. + + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_runtest_logstart(nodeid: str, location: tuple[str, int | None, str]) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: tuple[str, int | None, str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_setup(item: Item) -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_call(item: Item) -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param item: + The item. + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport | None: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param item: The item. + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logreport(report: TestReport) -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. 
+ + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: Config, + report: CollectReport | TestReport, +) -> dict[str, Any] | None: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON. + + :param config: The pytest config object. + :param report: The report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: Config, + data: dict[str, Any], +) -> CollectReport | TestReport | None: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[Any], request: SubRequest +) -> object | None: + """Perform fixture setup execution. + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + :returns: + The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[Any], request: SubRequest +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``). + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: Session) -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. 
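+
+    For example (illustrative):
+
+    .. code-block:: python
+
+        def pytest_sessionstart(session):
+            print(f"starting test session under {session.config.rootpath}")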
+ """ + + +def pytest_sessionfinish( + session: Session, + exitstatus: int | ExitCode, +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param session: The pytest session object. + :param exitstatus: The status which pytest will return to the system. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_unconfigure(config: Config) -> None: + """Called before test process is exited. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: Config, op: str, left: object, right: object +) -> list[str] | None: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param config: The pytest config object. + :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`. + :param left: The left operand. + :param right: The right operand. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param item: pytest item object of current test. + :param lineno: Line number of the assert statement. + :param orig: String with the original assertion. + :param expl: String with the assert explanation. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_header( # type:ignore[empty-body] + config: Config, start_path: Path, startdir: LEGACY_PATH +) -> str | list[str]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: The pytest config object. 
+ :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_collectionfinish( # type:ignore[empty-body] + config: Config, + start_path: Path, + startdir: LEGACY_PATH, + items: Sequence[Item], +) -> str | list[str]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param config: The pytest config object. + :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( # type:ignore[empty-body] + report: CollectReport | TestReport, config: Config +) -> TestShortLogReport | tuple[str, str, str | tuple[str, Mapping[str, bool]]]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + :returns: The test status. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_terminal_summary( + terminalreporter: TerminalReporter, + exitstatus: ExitCode, + config: Config, +) -> None: + """Add a section to terminal summary reporting. + + :param terminalreporter: The internal terminal reporter object. + :param exitstatus: The exit status that will be reported back to the OS. + :param config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. 
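+
+    For example, a conftest plugin might append its own section (illustrative):
+
+    .. code-block:: python
+
+        def pytest_terminal_summary(terminalreporter, exitstatus, config):
+            terminalreporter.section("example summary")
+            terminalreporter.line(f"finished with exit status {int(exitstatus)}")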
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+@hookspec(historic=True)
+def pytest_warning_recorded(
+    warning_message: warnings.WarningMessage,
+    when: Literal["config", "collect", "runtest"],
+    nodeid: str,
+    location: tuple[str, int, str] | None,
+) -> None:
+    """Process a warning captured by the internal pytest warnings plugin.
+
+    :param warning_message:
+        The captured warning. This is the same object produced by :class:`warnings.catch_warnings`,
+        and contains the same attributes as the parameters of :py:func:`warnings.showwarning`.
+
+    :param when:
+        Indicates when the warning was captured. Possible values:
+
+        * ``"config"``: during pytest configuration/initialization stage.
+        * ``"collect"``: during test collection.
+        * ``"runtest"``: during test execution.
+
+    :param nodeid:
+        Full id of the item. Empty string for warnings that are not specific to
+        a particular node.
+
+    :param location:
+        When available, holds information about the execution context of the captured
+        warning (filename, linenumber, function). ``function`` evaluates to ``<module>``
+        when the execution context is at the module level.
+
+    .. versionadded:: 6.0
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. If the warning is specific to a
+    particular node, only conftest files in parent directories of the node are
+    consulted.
+    """
+
+
+# -------------------------------------------------------------------------
+# Hooks for influencing skipping
+# -------------------------------------------------------------------------
+
+
+def pytest_markeval_namespace(  # type:ignore[empty-body]
+    config: Config,
+) -> dict[str, Any]:
+    """Called when constructing the globals dictionary used for
+    evaluating string conditions in xfail/skipif markers.
+
+    This is useful when the condition for a marker requires objects that are
+    expensive or impossible to obtain at collection time, as evaluating a
+    normal boolean condition would require.
+
+    .. versionadded:: 6.2
+
+    :param config: The pytest config object.
+    :returns: A dictionary of additional globals to add.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. For a given item, only conftest
+    files in parent directories of the item are consulted.
+    """
+
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+
+def pytest_internalerror(
+    excrepr: ExceptionRepr,
+    excinfo: ExceptionInfo[BaseException],
+) -> bool | None:
+    """Called for internal errors.
+
+    Return True to suppress the fallback handling of printing an
+    INTERNALERROR message directly to sys.stderr.
+
+    :param excrepr: The exception repr object.
+    :param excinfo: The exception info.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_keyboard_interrupt(
+    excinfo: ExceptionInfo[KeyboardInterrupt | Exit],
+) -> None:
+    """Called for keyboard interrupt.
+
+    :param excinfo: The exception info.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_exception_interact(
+    node: Item | Collector,
+    call: CallInfo[Any],
+    report: CollectReport | TestReport,
+) -> None:
+    """Called when an exception was raised which can potentially be
+    interactively handled.
+
+    May be called during collection (see :hook:`pytest_make_collect_report`),
+    in which case ``report`` is a :class:`~pytest.CollectReport`.
+
+    May be called during runtest of an item (see :hook:`pytest_runtest_protocol`),
+    in which case ``report`` is a :class:`~pytest.TestReport`.
+
+    This hook is not called if the exception that was raised is an internal
+    exception like ``skip.Exception``.
+
+    :param node:
+        The item or collector.
+    :param call:
+        The call information. Contains the exception.
+    :param report:
+        The collection or test report.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest file can implement this hook. For a given node, only conftest
+    files in parent directories of the node are consulted.
+    """
+
+
+def pytest_enter_pdb(config: Config, pdb: pdb.Pdb) -> None:
+    """Called upon pdb.set_trace().
+
+    Can be used by plugins to take special action just before the python
+    debugger enters interactive mode.
+
+    :param config: The pytest config object.
+    :param pdb: The Pdb instance.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
+
+
+def pytest_leave_pdb(config: Config, pdb: pdb.Pdb) -> None:
+    """Called when leaving pdb (e.g. with continue after pdb.set_trace()).
+
+    Can be used by plugins to take special action just after the python
+    debugger leaves interactive mode.
+
+    :param config: The pytest config object.
+    :param pdb: The Pdb instance.
+
+    Use in conftest plugins
+    =======================
+
+    Any conftest plugin can implement this hook.
+    """
diff --git a/.venv/Lib/site-packages/_pytest/junitxml.py b/.venv/Lib/site-packages/_pytest/junitxml.py
new file mode 100644
index 00000000..3a2cb59a
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/junitxml.py
@@ -0,0 +1,697 @@
+# mypy: allow-untyped-defs
+"""Report test results in JUnit-XML format, for use with Jenkins and build
+integration servers.
+
+Based on initial code from Ross Lawley.
+
+Output conforms to
+https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+"""
+
+from __future__ import annotations
+
+from datetime import datetime
+from datetime import timezone
+import functools
+import os
+import platform
+import re
+from typing import Callable
+from typing import Match
+import xml.etree.ElementTree as ET
+
+from _pytest import nodes
+from _pytest import timing
+from _pytest._code.code import ExceptionRepr
+from _pytest._code.code import ReprFileLocation
+from _pytest.config import Config
+from _pytest.config import filename_arg
+from _pytest.config.argparsing import Parser
+from _pytest.fixtures import FixtureRequest
+from _pytest.reports import TestReport
+from _pytest.stash import StashKey
+from _pytest.terminal import TerminalReporter
+import pytest
+
+
+xml_key = StashKey["LogXML"]()
+
+
+def bin_xml_escape(arg: object) -> str:
+    r"""Visually escape invalid XML characters.
+
+    For example, transforms
+        'hello\aworld\b'
+    into
+        'hello#x07world#x08'
+    Note that the #xABs are *not* XML escapes - missing the ampersand «&».
+    The idea is to escape visually for the user rather than for XML itself.
+    """
+
+    def repl(matchobj: Match[str]) -> str:
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return f"#x{i:02X}"
+        else:
+            return f"#x{i:04X}"
+
+    # The spec range of valid chars is:
+    # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
+    # For an unknown(?) reason, we disallow #x7F (DEL) as well.
+ illegal_xml_re = ( + "[^\u0009\u000a\u000d\u0020-\u007e\u0080-\ud7ff\ue000-\ufffd\u10000-\u10ffff]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = {} +families["_base"] = {"testcase": ["classname", "name"]} +families["_base_legacy"] = {"testcase": ["file", "line", "url"]} + +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. +families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: str | TestReport, xml: LogXML) -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: list[tuple[str, str]] = [] + self.nodes: list[ET.Element] = [] + self.attrs: dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. 
+ temp_attrs = {} + for key in self.attrs: + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time=f"{self.duration:.3f}") + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: str | None = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr(report.longrepr, "reprcrash", None) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", bin_xml_escape(msg), str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", 
message=xfailreason)
+            self.append(skipped)
+        else:
+            assert isinstance(report.longrepr, tuple)
+            filename, lineno, skipreason = report.longrepr
+            if skipreason.startswith("Skipped: "):
+                skipreason = skipreason[9:]
+            details = f"{filename}:{lineno}: {skipreason}"
+
+            skipped = ET.Element(
+                "skipped", type="pytest.skip", message=bin_xml_escape(skipreason)
+            )
+            skipped.text = bin_xml_escape(details)
+            self.append(skipped)
+        self.write_captured_output(report)
+
+    def finalize(self) -> None:
+        data = self.to_xml()
+        self.__dict__.clear()
+        # Type ignored because mypy doesn't like overriding a method.
+        # Also the return value doesn't match...
+        self.to_xml = lambda: data  # type: ignore[method-assign]
+
+
+def _warn_incompatibility_with_xunit2(
+    request: FixtureRequest, fixture_name: str
+) -> None:
+    """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
+    from _pytest.warning_types import PytestWarning
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None and xml.family not in ("xunit1", "legacy"):
+        request.node.warn(
+            PytestWarning(
+                f"{fixture_name} is incompatible with junit_family '{xml.family}' (use 'legacy' or 'xunit1')"
+            )
+        )
+
+
+@pytest.fixture
+def record_property(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Add extra properties to the calling test.
+
+    User properties become part of the test report and are available to the
+    configured reporters, like JUnit XML.
+
+    The fixture is callable with ``name, value``. The value is automatically
+    XML-encoded.
+
+    Example::
+
+        def test_function(record_property):
+            record_property("example_key", 1)
+    """
+    _warn_incompatibility_with_xunit2(request, "record_property")
+
+    def append_property(name: str, value: object) -> None:
+        request.node.user_properties.append((name, value))
+
+    return append_property
+
+
+@pytest.fixture
+def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Add extra xml attributes to the tag for the calling test.
+
+    The fixture is callable with ``name, value``. The value is
+    automatically XML-encoded.
+    """
+    from _pytest.warning_types import PytestExperimentalApiWarning
+
+    request.node.warn(
+        PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
+    )
+
+    _warn_incompatibility_with_xunit2(request, "record_xml_attribute")
+
+    # Declare noop
+    def add_attr_noop(name: str, value: object) -> None:
+        pass
+
+    attr_func = add_attr_noop
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None:
+        node_reporter = xml.node_reporter(request.node.nodeid)
+        attr_func = node_reporter.add_attribute
+
+    return attr_func
+
+
+def _check_record_param_type(param: str, v: str) -> None:
+    """Used by record_testsuite_property to check that the given parameter name is of the proper
+    type."""
+    __tracebackhide__ = True
+    if not isinstance(v, str):
+        msg = "{param} parameter needs to be a string, but {g} given"  # type: ignore[unreachable]
+        raise TypeError(msg.format(param=param, g=type(v).__name__))
+
+
+@pytest.fixture(scope="session")
+def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
+    """Record a new ``<property>`` tag as child of the root ``<testsuite>``.
+
+    This is suitable to writing global information regarding the entire test
+    suite, and is compatible with ``xunit2`` JUnit family.
+
+    This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
+
+    .. code-block:: python
+
+        def test_foo(record_testsuite_property):
+            record_testsuite_property("ARCH", "PPC")
+            record_testsuite_property("STORAGE_TYPE", "CEPH")
+
+    :param name:
+        The property name.
+    :param value:
+        The property value. Will be converted to a string.
+
+    .. warning::
+
+        Currently this fixture **does not work** with the
+        `pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See
+        :issue:`7767` for details.
+    """
+    __tracebackhide__ = True
+
+    def record_func(name: str, value: object) -> None:
+        """No-op function in case --junit-xml was not passed in the command-line."""
+        __tracebackhide__ = True
+        _check_record_param_type("name", name)
+
+    xml = request.config.stash.get(xml_key, None)
+    if xml is not None:
+        record_func = xml.add_global_property
+    return record_func
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("terminal reporting")
+    group.addoption(
+        "--junitxml",
+        "--junit-xml",
+        action="store",
+        dest="xmlpath",
+        metavar="path",
+        type=functools.partial(filename_arg, optname="--junitxml"),
+        default=None,
+        help="Create junit-xml style report file at given path",
+    )
+    group.addoption(
+        "--junitprefix",
+        "--junit-prefix",
+        action="store",
+        metavar="str",
+        default=None,
+        help="Prepend prefix to classnames in junit-xml output",
+    )
+    parser.addini(
+        "junit_suite_name", "Test suite name for JUnit report", default="pytest"
+    )
+    parser.addini(
+        "junit_logging",
+        "Write captured log messages to JUnit report: "
+        "one of no|log|system-out|system-err|out-err|all",
+        default="no",
+    )
+    parser.addini(
+        "junit_log_passing_tests",
+        "Capture log information for passing tests to JUnit report: ",
+        type="bool",
+        default=True,
+    )
+    parser.addini(
+        "junit_duration_report",
+        "Duration time to report: one of total|call",
+        default="total",
+    )  # choices=['total', 'call'])
+    parser.addini(
+        "junit_family",
+        "Emit XML for schema: one of legacy|xunit1|xunit2",
+        default="xunit2",
+    )
+
+
+def pytest_configure(config: Config) -> None:
+    xmlpath = config.option.xmlpath
+    # Prevent opening xmllog on worker nodes (xdist).
+    if xmlpath and not hasattr(config, "workerinput"):
+        junit_family = config.getini("junit_family")
+        config.stash[xml_key] = LogXML(
+            xmlpath,
+            config.option.junitprefix,
+            config.getini("junit_suite_name"),
+            config.getini("junit_logging"),
+            config.getini("junit_duration_report"),
+            junit_family,
+            config.getini("junit_log_passing_tests"),
+        )
+        config.pluginmanager.register(config.stash[xml_key])
+
+
+def pytest_unconfigure(config: Config) -> None:
+    xml = config.stash.get(xml_key, None)
+    if xml:
+        del config.stash[xml_key]
+        config.pluginmanager.unregister(xml)
+
+
+def mangle_test_address(address: str) -> list[str]:
+    path, possible_open_bracket, params = address.partition("[")
+    names = path.split("::")
+    # Convert file path to dotted path.
+    names[0] = names[0].replace(nodes.SEP, ".")
+    names[0] = re.sub(r"\.py$", "", names[0])
+    # Put any params back.
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: str | None, + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: dict[tuple[str | TestReport, object], _NodeReporter] = {} + self.node_reporters_ordered: list[_NodeReporter] = [] + self.global_properties: list[tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: list[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: TestReport | str) -> _NodeReporter: + nodeid: str | TestReport = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. 
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (
+                        rep
+                        for rep in self.open_reports
+                        if (
+                            rep.nodeid == report.nodeid
+                            and getattr(rep, "item_index", None) == report_ii
+                            and getattr(rep, "worker_id", None) == report_wid
+                        )
+                    ),
+                    None,
+                )
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema.
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
+            reporter = self._opentestcase(report)
+            if report.when == "call":
+                reporter.append_failure(report)
+                self.open_reports.append(report)
+                if not self.log_passing_tests:
+                    reporter.write_captured_output(report)
+            else:
+                reporter.append_error(report)
+        elif report.skipped:
+            reporter = self._opentestcase(report)
+            reporter.append_skipped(report)
+        self.update_testcase_duration(report)
+        if report.when == "teardown":
+            reporter = self._opentestcase(report)
+            reporter.write_captured_output(report)
+
+            self.finalize(report)
+            report_wid = getattr(report, "worker_id", None)
+            report_ii = getattr(report, "item_index", None)
+            close_report = next(
+                (
+                    rep
+                    for rep in self.open_reports
+                    if (
+                        rep.nodeid == report.nodeid
+                        and getattr(rep, "item_index", None) == report_ii
+                        and getattr(rep, "worker_id", None) == report_wid
+                    )
+                ),
+                None,
+            )
+            if close_report:
+                self.open_reports.remove(close_report)
+
+    def update_testcase_duration(self, report: TestReport) -> None:
+        """Accumulate total duration for nodeid from given report and update
+        the Junit.testcase with the new total if already created."""
+        if self.report_duration in {"total", report.when}:
+            reporter = self.node_reporter(report)
+            reporter.duration += getattr(report, "duration", 0.0)
+
+    def pytest_collectreport(self, report: TestReport) -> None:
+        if not report.passed:
+            reporter = self._opentestcase(report)
+            if report.failed:
+                reporter.append_collect_error(report)
+            else:
+                reporter.append_collect_skipped(report)
+
+    def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
+        reporter = self.node_reporter("internal")
+        reporter.attrs.update(classname="pytest", name="internal")
+        reporter._add_simple("error", "internal error", str(excrepr))
+
+    def pytest_sessionstart(self) -> None:
+        self.suite_start_time = timing.time()
+
+    def pytest_sessionfinish(self) -> None:
+        dirname = os.path.dirname(os.path.abspath(self.logfile))
+        # exist_ok avoids filesystem race conditions between checking path existence and requesting creation
+        os.makedirs(dirname, exist_ok=True)
+
+        with open(self.logfile, "w", encoding="utf-8") as logfile:
+            suite_stop_time = timing.time()
+            suite_time_delta = suite_stop_time - self.suite_start_time
+
+            numtests = (
+                self.stats["passed"]
+                + self.stats["failure"]
+                + self.stats["skipped"]
+                + self.stats["error"]
+                - self.cnt_double_fail_tests
+            )
+            logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
+            suite_node = ET.Element(
+                "testsuite",
+                name=self.suite_name,
+                errors=str(self.stats["error"]),
+                failures=str(self.stats["failure"]),
+                skipped=str(self.stats["skipped"]),
+                tests=str(numtests),
+                time=f"{suite_time_delta:.3f}",
+                timestamp=datetime.fromtimestamp(self.suite_start_time, timezone.utc)
+                .astimezone()
+                .isoformat(),
+                hostname=platform.node(),
+            )
+            global_properties = self._get_global_properties_node()
+            if global_properties is not None:
+                suite_node.append(global_properties)
+            for node_reporter in self.node_reporters_ordered:
+                suite_node.append(node_reporter.to_xml())
+            testsuites = 
ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name, bin_xml_escape(value))) + + def _get_global_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/.venv/Lib/site-packages/_pytest/legacypath.py b/.venv/Lib/site-packages/_pytest/legacypath.py new file mode 100644 index 00000000..61476d68 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/legacypath.py @@ -0,0 +1,473 @@ +# mypy: allow-untyped-defs +"""Add backward compatibility support for the legacy py path type.""" + +from __future__ import annotations + +import dataclasses +from pathlib import Path +import shlex +import subprocess +from typing import Final +from typing import final +from typing import TYPE_CHECKING + +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + + +if TYPE_CHECKING: + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
+ """ + + __test__ = False + + CLOSE_STDIN: Final = Pytester.CLOSE_STDIN + TimeoutExpired: Final = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Item | Collector | None: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: list[Item | Collector]) -> list[Item]: + """See 
:meth:`Pytester.genitems`."""
+        return self._pytester.genitems(colitems)
+
+    def runitem(self, source):
+        """See :meth:`Pytester.runitem`."""
+        return self._pytester.runitem(source)
+
+    def inline_runsource(self, source, *cmdlineargs):
+        """See :meth:`Pytester.inline_runsource`."""
+        return self._pytester.inline_runsource(source, *cmdlineargs)
+
+    def inline_genitems(self, *args):
+        """See :meth:`Pytester.inline_genitems`."""
+        return self._pytester.inline_genitems(*args)
+
+    def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
+        """See :meth:`Pytester.inline_run`."""
+        return self._pytester.inline_run(
+            *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
+        )
+
+    def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest_inprocess`."""
+        return self._pytester.runpytest_inprocess(*args, **kwargs)
+
+    def runpytest(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest`."""
+        return self._pytester.runpytest(*args, **kwargs)
+
+    def parseconfig(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfig`."""
+        return self._pytester.parseconfig(*args)
+
+    def parseconfigure(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfigure`."""
+        return self._pytester.parseconfigure(*args)
+
+    def getitem(self, source, funcname="test_func"):
+        """See :meth:`Pytester.getitem`."""
+        return self._pytester.getitem(source, funcname)
+
+    def getitems(self, source):
+        """See :meth:`Pytester.getitems`."""
+        return self._pytester.getitems(source)
+
+    def getmodulecol(self, source, configargs=(), withinit=False):
+        """See :meth:`Pytester.getmodulecol`."""
+        return self._pytester.getmodulecol(
+            source, configargs=configargs, withinit=withinit
+        )
+
+    def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None:
+        """See :meth:`Pytester.collect_by_name`."""
+        return self._pytester.collect_by_name(modcol, name)
+
+    def popen(
+        self,
+        cmdargs,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=CLOSE_STDIN,
+        **kw,
+    ):
+        """See :meth:`Pytester.popen`."""
+        return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
+
+    def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
+        """See :meth:`Pytester.run`."""
+        return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
+
+    def runpython(self, script) -> RunResult:
+        """See :meth:`Pytester.runpython`."""
+        return self._pytester.runpython(script)
+
+    def runpython_c(self, command):
+        """See :meth:`Pytester.runpython_c`."""
+        return self._pytester.runpython_c(command)
+
+    def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
+        """See :meth:`Pytester.runpytest_subprocess`."""
+        return self._pytester.runpytest_subprocess(*args, timeout=timeout)
+
+    def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn:
+        """See :meth:`Pytester.spawn_pytest`."""
+        return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
+
+    def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn:
+        """See :meth:`Pytester.spawn`."""
+        return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
+
+    def __repr__(self) -> str:
+        return f"<Testdir {self.tmpdir!r}>"
+
+    def __str__(self) -> str:
+        return str(self.tmpdir)
+
+
+class LegacyTestdirPlugin:
+    @staticmethod
+    @fixture
+    def testdir(pytester: Pytester) -> Testdir:
+        """
+        Identical to :fixture:`pytester`, and provides an instance whose methods return
+        legacy ``LEGACY_PATH`` objects instead when applicable.
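For illustration (editorial, not part of the vendored file): a minimal sketch of a test exercising the legacy ``testdir`` fixture just described; the test names are invented.

```python
# Hypothetical plugin-style test: testdir forwards to Pytester, but
# testdir.tmpdir and the makefile helpers return py.path.local objects.
def test_with_testdir(testdir):
    testdir.makepyfile("def test_ok(): pass")
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
```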
+
+        New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
+        """
+        return Testdir(pytester, _ispytest=True)
+
+
+@final
+@dataclasses.dataclass
+class TempdirFactory:
+    """Backward compatibility wrapper that implements ``py.path.local``
+    for :class:`TempPathFactory`.
+
+    .. note::
+        These days, it is preferred to use ``tmp_path_factory``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+    """
+
+    _tmppath_factory: TempPathFactory
+
+    def __init__(
+        self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._tmppath_factory = tmppath_factory
+
+    def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
+        return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
+
+    def getbasetemp(self) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object."""
+        return legacy_path(self._tmppath_factory.getbasetemp().resolve())
+
+
+class LegacyTmpdirPlugin:
+    @staticmethod
+    @fixture(scope="session")
+    def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
+        """Return a :class:`pytest.TempdirFactory` instance for the test session."""
+        # Set dynamically by pytest_configure().
+        return request.config._tmpdirhandler  # type: ignore
+
+    @staticmethod
+    @fixture
+    def tmpdir(tmp_path: Path) -> LEGACY_PATH:
+        """Return a temporary directory path object which is unique to each test
+        function invocation, created as a sub directory of the base temporary
+        directory.
+
+        By default, a new base temporary directory is created each test session,
+        and old bases are removed after 3 sessions, to aid in debugging. If
+        ``--basetemp`` is used then it is cleared each session. See
+        :ref:`temporary directory location and retention`.
+
+        The returned object is a `legacy_path`_ object.
+
+        .. note::
+            These days, it is preferred to use ``tmp_path``.
+
+            :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+        .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
+        """
+        return legacy_path(tmp_path)
+
+
+def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
+    """Return a directory path object with the given name.
+
+    Same as :func:`mkdir`, but returns a legacy py path instance.
+    """
+    return legacy_path(self.mkdir(name))
+
+
+def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
+    """(deprecated) The file system path of the test module which collected this test."""
+    return legacy_path(self.path)
+
+
+def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(self.startpath)
+
+
+def Config_invocation_dir(self: Config) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
+    which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.invocation_params.dir))
+
+
+def Config_rootdir(self: Config) -> LEGACY_PATH:
+    """The path to the :ref:`rootdir <rootdir>`.
+
+    Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.rootpath))
+
+
+def Config_inifile(self: Config) -> LEGACY_PATH | None:
+    """The path to the :ref:`configfile <configfiles>`.
+
+    Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
+ + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_startdir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type(self, name: str, type: str, value: str | list[str]): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_startdir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/.venv/Lib/site-packages/_pytest/logging.py b/.venv/Lib/site-packages/_pytest/logging.py new file mode 100644 index 00000000..08c826ff --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/logging.py @@ -0,0 +1,955 @@ +# mypy: allow-untyped-defs +"""Access and control log capturing.""" + +from __future__ import annotations + +from contextlib import contextmanager +from contextlib import nullcontext +from datetime import datetime +from datetime import timedelta +from datetime import timezone +import io +from io import StringIO +import logging +from logging import LogRecord +import os +from pathlib import Path +import re +from types import TracebackType +from typing import AbstractSet +from typing import Dict +from typing import final +from typing import Generator +from typing import Generic +from typing import List +from typing import Literal +from typing import Mapping +from typing import TYPE_CHECKING +from typing import TypeVar + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] +else: + logging_StreamHandler = logging.StreamHandler + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class DatetimeFormatter(logging.Formatter): + """A logging formatter which formats record with + :func:`datetime.datetime.strftime` formatter instead of + :func:`time.strftime` in case of microseconds in format string. + """ + + def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str: + if datefmt and "%f" in datefmt: + ct = self.converter(record.created) + tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) + # Construct `datetime.datetime` object from `struct_time` + # and msecs information from `record` + # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861). 
+ dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz) + return dt.strftime(datefmt) + # Use `logging.Formatter` for non-microsecond formats + return super().formatTime(record, datefmt) + + +class ColoredLevelFormatter(DatetimeFormatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. + """ + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: int | str | bool | None) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: int | str | bool | None) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. 
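As an editorial illustration of the per-record override just described (assuming pytest's log capture is active with a multiline-capable format; the message text is invented):

```python
# Hypothetical usage: force a fixed indentation of 42 columns for the
# continuation lines of this one record via the 'extra' dict.
import logging

logger = logging.getLogger(__name__)
logger.info("first line\nsecond line", extra={"auto_indent": 42})
```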
+ + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. + indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="Default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "Level of messages to catch/display." + " Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' 
+ ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='Enable log display during test run (also known as "live logging")', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="Path to a file when logging will be written to", + ) + add_option_ini( + "--log-file-mode", + dest="log_file_mode", + default="w", + choices=["w", "a"], + help="Log file open mode", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="Log file logging level", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + group.addoption( + "--log-disable", + action="append", + default=[], + dest="logger_disable", + help="Disable a logger by name. Can be passed multiple times.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons. 
+class catching_logs(Generic[_HandlerType]): + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: int | None = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self) -> _HandlerType: + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: list[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def clear(self) -> None: + self.records.clear() + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise # noqa: PLE0704 + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: int | None = None + # Dict of log name -> log level. + self._initial_logger_levels: dict[str | None, int] = {} + self._initial_disabled_logging_level: int | None = None + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels and the disabled logging levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + # Disable logging at the original disabled logging level. + if self._initial_disabled_logging_level is not None: + logging.disable(self._initial_disabled_logging_level) + self._initial_disabled_logging_level = None + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture.""" + return self._item.stash[caplog_handler_key] + + def get_records( + self, when: Literal["setup", "call", "teardown"] + ) -> list[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param when: + Which test phase to obtain the records from. + Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + + .. 
versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> list[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> list[tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> list[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.clear() + + def _force_enable_logging( + self, level: int | str, logger_obj: logging.Logger + ) -> int: + """Enable the desired logging level if the global level was disabled via ``logging.disabled``. + + Only enables logging levels greater than or equal to the requested ``level``. + + Does nothing if the desired ``level`` wasn't disabled. + + :param level: + The logger level caplog should capture. + All logging is enabled if a non-standard logging level string is supplied. + Valid level strings are in :data:`logging._nameToLevel`. + :param logger_obj: The logger object to check. + + :return: The original disabled logging level. + """ + original_disable_level: int = logger_obj.manager.disable + + if isinstance(level, str): + # Try to translate the level string to an int for `logging.disable()` + level = logging.getLevelName(level) + + if not isinstance(level, int): + # The level provided was not valid, so just un-disable all logging. + logging.disable(logging.NOTSET) + elif not logger_obj.isEnabledFor(level): + # Each level is `10` away from other levels. + # https://docs.python.org/3/library/logging.html#logging-levels + disable_level = max(level - 10, logging.NOTSET) + logging.disable(disable_level) + + return original_disable_level + + def set_level(self, level: int | str, logger: str | None = None) -> None: + """Set the threshold level of a logger for the duration of a test. + + Logging messages which are less severe than this level will not be captured. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. 
+ self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) + if self._initial_disabled_logging_level is None: + self._initial_disabled_logging_level = initial_disabled_logging_level + + @contextmanager + def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + original_disable_level = self._force_enable_logging(level, logger_obj) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + logging.disable(original_disable_level) + + @contextmanager + def filtering(self, filter_: logging.Filter) -> Generator[None]: + """Context manager that temporarily adds the given filter to the caplog's + :meth:`handler` for the 'with' statement block, and removes that filter at the + end of the block. + + :param filter_: A custom :class:`logging.Filter` object. + + .. versionadded:: 7.5 + """ + self.handler.addFilter(filter_) + try: + yield + finally: + self.handler.removeFilter(filter_) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]: + """Access and control log capturing. + + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + f"'{log_level}' is not recognized as a logging level name for " + f"'{setting_name}'. Please consider passing the " + "logging level num instead." + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. 
+ + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting( + config, "log_file_level", "log_level" + ) + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_mode = get_option_ini(config, "log_file_mode") or "w" + self.log_file_handler = _FileHandler( + log_file, mode=self.log_file_mode, encoding="UTF-8" + ) + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = DatetimeFormatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + # Guaranteed by `_log_cli_enabled()`. + assert terminal_reporter is not None + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. + self.log_cli_handler: ( + _LiveLoggingStreamHandler | _LiveLoggingNullHandler + ) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + self._disable_loggers(loggers_to_disable=config.option.logger_disable) + + def _disable_loggers(self, loggers_to_disable: list[str]) -> None: + if not loggers_to_disable: + return + + for name in loggers_to_disable: + logger = logging.getLogger(name) + logger.disabled = True + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = DatetimeFormatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. 
+ """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self) -> bool: + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]: + if session.config.option.collectonly: + return (yield) + + if self._log_cli_enabled() and self._config.get_verbosity() < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) # Run all the tests. 
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler: + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + try: + yield + finally: + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("setup") + + empty: dict[str, list[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + yield from self._runtest_for(item, "setup") + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("call") + + yield from self._runtest_for(item, "call") + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("teardown") + + try: + yield from self._runtest_for(item, "teardown") + finally: + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: CaptureManager | None, + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: str | None) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/.venv/Lib/site-packages/_pytest/main.py b/.venv/Lib/site-packages/_pytest/main.py new file mode 100644 index 00000000..e5534e98 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/main.py @@ -0,0 +1,1072 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" + +from __future__ import annotations + +import argparse +import dataclasses +import fnmatch +import functools +import importlib +import importlib.util +import os +from pathlib import Path +import sys +from typing import AbstractSet +from typing import Callable +from typing import Dict +from typing import final +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import overload +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +import _pytest._code +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.config.compat import PathAwareHookProxy +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import safe_exists +from _pytest.pathlib import scandir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.fixtures import FixtureManager + + +def 
pytest_addoption(parser: Parser) -> None: + parser.addini( + "norecursedirs", + "Directory patterns to avoid for recursion", + type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "Directories to search for tests when no files or directories are given on the " + "command line", + type="args", + default=[], + ) + group = parser.getgroup("general", "Running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="Exit instantly on first error or failed test", + ) + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="Set which warnings to report, see -W option of Python itself", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="Exit after first num failures or errors", + ) + group._addoption( + "--strict-config", + action="store_true", + help="Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + ) + group._addoption( + "--strict-markers", + action="store_true", + help="Markers not registered in the `markers` section of the configuration " + "file raise errors", + ) + group._addoption( + "--strict", + action="store_true", + help="(Deprecated) alias to --strict-markers", + ) + group._addoption( + "-c", + "--config-file", + metavar="FILE", + type=str, + dest="inifilename", + help="Load configuration from `FILE` instead of trying to locate one of the " + "implicit configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="Only collect tests, don't execute them", + ) + group.addoption( + "--pyargs", + action="store_true", + help="Try to interpret all arguments as Python packages", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="Ignore path during collection (multi-allowed)", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="Ignore path pattern during collection (multi-allowed)", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="Deselect item (via node id prefix) during collection (multi-allowed)", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="Only load conftest.py's relative to specified dir", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="Prepend/append to sys.path when importing test modules and conftest " + "files. Default: prepend.", + ) + parser.addini( + "consider_namespace_packages", + type="bool", + default=False, + help="Consider namespace packages when resolving module names during import", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "Base temporary directory for this test run. 
" + "(Warning: this directory is removed if it exists.)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, Session], int | ExitCode | None] +) -> int | ExitCode: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: int | ExitCode = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. 
+ excinfo = None # type: ignore + os.chdir(session.startpath) + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> int | ExitCode: + return wrap_session(config, _main) + + +def _main(config: Config, session: Session) -> int | ExitCode | None: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: Session) -> None: + session.perform_collect() + + +def pytest_runtestloop(session: Session) -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + "%d error%s during collection" + % (session.testsfailed, "s" if session.testsfailed != 1 else "") + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: Path) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the pyvenv.cfg file. + + [https://peps.python.org/pep-0405/] + + For regression protection we also check for conda environments that do not include pyenv.cfg yet -- + https://github.com/conda/conda/issues/13337 is the conda issue tracking adding pyenv.cfg. + + Checking for the `conda-meta/history` file per https://github.com/pytest-dev/pytest/issues/12652#issuecomment-2246336902. 
+ + """ + try: + return ( + path.joinpath("pyvenv.cfg").is_file() + or path.joinpath("conda-meta", "history").is_file() + ) + except OSError: + return False + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> bool | None: + if collection_path.name == "__pycache__": + return True + + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + + if collection_path.is_dir(): + norecursepatterns = config.getini("norecursedirs") + if any(fnmatch_ex(pat, collection_path) for pat in norecursepatterns): + return True + + return None + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + return Dir.from_parent(parent, path=path) + + +def pytest_collection_modifyitems(items: list[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__( + self, + pm: PytestPluginManager, + remove_mods: AbstractSet[object], + ) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str) -> pluggy.HookCaller: + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@dataclasses.dataclass +class _bestrelpath_cache(Dict[Path, str]): + __slots__ = ("path",) + + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Dir(nodes.Directory): + """Collector of files in a file system directory. + + .. versionadded:: 8.0 + + .. note:: + + Python directories with an `__init__.py` file are instead collected by + :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` + collectors. + """ + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: nodes.Collector, + *, + path: Path, + ) -> Self: + """The public constructor. + + :param parent: The parent collector of this Dir. + :param path: The directory's path. 
+ :type path: pathlib.Path + """ + return super().from_parent(parent=parent, path=path) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +@final +class Session(nodes.Collector): + """The root of the collection tree. + + ``Session`` collects the initial paths given as arguments to pytest. + """ + + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: int | ExitCode + + def __init__(self, config: Config) -> None: + super().__init__( + name="", + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self._shouldstop: bool | str = False + self._shouldfail: bool | str = False + self.trace = config.trace.root.get("collection") + self._initialpaths: frozenset[Path] = frozenset() + self._initialpaths_with_parents: frozenset[Path] = frozenset() + self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: list[CollectionArgument] = [] + self._collection_cache: dict[nodes.Collector, CollectReport] = {} + self.items: list[nodes.Item] = [] + + self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> Session: + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + @property + def shouldstop(self) -> bool | str: + return self._shouldstop + + @shouldstop.setter + def shouldstop(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldstop: + warnings.warn( + PytestWarning( + "session.shouldstop cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldstop = value + + @property + def shouldfail(self) -> bool | str: + return self._shouldfail + + @shouldfail.setter + def shouldfail(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldfail: + warnings.warn( + PytestWarning( + "session.shouldfail cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldfail = value + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. 
versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath( + self, + path: str | os.PathLike[str], + *, + with_parents: bool = False, + ) -> bool: + """Is path an initial path? + + An initial path is a path explicitly given to pytest on the command + line. + + :param with_parents: + If set, also return True if the path is a parent of an initial path. + + .. versionchanged:: 8.0 + Added the ``with_parents`` parameter. + """ + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + if with_parents: + return path_ in self._initialpaths_with_parents + else: + return path_ in self._initialpaths + + def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay: + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. + my_conftestmodules = pm._getconftestmodules(path) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + proxy: pluggy.HookRelay + if remove_mods: + # One or more conftests are not in use at this path. + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment] + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _collect_path( + self, + path: Path, + path_cache: dict[Path, Sequence[nodes.Collector]], + ) -> Sequence[nodes.Collector]: + """Create a Collector for the given path. + + `path_cache` makes it so the same Collectors are returned for the same + path. + """ + if path in path_cache: + return path_cache[path] + + if path.is_dir(): + ihook = self.gethookproxy(path.parent) + col: nodes.Collector | None = ihook.pytest_collect_directory( + path=path, parent=self + ) + cols: Sequence[nodes.Collector] = (col,) if col is not None else () + + elif path.is_file(): + ihook = self.gethookproxy(path) + cols = ihook.pytest_collect_file(file_path=path, parent=self) + + else: + # Broken symlink or invalid/missing file. + cols = () + + path_cache[path] = cols + return cols + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: Literal[True] = ... + ) -> Sequence[nodes.Item]: ... + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: bool = ... + ) -> Sequence[nodes.Item | nodes.Collector]: ... + + def perform_collect( + self, args: Sequence[str] | None = None, genitems: bool = True + ) -> Sequence[nodes.Item | nodes.Collector]: + """Perform the collection phase for this session. 
+ + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. + """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + hook = self.config.hook + + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + self.items = [] + items: Sequence[nodes.Item | nodes.Collector] = self.items + try: + initialpaths: list[Path] = [] + initialpaths_with_parents: list[Path] = [] + for arg in args: + collection_argument = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append(collection_argument) + initialpaths.append(collection_argument.path) + initialpaths_with_parents.append(collection_argument.path) + initialpaths_with_parents.extend(collection_argument.path.parents) + self._initialpaths = frozenset(initialpaths) + self._initialpaths_with_parents = frozenset(initialpaths_with_parents) + + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, collectors in self._notfound: + if collectors: + errors.append( + f"not found: {arg}\n(no match in any of {collectors!r})" + ) + else: + errors.append(f"found no collectors for {arg}") + + raise UsageError(*errors) + + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + hook.pytest_collection_finish(session=self) + + if genitems: + self.testscollected = len(items) + + return items + + def _collect_one_node( + self, + node: nodes.Collector, + handle_dupes: bool = True, + ) -> tuple[CollectReport, bool]: + if node in self._collection_cache and handle_dupes: + rep = self._collection_cache[node] + return rep, True + else: + rep = collect_one_node(node) + self._collection_cache[node] = rep + return rep, False + + def collect(self) -> Iterator[nodes.Item | nodes.Collector]: + # This is a cache for the root directories of the initial paths. + # We can't use collection_cache for Session because of its special + # role as the bootstrapping collector. + path_cache: dict[Path, Sequence[nodes.Collector]] = {} + + pm = self.config.pluginmanager + + for collection_argument in self._initial_parts: + self.trace("processing argument", collection_argument) + self.trace.root.indent += 1 + + argpath = collection_argument.path + names = collection_argument.parts + module_name = collection_argument.module_name + + # resolve_collection_argument() ensures this. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + paths = [argpath] + # Add relevant parents of the path, from the root, e.g. + # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py] + if module_name is None: + # Paths outside of the confcutdir should not be considered. 
+ for path in argpath.parents: + if not pm._is_in_confcutdir(path): + break + paths.insert(0, path) + else: + # For --pyargs arguments, only consider paths matching the module + # name. Paths beyond the package hierarchy are not included. + module_name_parts = module_name.split(".") + for i, path in enumerate(argpath.parents, 2): + if i > len(module_name_parts) or path.stem != module_name_parts[-i]: + break + paths.insert(0, path) + + # Start going over the parts from the root, collecting each level + # and discarding all nodes which don't match the level's part. + any_matched_in_initial_part = False + notfound_collectors = [] + work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [ + (self, [*paths, *names]) + ] + while work: + matchnode, matchparts = work.pop() + + # Pop'd all of the parts, this is a match. + if not matchparts: + yield matchnode + any_matched_in_initial_part = True + continue + + # Should have been matched by now, discard. + if not isinstance(matchnode, nodes.Collector): + continue + + # Collect this level of matching. + # Collecting Session (self) is done directly to avoid endless + # recursion to this function. + subnodes: Sequence[nodes.Collector | nodes.Item] + if isinstance(matchnode, Session): + assert isinstance(matchparts[0], Path) + subnodes = matchnode._collect_path(matchparts[0], path_cache) + else: + # For backward compat, files given directly multiple + # times on the command line should not be deduplicated. + handle_dupes = not ( + len(matchparts) == 1 + and isinstance(matchparts[0], Path) + and matchparts[0].is_file() + ) + rep, duplicate = self._collect_one_node(matchnode, handle_dupes) + if not duplicate and not rep.passed: + # Report collection failures here to avoid failing to + # run some test specified in the command line because + # the module could not be imported (#134). + matchnode.ihook.pytest_collectreport(report=rep) + if not rep.passed: + continue + subnodes = rep.result + + # Prune this level. + any_matched_in_collector = False + for node in reversed(subnodes): + # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`. + if isinstance(matchparts[0], Path): + is_match = node.path == matchparts[0] + if sys.platform == "win32" and not is_match: + # In case the file paths do not match, fallback to samefile() to + # account for short-paths on Windows (#11895). + same_file = os.path.samefile(node.path, matchparts[0]) + # We don't want to match links to the current node, + # otherwise we would match the same file more than once (#12039). + is_match = same_file and ( + os.path.islink(node.path) + == os.path.islink(matchparts[0]) + ) + + # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. + else: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. 
+ is_match = ( + node.name == matchparts[0] + or node.name.split("[")[0] == matchparts[0] + ) + if is_match: + work.append((node, matchparts[1:])) + any_matched_in_collector = True + + if not any_matched_in_collector: + notfound_collectors.append(matchnode) + + if not any_matched_in_initial_part: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, notfound_collectors)) + + self.trace.root.indent -= 1 + + def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + keepduplicates = self.config.getoption("keepduplicates") + # For backward compat, dedup only applies to files. + handle_dupes = not (keepduplicates and isinstance(node, nodes.File)) + rep, duplicate = self._collect_one_node(node, handle_dupes) + if duplicate and not keepduplicates: + return + if rep.passed: + for subnode in rep.result: + yield from self.genitems(subnode) + if not duplicate: + node.ihook.pytest_collectreport(report=rep) + + +def search_pypath(module_name: str) -> str | None: + """Search sys.path for the given a dotted module name, and return its file + system path if found.""" + try: + spec = importlib.util.find_spec(module_name) + # AttributeError: looks like package module, but actually filename + # ImportError: module does not exist + # ValueError: not a module name + except (AttributeError, ImportError, ValueError): + return None + if spec is None or spec.origin is None or spec.origin == "namespace": + return None + elif spec.submodule_search_locations: + return os.path.dirname(spec.origin) + else: + return spec.origin + + +@dataclasses.dataclass(frozen=True) +class CollectionArgument: + """A resolved collection argument.""" + + path: Path + parts: Sequence[str] + module_name: str | None + + +def resolve_collection_argument( + invocation_path: Path, arg: str, *, as_pypath: bool = False +) -> CollectionArgument: + """Parse path arguments optionally containing selection parts and return (fspath, names). + + Command-line arguments can point to files and/or directories, and optionally contain + parts for specific tests selection, for example: + + "pkg/tests/test_foo.py::TestClass::test_foo" + + This function ensures the path exists, and returns a resolved `CollectionArgument`: + + CollectionArgument( + path=Path("/full/path/to/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + module_name=None, + ) + + When as_pypath is True, expects that the command-line argument actually contains + module paths instead of file-system paths: + + "pkg.tests.test_foo::TestClass::test_foo" + + In which case we search sys.path for a matching module, and then return the *path* to the + found module, which may look like this: + + CollectionArgument( + path=Path("/home/u/myvenv/lib/site-packages/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + module_name="pkg.tests.test_foo", + ) + + If the path doesn't exist, raise UsageError. + If the path is a directory and selection parts are present, raise UsageError. 
+ """ + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" + module_name = None + if as_pypath: + pyarg_strpath = search_pypath(strpath) + if pyarg_strpath is not None: + module_name = strpath + strpath = pyarg_strpath + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not safe_exists(fspath): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return CollectionArgument( + path=fspath, + parts=parts, + module_name=module_name, + ) diff --git a/.venv/Lib/site-packages/_pytest/mark/__init__.py b/.venv/Lib/site-packages/_pytest/mark/__init__.py new file mode 100644 index 00000000..a4f942c5 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,292 @@ +"""Generic mechanism for marking and selecting python functions.""" + +from __future__ import annotations + +import collections +import dataclasses +from typing import AbstractSet +from typing import Collection +from typing import Iterable +from typing import Optional +from typing import TYPE_CHECKING + +from .expression import Expression +from .expression import ParseError +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import NOT_SET +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Optional[Config]]() + + +def param( + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | None = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + :param marks: A single mark or a list of marks to be applied to this parameter set. + :param id: The id to attribute to this parameter set. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Only run tests which match the given substring expression. " + "An expression is a Python evaluable expression " + "where all names are substring-matched against test names " + "and their parent classes. 
Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="Only run tests matching given mark expression. " + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "Register new markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write(f"@pytest.mark.{name}:", bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@dataclasses.dataclass +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + __slots__ = ("_names",) + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: Item) -> KeywordMatcher: + mapped_names = set() + + # Add the names of the current item and any parent items, + # except the Session and root Directory's which are not + # interesting for matching. + import pytest + + for node in item.listchain(): + if isinstance(node, pytest.Session): + continue + if isinstance(node, pytest.Directory) and isinstance( + node.parent, pytest.Session + ): + continue + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. 
+ mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str, /, **kwargs: str | int | bool | None) -> bool: + if kwargs: + raise UsageError("Keyword expressions do not support call parameters.") + subname = subname.lower() + names = (name.lower() for name in self._names) + + for name in names: + if subname in name: + return True + return False + + +def deselect_by_keyword(items: list[Item], config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@dataclasses.dataclass +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. + """ + + __slots__ = ("own_mark_name_mapping",) + + own_mark_name_mapping: dict[str, list[Mark]] + + @classmethod + def from_markers(cls, markers: Iterable[Mark]) -> MarkMatcher: + mark_name_mapping = collections.defaultdict(list) + for mark in markers: + mark_name_mapping[mark.name].append(mark) + return cls(mark_name_mapping) + + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: + if not (matches := self.own_mark_name_mapping.get(name, [])): + return False + + for mark in matches: + if all(mark.kwargs.get(k, NOT_SET) == v for k, v in kwargs.items()): + return True + + return False + + +def deselect_by_mark(items: list[Item], config: Config) -> None: + matchexpr = config.option.markexpr + if not matchexpr: + return + + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: list[Item] = [] + deselected: list[Item] = [] + for item in items: + if expr.evaluate(MarkMatcher.from_markers(item.iter_markers())): + remaining.append(item) + else: + deselected.append(item) + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except ParseError as e: + raise UsageError(f"{exc_message}: {expr}: {e}") from None + + +def pytest_collection_modifyitems(items: list[Item], config: Config) -> None: + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config: Config) -> None: + config.stash[old_mark_config_key] = MARK_GEN._config + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + f"{EMPTY_PARAMETERSET_OPTION!s} must be one of skip, xfail or fail_at_collect" + f" but it is {empty_parameterset!r}" + ) + + +def pytest_unconfigure(config: Config) -> None: + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/.venv/Lib/site-packages/_pytest/mark/expression.py b/.venv/Lib/site-packages/_pytest/mark/expression.py new file mode 100644 index 00000000..89cc0e94 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/mark/expression.py @@ -0,0 +1,333 @@ +r"""Evaluate match expressions, as used by `-k` and `-m`. + +The grammar is: + +expression: expr? 
EOF +expr: and_expr ('or' and_expr)* +and_expr: not_expr ('and' not_expr)* +not_expr: 'not' not_expr | '(' expr ')' | ident kwargs? + +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ +kwargs: ('(' name '=' value ( ', ' name '=' value )* ')') +name: a valid ident, but not a reserved keyword +value: (unescaped) string literal | (-)?[0-9]+ | 'False' | 'True' | 'None' + +The semantics are: + +- Empty expression evaluates to False. +- ident evaluates to True or False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. +- ident with parentheses and keyword arguments evaluates to True or False according to a provided matcher function. +""" + +from __future__ import annotations + +import ast +import dataclasses +import enum +import keyword +import re +import types +from typing import Iterator +from typing import Literal +from typing import Mapping +from typing import NoReturn +from typing import overload +from typing import Protocol +from typing import Sequence + + +__all__ = [ + "Expression", + "ParseError", +] + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + EQUAL = "=" + STRING = "string literal" + COMMA = "," + + +@dataclasses.dataclass(frozen=True) +class Token: + __slots__ = ("type", "value", "pos") + type: TokenType + value: str + pos: int + + +class ParseError(Exception): + """The expression contains invalid syntax. + + :param column: The column in the line where the error occurred (1-based). + :param message: A description of the error. + """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("tokens", "current") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + elif input[pos] == "=": + yield Token(TokenType.EQUAL, "=", pos) + pos += 1 + elif input[pos] == ",": + yield Token(TokenType.COMMA, ",", pos) + pos += 1 + elif (quote_char := input[pos]) in ("'", '"'): + end_quote_pos = input.find(quote_char, pos + 1) + if end_quote_pos == -1: + raise ParseError( + pos + 1, + f'closing quote "{quote_char}" is missing', + ) + value = input[pos : end_quote_pos + 1] + if (backslash_pos := input.find("\\")) != -1: + raise ParseError( + backslash_pos + 1, + r'escaping with "\" not supported in marker expression', + ) + yield Token(TokenType.STRING, value, pos) + pos += len(value) + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, + f'unexpected character "{input[pos]}"', + ) + yield Token(TokenType.EOF, "", pos) + + @overload + def accept(self, type: TokenType, *, reject: Literal[True]) -> Token: ... 
+ + @overload + def accept( + self, type: TokenType, *, reject: Literal[False] = False + ) -> Token | None: ... + + def accept(self, type: TokenType, *, reject: bool = False) -> Token | None: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> NoReturn: + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. +IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = ast.Constant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + name = ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + if s.accept(TokenType.LPAREN): + ret = ast.Call(func=name, args=[], keywords=all_kwargs(s)) + s.accept(TokenType.RPAREN, reject=True) + else: + ret = name + return ret + + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +BUILTIN_MATCHERS = {"True": True, "False": False, "None": None} + + +def single_kwarg(s: Scanner) -> ast.keyword: + keyword_name = s.accept(TokenType.IDENT, reject=True) + if not keyword_name.value.isidentifier(): + raise ParseError( + keyword_name.pos + 1, + f"not a valid python identifier {keyword_name.value}", + ) + if keyword.iskeyword(keyword_name.value): + raise ParseError( + keyword_name.pos + 1, + f"unexpected reserved python keyword `{keyword_name.value}`", + ) + s.accept(TokenType.EQUAL, reject=True) + + if value_token := s.accept(TokenType.STRING): + value: str | int | bool | None = value_token.value[1:-1] # strip quotes + else: + value_token = s.accept(TokenType.IDENT, reject=True) + if ( + (number := value_token.value).isdigit() + or number.startswith("-") + and number[1:].isdigit() + ): + value = int(number) + elif value_token.value in BUILTIN_MATCHERS: + value = BUILTIN_MATCHERS[value_token.value] + else: + raise ParseError( + value_token.pos + 1, + f'unexpected character/s "{value_token.value}"', + ) + + ret = ast.keyword(keyword_name.value, ast.Constant(value)) + return ret + + +def all_kwargs(s: Scanner) -> list[ast.keyword]: + ret = [single_kwarg(s)] + while s.accept(TokenType.COMMA): + ret.append(single_kwarg(s)) + return ret + + +class MatcherCall(Protocol): + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ... 
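+
+
+# A minimal usage sketch (the matcher below is a hypothetical example;
+# ``Expression`` is defined further down in this module):
+#
+#     expr = Expression.compile("slow and not serial")
+#
+#     def matcher(name: str, /, **kwargs) -> bool:
+#         return name == "slow"
+#
+#     assert expr.evaluate(matcher)  # "slow" matches, "serial" does not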
+
+
+@dataclasses.dataclass
+class MatcherNameAdapter:
+    matcher: MatcherCall
+    name: str
+
+    def __bool__(self) -> bool:
+        return self.matcher(self.name)
+
+    def __call__(self, **kwargs: str | int | bool | None) -> bool:
+        return self.matcher(self.name, **kwargs)
+
+
+class MatcherAdapter(Mapping[str, MatcherNameAdapter]):
+    """Adapts a matcher function to a locals mapping as required by eval()."""
+
+    def __init__(self, matcher: MatcherCall) -> None:
+        self.matcher = matcher
+
+    def __getitem__(self, key: str) -> MatcherNameAdapter:
+        return MatcherNameAdapter(matcher=self.matcher, name=key[len(IDENT_PREFIX) :])
+
+    def __iter__(self) -> Iterator[str]:
+        raise NotImplementedError()
+
+    def __len__(self) -> int:
+        raise NotImplementedError()
+
+
+class Expression:
+    """A compiled match expression as used by -k and -m.
+
+    The expression can be evaluated against different matchers.
+    """
+
+    __slots__ = ("code",)
+
+    def __init__(self, code: types.CodeType) -> None:
+        self.code = code
+
+    @classmethod
+    def compile(self, input: str) -> Expression:
+        """Compile a match expression.
+
+        :param input: The input expression - one line.
+        """
+        astexpr = expression(Scanner(input))
+        code: types.CodeType = compile(
+            astexpr,
+            filename="<pytest match expression>",
+            mode="eval",
+        )
+        return Expression(code)
+
+    def evaluate(self, matcher: MatcherCall) -> bool:
+        """Evaluate the match expression.
+
+        :param matcher:
+            Given an identifier, should return whether it matches or not.
+            Should be prepared to handle arbitrary strings as input.
+
+        :returns: Whether the expression matches or not.
+        """
+        ret: bool = bool(eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher)))
+        return ret
diff --git a/.venv/Lib/site-packages/_pytest/mark/structures.py b/.venv/Lib/site-packages/_pytest/mark/structures.py
new file mode 100644
index 00000000..92ade55f
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/mark/structures.py
@@ -0,0 +1,615 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import collections.abc
+import dataclasses
+import inspect
+from typing import Any
+from typing import Callable
+from typing import Collection
+from typing import final
+from typing import Iterable
+from typing import Iterator
+from typing import Mapping
+from typing import MutableMapping
+from typing import NamedTuple
+from typing import overload
+from typing import Sequence
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+import warnings
+
+from .._code import getfslineno
+from ..compat import ascii_escaped
+from ..compat import NOTSET
+from ..compat import NotSetType
+from _pytest.config import Config
+from _pytest.deprecated import check_ispytest
+from _pytest.deprecated import MARKED_FIXTURE
+from _pytest.outcomes import fail
+from _pytest.scope import _ScopeName
+from _pytest.warning_types import PytestUnknownMarkWarning
+
+
+if TYPE_CHECKING:
+    from ..nodes import Node
+
+
+EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
+
+
+def istestfunc(func) -> bool:
+    return callable(func) and getattr(func, "__name__", "") != "<lambda>"
+
+
+def get_empty_parameterset_mark(
+    config: Config, argnames: Sequence[str], func
+) -> MarkDecorator:
+    from ..nodes import Collector
+
+    fs, lineno = getfslineno(func)
+    reason = "got empty parameter set %r, function %s at %s:%d" % (
+        argnames,
+        func.__name__,
+        fs,
+        lineno,
+    )
+
+    requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
+    if requested_mark in ("", None, "skip"):
+        mark = MARK_GEN.skip(reason=reason)
+    elif requested_mark == "xfail":
"xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet(NamedTuple): + values: Sequence[object | NotSetType] + marks: Collection[MarkDecorator | Mark] + id: str | None + + @classmethod + def param( + cls, + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | None = None, + ) -> ParameterSet: + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + + if id is not None: + if not isinstance(id, str): + raise TypeError(f"Expected id to be a string, got {type(id)}: {id!r}") + id = ascii_escaped(id) + return cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: ParameterSet | Sequence[object] | object, + force_tuple: bool = False, + ) -> ParameterSet: + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. + """ + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *args, + **kwargs, + ) -> tuple[Sequence[str], bool]: + if isinstance(argnames, str): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[ParameterSet | Sequence[object] | object], + force_tuple: bool, + ) -> list[ParameterSet]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + func, + config: Config, + nodeid: str, + ) -> tuple[Sequence[str], list[ParameterSet]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. 
+ mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@final +@dataclasses.dataclass(frozen=True) +class Mark: + """A pytest mark.""" + + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Mark | None = dataclasses.field(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. + _param_ids_generated: Sequence[str] | None = dataclasses.field( + default=None, repr=False + ) + + def __init__( + self, + name: str, + args: tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Mark | None = None, + param_ids_generated: Sequence[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: Mark) -> Mark: + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Mark | None = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) + + +@dataclasses.dataclass +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. 
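+
+       For example, ``pytest.mark.timeout(10)`` follows rule 3: ``10`` is
+       neither a class nor a test function, so a new ``MarkDecorator`` is
+       returned with ``10`` appended to the stored args.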
+ + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. + """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> MarkDecorator: + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. + @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[overload-overlap] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> MarkDecorator: + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark, stacklevel=3) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks( + obj: object | type, + *, + consider_mro: bool = True, +) -> list[Mark]: + """Obtain the unpacked marks that are stored on an object. + + If obj is a class and consider_mro is true, return marks applied to + this class and all of its super-classes in MRO order. If consider_mro + is false, only return marks applied directly to this class. + """ + if isinstance(obj, type): + if not consider_mro: + mark_lists = [obj.__dict__.get("pytestmark", [])] + else: + mark_lists = [ + x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__) + ] + mark_list = [] + for item in mark_lists: + if isinstance(item, list): + mark_list.extend(item) + else: + mark_list.append(item) + else: + mark_attribute = getattr(obj, "pytestmark", []) + if isinstance(mark_attribute, list): + mark_list = mark_attribute + else: + mark_list = [mark_attribute] + return list(normalize_mark_list(mark_list)) + + +def normalize_mark_list( + mark_list: Iterable[Mark | MarkDecorator], +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {mark_obj!r} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark, *, stacklevel: int = 2) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. 
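+
+    A minimal sketch of the effect (``fn`` is a stand-in test function):
+
+    .. code-block:: python
+
+        def fn(): ...
+
+        store_mark(fn, Mark("slow", (), {}, _ispytest=True))
+        assert get_unpacked_marks(fn)[0].name == "slow"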
+ """ + assert isinstance(mark, Mark), mark + + from ..fixtures import getfixturemarker + + if getfixturemarker(obj) is not None: + warnings.warn(MARKED_FIXTURE, stacklevel=stacklevel) + + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__(self, reason: str = ...) -> MarkDecorator: ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: str | bool = ..., + *conditions: str | bool, + reason: str = ..., + ) -> MarkDecorator: ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__( + self, + condition: str | bool = False, + *conditions: str | bool, + reason: str = ..., + run: bool = ..., + raises: None | type[BaseException] | tuple[type[BaseException], ...] = ..., + strict: bool = ..., + ) -> MarkDecorator: ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *, + indirect: bool | Sequence[str] = ..., + ids: Iterable[None | str | float | int | bool] + | Callable[[Any], object | None] + | None = ..., + scope: _ScopeName | None = ..., + ) -> MarkDecorator: ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Config | None = None + self._markers: set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. 
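+                    # e.g. "slow(reason): marks a slow test" -> "slow"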
+                    marker = line.split(":")[0].split("(")[0].strip()
+                    self._markers.add(marker)
+
+            # If the name is not in the set of known marks after updating,
+            # then it really is time to issue a warning or an error.
+            if name not in self._markers:
+                if self._config.option.strict_markers or self._config.option.strict:
+                    fail(
+                        f"{name!r} not found in `markers` configuration option",
+                        pytrace=False,
+                    )
+
+                # Raise a specific error for common misspellings of "parametrize".
+                if name in ["parameterize", "parametrise", "parameterise"]:
+                    __tracebackhide__ = True
+                    fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")
+
+                warnings.warn(
+                    f"Unknown pytest.mark.{name} - is this a typo? You can register "
+                    "custom marks to avoid this warning - for details, see "
+                    "https://docs.pytest.org/en/stable/how-to/mark.html",
+                    PytestUnknownMarkWarning,
+                    2,
+                )
+
+        return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
+
+
+MARK_GEN = MarkGenerator(_ispytest=True)
+
+
+@final
+class NodeKeywords(MutableMapping[str, Any]):
+    __slots__ = ("node", "parent", "_markers")
+
+    def __init__(self, node: Node) -> None:
+        self.node = node
+        self.parent = node.parent
+        self._markers = {node.name: True}
+
+    def __getitem__(self, key: str) -> Any:
+        try:
+            return self._markers[key]
+        except KeyError:
+            if self.parent is None:
+                raise
+            return self.parent.keywords[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self._markers[key] = value
+
+    # Note: we could've avoided explicitly implementing some of the methods
+    # below and used the collections.abc fallback, but that would be slow.
+
+    def __contains__(self, key: object) -> bool:
+        return (
+            key in self._markers
+            or self.parent is not None
+            and key in self.parent.keywords
+        )
+
+    def update(  # type: ignore[override]
+        self,
+        other: Mapping[str, Any] | Iterable[tuple[str, Any]] = (),
+        **kwds: Any,
+    ) -> None:
+        self._markers.update(other)
+        self._markers.update(kwds)
+
+    def __delitem__(self, key: str) -> None:
+        raise ValueError("cannot delete key in keywords dict")
+
+    def __iter__(self) -> Iterator[str]:
+        # Doesn't need to be fast.
+        yield from self._markers
+        if self.parent is not None:
+            for keyword in self.parent.keywords:
+                # self._markers and self.parent.keywords can have duplicates.
+                if keyword not in self._markers:
+                    yield keyword
+
+    def __len__(self) -> int:
+        # Doesn't need to be fast.
+        return sum(1 for keyword in self)
+
+    def __repr__(self) -> str:
+        return f"<NodeKeywords for node {self.node}>"
diff --git a/.venv/Lib/site-packages/_pytest/monkeypatch.py b/.venv/Lib/site-packages/_pytest/monkeypatch.py
new file mode 100644
index 00000000..46eb1724
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/monkeypatch.py
@@ -0,0 +1,415 @@
+# mypy: allow-untyped-defs
+"""Monkeypatching and mocking functionality."""
+
+from __future__ import annotations
+
+from contextlib import contextmanager
+import os
+import re
+import sys
+from typing import Any
+from typing import final
+from typing import Generator
+from typing import Mapping
+from typing import MutableMapping
+from typing import overload
+from typing import TypeVar
+import warnings
+
+from _pytest.fixtures import fixture
+from _pytest.warning_types import PytestWarning
+
+
+RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")
+
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+@fixture
+def monkeypatch() -> Generator[MonkeyPatch]:
+    """A convenient fixture for monkey-patching.
+
+    The fixture provides these methods to modify objects, dictionaries, or
+    :data:`os.environ`:
+
+    * :meth:`monkeypatch.setattr(obj, name, value, raising=True) <pytest.MonkeyPatch.setattr>`
+    * :meth:`monkeypatch.delattr(obj, name, raising=True) <pytest.MonkeyPatch.delattr>`
+    * :meth:`monkeypatch.setitem(mapping, name, value) <pytest.MonkeyPatch.setitem>`
+    * :meth:`monkeypatch.delitem(obj, name, raising=True) <pytest.MonkeyPatch.delitem>`
+    * :meth:`monkeypatch.setenv(name, value, prepend=None) <pytest.MonkeyPatch.setenv>`
+    * :meth:`monkeypatch.delenv(name, raising=True) <pytest.MonkeyPatch.delenv>`
+    * :meth:`monkeypatch.syspath_prepend(path) <pytest.MonkeyPatch.syspath_prepend>`
+    * :meth:`monkeypatch.chdir(path) <pytest.MonkeyPatch.chdir>`
+    * :meth:`monkeypatch.context() <pytest.MonkeyPatch.context>`
+
+    All modifications will be undone after the requesting test function or
+    fixture has finished. The ``raising`` parameter determines if a :class:`KeyError`
+    or :class:`AttributeError` will be raised if the set/deletion operation does not have the
+    specified target.
+
+    To undo modifications done by the fixture in a contained scope,
+    use :meth:`context() <pytest.MonkeyPatch.context>`.
+    """
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
+
+
+def resolve(name: str) -> object:
+    # Simplified from zope.dottedname.
+    parts = name.split(".")
+
+    used = parts.pop(0)
+    found: object = __import__(used)
+    for part in parts:
+        used += "." + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # We use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions.
+        try:
+            __import__(used)
+        except ImportError as ex:
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError(f"import error in {used}: {ex}") from ex
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj: object, name: str, ann: str) -> object:
+    try:
+        obj = getattr(obj, name)
+    except AttributeError as e:
+        raise AttributeError(
+            f"{type(obj).__name__!r} object at {ann} has no attribute {name!r}"
+        ) from e
+    return obj
+
+
+def derive_importpath(import_path: str, raising: bool) -> tuple[str, object]:
+    if not isinstance(import_path, str) or "." not in import_path:
+        raise TypeError(f"must be absolute import path string, not {import_path!r}")
+    module, attr = import_path.rsplit(".", 1)
+    target = resolve(module)
+    if raising:
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset:
+    def __repr__(self) -> str:
+        return "<notset>"
+
+
+notset = Notset()
+
+
+@final
+class MonkeyPatch:
+    """Helper to conveniently monkeypatch attributes/items/environment
+    variables/syspath.
+
+    Returned by the :fixture:`monkeypatch` fixture.
+
+    .. versionchanged:: 6.2
+        Can now also be used directly as `pytest.MonkeyPatch()`, for when
+        the fixture is not available. In this case, use
+        :meth:`with MonkeyPatch.context() as mp: <pytest.MonkeyPatch.context>` or remember to call
+        :meth:`undo` explicitly.
+    """
+
+    def __init__(self) -> None:
+        self._setattr: list[tuple[object, str, object]] = []
+        self._setitem: list[tuple[Mapping[Any, Any], object, object]] = []
+        self._cwd: str | None = None
+        self._savesyspath: list[str] | None = None
+
+    @classmethod
+    @contextmanager
+    def context(cls) -> Generator[MonkeyPatch]:
+        """Context manager that returns a new :class:`MonkeyPatch` object
+        which undoes any patching done inside the ``with`` block upon exit.
+
+        Example:
+
+        .. code-block:: python
+
+            import functools
+
+
+            def test_partial(monkeypatch):
+                with monkeypatch.context() as m:
+                    m.setattr(functools, "partial", 3)
+
+        Useful in situations where it is desired to undo some patches before the test ends,
+        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
+        of this see :issue:`3290`).
+        """
+        m = cls()
+        try:
+            yield m
+        finally:
+            m.undo()
+
+    @overload
+    def setattr(
+        self,
+        target: str,
+        name: object,
+        value: Notset = ...,
+        raising: bool = ...,
+    ) -> None: ...
+
+    @overload
+    def setattr(
+        self,
+        target: object,
+        name: str,
+        value: object,
+        raising: bool = ...,
+    ) -> None: ...
+
+    def setattr(
+        self,
+        target: str | object,
+        name: object | str,
+        value: object = notset,
+        raising: bool = True,
+    ) -> None:
+        """
+        Set attribute value on target, memorizing the old value.
+
+        For example:
+
+        .. code-block:: python
+
+            import os
+
+            monkeypatch.setattr(os, "getcwd", lambda: "/")
+
+        The code above replaces the :func:`os.getcwd` function by a ``lambda`` which
+        always returns ``"/"``.
+
+        For convenience, you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name:
+
+        .. code-block:: python
+
+            monkeypatch.setattr("os.getcwd", lambda: "/")
+
+        Raises :class:`AttributeError` if the attribute does not exist, unless
+        ``raising`` is set to False.
+
+        **Where to patch**
+
+        ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one.
+        There can be many names pointing to any individual object, so for patching to work you must ensure
+        that you patch the name used by the system under test.
+
+        See the section :ref:`Where to patch ` in the :mod:`unittest.mock`
+        docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but
+        applies to ``monkeypatch.setattr`` as well.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(value, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use setattr(target, name, value) or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+            value = name
+            name, target = derive_importpath(target, raising)
+        else:
+            if not isinstance(name, str):
+                raise TypeError(
+                    "use setattr(target, name, value) with name being a string or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+
+        oldval = getattr(target, name, notset)
+        if raising and oldval is notset:
+            raise AttributeError(f"{target!r} has no attribute {name!r}")
+
+        # avoid class descriptors like staticmethod/classmethod
+        if inspect.isclass(target):
+            oldval = target.__dict__.get(name, notset)
+        self._setattr.append((target, name, oldval))
+        setattr(target, name, value)
+
+    def delattr(
+        self,
+        target: object | str,
+        name: str | Notset = notset,
+        raising: bool = True,
+    ) -> None:
+        """Delete attribute ``name`` from ``target``.
+
+        If no ``name`` is specified and ``target`` is a string
+        it will be interpreted as a dotted import path with the
+        last part being the attribute name.
+
+        Raises AttributeError if the attribute does not exist, unless
+        ``raising`` is set to False.
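+
+        For example (a minimal sketch):
+
+        .. code-block:: python
+
+            import os
+
+            monkeypatch.delattr("os.getcwd")
+            assert not hasattr(os, "getcwd")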
+ """ + __tracebackhide__ = True + import inspect + + if isinstance(name, Notset): + if not isinstance(target, str): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None: + """Set dictionary entry ``name`` to value.""" + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dic[name] = value # type: ignore[index] + + def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None: + """Delete ``name`` from dict. + + Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to + False. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dic[name] # type: ignore[attr-defined] + + def setenv(self, name: str, value: str, prepend: str | None = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + f"Value of environment variable {name} type should be str, but got " + f"{value!r} (type: {type(value).__name__}); converted to str implicitly" + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. + """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + from pkg_resources import fixup_namespace_packages + + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path: str | os.PathLike[str]) -> None: + """Change the current working directory to the specified path. + + :param path: + The path to change into. 
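+
+        For example (a minimal sketch; the change is reverted automatically
+        when the fixture finalizes, via :meth:`undo`):
+
+        .. code-block:: python
+
+            def test_in_tmp_dir(monkeypatch, tmp_path):
+                monkeypatch.chdir(tmp_path)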
+ """ + if self._cwd is None: + self._cwd = os.getcwd() + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + .. note:: + The same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + + Prefer to use :meth:`context() ` instead. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dictionary[key] # type: ignore[attr-defined] + except KeyError: + pass # Was already deleted, so we have the desired state. + else: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dictionary[key] = value # type: ignore[index] + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/.venv/Lib/site-packages/_pytest/nodes.py b/.venv/Lib/site-packages/_pytest/nodes.py new file mode 100644 index 00000000..51bc5174 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/nodes.py @@ -0,0 +1,766 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from functools import cached_property +from inspect import signature +import os +import pathlib +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterable +from typing import Iterator +from typing import MutableMapping +from typing import NoReturn +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +import pluggy + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._code.code import TracebackStyle +from _pytest.compat import LEGACY_PATH +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config.compat import _check_path +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + # Imported here due to circular import. 
+ from _pytest.main import Session + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +_T = TypeVar("_T") + + +def _imply_path( + node_type: type[Node], + path: Path | None, + fspath: LEGACY_PATH | None, +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(abc.ABCMeta): + """Metaclass used by :class:`Node` to enforce that direct construction raises + :class:`Failed`. + + This behaviour supports the indirection introduced with :meth:`Node.from_parent`, + the named constructor to be used instead of direct construction. The design + decision to enforce indirection with :class:`NodeMeta` was made as a + temporary aid for refactoring the collection tree, which was diagnosed to + have :class:`Node` objects whose creational patterns were overly entangled. + Once the refactoring is complete, this metaclass can be removed. + + See https://github.com/pytest-dev/pytest/projects/3 for an overview of the + progress on detangling the :class:`Node` classes. + """ + + def __call__(cls, *k, **kw) -> NoReturn: + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=f"{cls.__module__}.{cls.__name__}") + fail(msg, pytrace=False) + + def _create(cls: type[_T], *k, **kw) -> _T: + try: + return super().__call__(*k, **kw) # type: ignore[no-any-return,misc] + except TypeError: + sig = signature(getattr(cls, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{cls} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) # type: ignore[no-any-return,misc] + + +class Node(abc.ABC, metaclass=NodeMeta): + r"""Base class of :class:`Collector` and :class:`Item`, the components of + the test collection tree. + + ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the + leaf nodes. + """ + + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo `. Will be deprecated in + #: a future release, prefer using :attr:`path` instead. + fspath: LEGACY_PATH + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. + __slots__ = ( + "name", + "parent", + "config", + "session", + "path", + "_nodeid", + "_store", + "__dict__", + ) + + def __init__( + self, + name: str, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + nodeid: str | None = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name: str = name + + #: The parent collector node. + self.parent = parent + + if config: + #: The pytest config object. 
+ self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + if session: + #: The pytest session this node is part of. + self.session: Session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + if path is None and fspath is None: + path = getattr(parent, "path", None) + #: Filesystem path where this node was collected from (can be None). + self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath) + + # The explicit annotation is to avoid publicly exposing NodeKeywords. + #: Keywords/markers collected from all scopes. + self.keywords: MutableMapping[str, Any] = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: list[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + "::" + self.name + + #: A place where plugins can store information on the node for their + #: own use. + self.stash: Stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + @classmethod + def from_parent(cls, parent: Node, **kw) -> Self: + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self) -> pluggy.HookRelay: + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.path) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. + if not isinstance(warning, Warning): + raise ValueError( + f"warning must be an instance of Warning or subclass, got {warning!r}" + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1, + ) + + # Methods for ordering nodes. 
+ + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def iter_parents(self) -> Iterator[Node]: + """Iterate over all parent collectors starting from and including self + up to the root of the collection tree. + + .. versionadded:: 8.1 + """ + parent: Node | None = self + while parent is not None: + yield parent + parent = parent.parent + + def listchain(self) -> list[Node]: + """Return a list of all parent collectors starting from the root of the + collection tree down to and including self.""" + chain = [] + item: Node | None = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None: + """Dynamically add a marker object to the node. + + :param marker: + The marker. + :param append: + Whether to append the marker, or prepend it. + """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: str | None = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of the markers of the node. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: str | None = None + ) -> Iterator[tuple[Node, Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in self.iter_parents(): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Mark | None: ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: ... + + def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> list[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called without arguments when this node is + finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: type[_NodeType]) -> _NodeType | None: + """Get the closest parent node (including self) which is an instance of + the given class. 
+
+        :param cls: The node class to search for.
+        :returns: The node, if found.
+        """
+        for node in self.iter_parents():
+            if isinstance(node, cls):
+                return node
+        return None
+
+    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
+        return excinfo.traceback
+
+    def _repr_failure_py(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        style: TracebackStyle | None = None,
+    ) -> TerminalRepr:
+        from _pytest.fixtures import FixtureLookupError
+
+        if isinstance(excinfo.value, ConftestImportFailure):
+            excinfo = ExceptionInfo.from_exception(excinfo.value.cause)
+        if isinstance(excinfo.value, fail.Exception):
+            if not excinfo.value.pytrace:
+                style = "value"
+        if isinstance(excinfo.value, FixtureLookupError):
+            return excinfo.value.formatrepr()
+
+        tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback]
+        if self.config.getoption("fulltrace", False):
+            style = "long"
+            tbfilter = False
+        else:
+            tbfilter = self._traceback_filter
+            if style == "auto":
+                style = "long"
+        # XXX should excinfo.getrepr record all data and toterminal() process it?
+        if style is None:
+            if self.config.getoption("tbstyle", "auto") == "short":
+                style = "short"
+            else:
+                style = "long"
+
+        if self.config.get_verbosity() > 1:
+            truncate_locals = False
+        else:
+            truncate_locals = True
+
+        truncate_args = False if self.config.get_verbosity() > 2 else True
+
+        # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.
+        # It is possible for a fixture/test to change the CWD while this code runs, which
+        # would then result in the user seeing confusing paths in the failure message.
+        # To fix this, if the CWD changed, always display the full absolute path.
+        # It will be better to just always display paths relative to invocation_dir, but
+        # this requires a lot of plumbing (#6428).
+        try:
+            abspath = Path(os.getcwd()) != self.config.invocation_params.dir
+        except OSError:
+            abspath = True
+
+        return excinfo.getrepr(
+            funcargs=True,
+            abspath=abspath,
+            showlocals=self.config.getoption("showlocals", False),
+            style=style,
+            tbfilter=tbfilter,
+            truncate_locals=truncate_locals,
+            truncate_args=truncate_args,
+        )
+
+    def repr_failure(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        style: TracebackStyle | None = None,
+    ) -> str | TerminalRepr:
+        """Return a representation of a collection or test failure.
+
+        .. seealso:: :ref:`non-python tests`
+
+        :param excinfo: Exception information for the failure.
+        """
+        return self._repr_failure_py(excinfo, style)
+
+
+def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]:
+    """Try to extract the actual location from a node, depending on available attributes:
+
+    * "location": a pair (path, lineno)
+    * "obj": a Python object that the node wraps.
+    * "path": just a path
+
+    :rtype: A tuple of (str|Path, int) with filename and 0-based line number.
+    """
+    # See Item.location.
+    location: tuple[str, int | None, str] | None = getattr(node, "location", None)
+    if location is not None:
+        return location[:2]
+    obj = getattr(node, "obj", None)
+    if obj is not None:
+        return getfslineno(obj)
+    return getattr(node, "path", "unknown location"), -1
+
+
+class Collector(Node, abc.ABC):
+    """Base class of all collectors.
+
+    Collectors create children through `collect()` and thus iteratively build
+    the collection tree.
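+
+    A minimal sketch of a custom collector (``YamlFile`` and ``YamlItem`` are
+    hypothetical names, in the spirit of the non-python tests example):
+
+    .. code-block:: python
+
+        class YamlFile(pytest.File):
+            def collect(self):
+                for name in ("a", "b"):
+                    yield YamlItem.from_parent(self, name=name)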
+ """ + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + @abc.abstractmethod + def collect(self) -> Iterable[Item | Collector]: + """Collect children (items and collectors) for this collector.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> str | TerminalRepr: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). + tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + return ntraceback.filter(excinfo) + return excinfo.traceback + + +def _check_initialpaths_for_relpath(session: Session, path: Path) -> str | None: + for initial_path in session._initialpaths: + if commonpath(path, initial_path) == initial_path: + rel = str(path.relative_to(initial_path)) + return "" if rel == "." else rel + return None + + +class FSCollector(Collector, abc.ABC): + """Base class for filesystem collectors.""" + + def __init__( + self, + fspath: LEGACY_PATH | None = None, + path_or_parent: Path | Node | None = None, + path: Path | None = None, + name: str | None = None, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + **kw, + ) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + +class File(FSCollector, abc.ABC): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Directory(FSCollector, abc.ABC): + """Base class for collecting files from a directory. 
+ + A basic directory collector does the following: goes over the files and + sub-directories in the directory and creates collectors for them by calling + the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`, + after checking that they are not ignored using + :hook:`pytest_ignore_collect`. + + The default directory collectors are :class:`~pytest.Dir` and + :class:`~pytest.Package`. + + .. versionadded:: 8.0 + + :ref:`custom directory collectors`. + """ + + +class Item(Node, abc.ABC): + """Base class of all test invocation items. + + Note that for a single function there might be multiple test invocation items. + """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: list[tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: list[tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + @abc.abstractmethod + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. 
+ """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The 0-based line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> tuple[str, int | None, str]: + """ + Returns a tuple of ``(relfspath, lineno, testname)`` for this item + where ``relfspath`` is file path relative to ``config.rootpath`` + and lineno is a 0-based line number. + """ + location = self.reportinfo() + path = absolutepath(location[0]) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/.venv/Lib/site-packages/_pytest/outcomes.py b/.venv/Lib/site-packages/_pytest/outcomes.py new file mode 100644 index 00000000..5b20803e --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/outcomes.py @@ -0,0 +1,318 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" + +from __future__ import annotations + +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import NoReturn +from typing import Protocol +from typing import Type +from typing import TypeVar + +from .warning_types import PytestDeprecationWarning + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: str | None = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: str | None = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: int | None = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# Elaborate hack to work around https://github.com/python/mypy/issues/2087. +# Ideally would just be `exit.Exception = Exit` etc. 
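+#
+# A usage sketch of the resulting API (illustrative, not part of this module):
+# test or plugin code can catch an outcome through the class attached to the
+# helper, e.g.:
+#
+#     import pytest
+#
+#     try:
+#         pytest.exit("stopping early", returncode=3)
+#     except pytest.exit.Exception as exc:  # pytest.exit.Exception is Exit
+#         assert exc.returncode == 3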
+ +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=Type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit( + reason: str = "", + returncode: int | None = None, +) -> NoReturn: + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. None means the same as ``0`` (no error), same as :func:`sys.exit`. + + :raises pytest.exit.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Exit(reason, returncode) + + +@_with_exception(Skipped) +def skip( + reason: str = "", + *, + allow_module_level: bool = False, +) -> NoReturn: + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level. + Raising the skip exception at module level will stop + the execution of the module and prevent the collection of all tests in the module, + even those defined before the `skip` call. + + Defaults to False. + + :raises pytest.skip.Exception: + The exception that is raised. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + __tracebackhide__ = True + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail(reason: str = "", pytrace: bool = True) -> NoReturn: + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :raises pytest.fail.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Failed(msg=reason, pytrace=pytrace) + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> NoReturn: + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + No other code is executed after using ``xfail()`` (it is implemented + internally by raising an exception). + + :param reason: + The message to show the user as reason for the xfail. + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + + :raises pytest.xfail.Exception: + The exception that is raised. 
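+
+    Example (an illustrative sketch; ``feature_ready`` is a hypothetical flag)::
+
+        def test_new_feature():
+            if not feature_ready:
+                pytest.xfail("feature not implemented yet")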
+ """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, + minversion: str | None = None, + reason: str | None = None, + *, + exc_type: type[ImportError] | None = None, +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param modname: + The name of the module to import. + :param minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param reason: + If given, this reason is shown as the message when the module cannot + be imported. + :param exc_type: + The exception that should be captured in order to skip modules. + Must be :py:class:`ImportError` or a subclass. + + If the module can be imported but raises :class:`ImportError`, pytest will + issue a warning to the user, as often users expect the module not to be + found (which would raise :class:`ModuleNotFoundError` instead). + + This warning can be suppressed by passing ``exc_type=ImportError`` explicitly. + + See :ref:`import-or-skip-import-error` for details. + + + :returns: + The imported module. This should be assigned to its canonical name. + + :raises pytest.skip.Exception: + If the module cannot be imported. + + Example:: + + docutils = pytest.importorskip("docutils") + + .. versionadded:: 8.2 + + The ``exc_type`` parameter. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + # Until pytest 9.1, we will warn the user if we catch ImportError (instead of ModuleNotFoundError), + # as this might be hiding an installation/environment problem, which is not usually what is intended + # when using importorskip() (#11523). + # In 9.1, to keep the function signature compatible, we just change the code below to: + # 1. Use `exc_type = ModuleNotFoundError` if `exc_type` is not given. + # 2. Remove `warn_on_import` and the warning handling. + if exc_type is None: + exc_type = ImportError + warn_on_import_error = True + else: + warn_on_import_error = False + + skipped: Skipped | None = None + warning: Warning | None = None + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + + try: + __import__(modname) + except exc_type as exc: + # Do not raise or issue warnings inside the catch_warnings() block. + if reason is None: + reason = f"could not import {modname!r}: {exc}" + skipped = Skipped(reason, allow_module_level=True) + + if warn_on_import_error and not isinstance(exc, ModuleNotFoundError): + lines = [ + "", + f"Module '{modname}' was found, but when imported by pytest it raised:", + f" {exc!r}", + "In pytest 9.1 this warning will become an error by default.", + "You can fix the underlying problem, or alternatively overwrite this behavior and silence this " + "warning by passing exc_type=ImportError explicitly.", + "See https://docs.pytest.org/en/stable/deprecations.html#pytest-importorskip-default-behavior-regarding-importerror", + ] + warning = PytestDeprecationWarning("\n".join(lines)) + + if warning: + warnings.warn(warning, stacklevel=2) + if skipped: + raise skipped + + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. 
+ from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", + allow_module_level=True, + ) + return mod diff --git a/.venv/Lib/site-packages/_pytest/pastebin.py b/.venv/Lib/site-packages/_pytest/pastebin.py new file mode 100644 index 00000000..69c011ed --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/pastebin.py @@ -0,0 +1,113 @@ +# mypy: allow-untyped-defs +"""Submit failure or test session information to a pastebin service.""" + +from __future__ import annotations + +from io import StringIO +import tempfile +from typing import IO + +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="Send failed|all info to bpaste.net pastebin service", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line(f"pastebin session-log: {pastebinurl}\n") + + +def create_new_paste(contents: str | bytes) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. 
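+
+    Usage sketch (``contents`` may be ``str`` or ``bytes``)::
+
+        url = create_new_paste(b"session log contents")
+        # "https://bpa.st/show/..." on success, otherwise a "bad response: ..." message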
+ """ + import re + from urllib.parse import urlencode + from urllib.request import urlopen + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except OSError as exc_info: # urllib errors + return f"bad response: {exc_info}" + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/.venv/Lib/site-packages/_pytest/pathlib.py b/.venv/Lib/site-packages/_pytest/pathlib.py new file mode 100644 index 00000000..81e52ea7 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/pathlib.py @@ -0,0 +1,975 @@ +from __future__ import annotations + +import atexit +import contextlib +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +import fnmatch +from functools import partial +from importlib.machinery import ModuleSpec +import importlib.util +import itertools +import os +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +import shutil +import sys +import types +from types import ModuleType +from typing import Any +from typing import Callable +from typing import Iterable +from typing import Iterator +from typing import TypeVar +import uuid +import warnings + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception: Exception) -> bool: + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error( + func: Callable[..., Any] | None, + path: str, + excinfo: BaseException + | tuple[type[BaseException], BaseException, types.TracebackType | None], + *, + start_path: Path, +) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. 
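+
+    This handler is normally wired up by :func:`rm_rf` below, roughly::
+
+        handler = partial(on_rm_rf_error, start_path=path)
+        shutil.rmtree(str(path), onexc=handler)  # onerror= before Python 3.12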
+ """ + if isinstance(excinfo, BaseException): + exc = excinfo + else: + exc = excinfo[1] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). + # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(exc, FileNotFoundError): + return False + + if not isinstance(exc, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}" + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + if sys.version_info >= (3, 12): + shutil.rmtree(str(path), onexc=onerror) + else: + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[os.DirEntry[str]]: + """Find all elements in root that begin with the prefix, case-insensitive.""" + l_prefix = prefix.lower() + for x in os.scandir(root): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[os.DirEntry[str]], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. 
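+
+    For example, entries named ``pytest-0`` and ``pytest-12`` with prefix
+    ``pytest-`` yield the suffixes ``"0"`` and ``"12"``.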
+ """ + p_len = len(prefix) + for entry in iter: + yield entry.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num: str) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink(root: Path, target: str | PurePath, link_to: str | Path) -> None: + """Helper to create the current symlink. + + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + f"{prefix} in {root} after 10 tries" + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal( + lock_path: Path, register: Any = atexit.register +) -> Any: + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. 
+ if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). + return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + entries = find_prefixed(root, prefix) + entries, entries2 = itertools.tee(entries) + numbers = map(parse_num, extract_suffixes(entries2, prefix)) + for entry, number in zip(entries, numbers): + if number <= max_delete: + yield Path(entry) + + +def cleanup_dead_symlinks(root: Path) -> None: + for left_dir in root.iterdir(): + if left_dir.is_symlink(): + if not left_dir.resolve().exists(): + left_dir.unlink() + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + if not root.exists(): + return + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + cleanup_dead_symlinks(root) + + +def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + # Only lock the current dir when keep is not 0 + if keep != 0: + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path: str | os.PathLike[str]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. 
+ + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + +def parts(s: str) -> set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + +def symlink_or_skip( + src: os.PathLike[str] | str, + dst: os.PathLike[str] | str, + **kwargs: Any, +) -> None: + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(src, dst, **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + +class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + +class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that has + the same basename but reside in packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + +def import_path( + path: str | os.PathLike[str], + *, + mode: str | ImportMode = ImportMode.prepend, + root: Path, + consider_namespace_packages: bool, +) -> ModuleType: + """ + Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + :param path: + Path to the file to import. + + :param mode: + Controls the underlying import mechanism that will be used: + + * ImportMode.prepend: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `importlib.import_module`. + + * ImportMode.append: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * ImportMode.importlib: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to muck with `sys.path` at all. It effectively + allows having same-named test modules in different places. + + :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + + :param consider_namespace_packages: + If True, consider namespace packages when resolving module names. + + :raises ImportPathMismatchError: + If after importing the given `path` and the module `__file__` + are different. Only raised in `prepend` and `append` modes. 
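+
+    Example (an illustrative sketch; the file path is hypothetical)::
+
+        mod = import_path(
+            "tests/test_foo.py",
+            mode=ImportMode.importlib,
+            root=Path.cwd(),
+            consider_namespace_packages=False,
+        )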
+ """ + path = Path(path) + mode = ImportMode(mode) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + # Try to import this module using the standard import mechanisms, but + # without touching sys.path. + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pass + else: + # If the given module name is already in sys.modules, do not import it again. + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, pkg_root, insert_modules=False + ) + if mod is not None: + return mod + + # Could not import the module with the current sys.path, so we fall back + # to importing the file as a single module, not being a part of a package. + module_name = module_name_from_path(path, root) + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, path.parent, insert_modules=True + ) + if mod is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + return mod + + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pkg_root, module_name = path.parent, path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.sep + "__init__.py"): + module_file = module_file[: -(len(os.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +def _import_module_using_spec( + module_name: str, module_path: Path, module_location: Path, *, insert_modules: bool +) -> ModuleType | None: + """ + Tries to import a module by its canonical name, path to the .py file, and its + parent location. + + :param insert_modules: + If True, will call insert_missing_modules to create empty intermediate modules + for made-up module names (when importing test files not reachable from sys.path). + """ + # Checking with sys.meta_path first in case one of its hooks can import this module, + # such as our own assertion-rewrite hook. 
+ for meta_importer in sys.meta_path: + spec = meta_importer.find_spec( + module_name, [str(module_location), str(module_path)] + ) + if spec_matches_module_path(spec, module_path): + break + else: + spec = importlib.util.spec_from_file_location(module_name, str(module_path)) + + if spec_matches_module_path(spec, module_path): + assert spec is not None + # Attempt to import the parent module, seems is our responsibility: + # https://github.com/python/cpython/blob/73906d5c908c1e0b73c5436faeff7d93698fc074/Lib/importlib/_bootstrap.py#L1308-L1311 + parent_module_name, _, name = module_name.rpartition(".") + parent_module: ModuleType | None = None + if parent_module_name: + parent_module = sys.modules.get(parent_module_name) + if parent_module is None: + # Find the directory of this module's parent. + parent_dir = ( + module_path.parent.parent + if module_path.name == "__init__.py" + else module_path.parent + ) + # Consider the parent module path as its __init__.py file, if it has one. + parent_module_path = ( + parent_dir / "__init__.py" + if (parent_dir / "__init__.py").is_file() + else parent_dir + ) + parent_module = _import_module_using_spec( + parent_module_name, + parent_module_path, + parent_dir, + insert_modules=insert_modules, + ) + + # Find spec and import this module. + mod = importlib.util.module_from_spec(spec) + sys.modules[module_name] = mod + spec.loader.exec_module(mod) # type: ignore[union-attr] + + # Set this module as an attribute of the parent module (#12194). + if parent_module is not None: + setattr(parent_module, name, mod) + + if insert_modules: + insert_missing_modules(sys.modules, module_name) + return mod + + return None + + +def spec_matches_module_path(module_spec: ModuleSpec | None, module_path: Path) -> bool: + """Return true if the given ModuleSpec can be used to import the given module path.""" + if module_spec is None or module_spec.origin is None: + return False + + return Path(module_spec.origin) == module_path + + +# Implement a special _is_same function on Windows which returns True if the two filenames +# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678). +if sys.platform.startswith("win"): + + def _is_same(f1: str, f2: str) -> bool: + return Path(f1) == Path(f2) or os.path.samefile(f1, f2) + +else: + + def _is_same(f1: str, f2: str) -> bool: + return os.path.samefile(f1, f2) + + +def module_name_from_path(path: Path, root: Path) -> str: + """ + Return a dotted module name based on the given path, anchored on root. + + For example: path="projects/src/tests/test_foo.py" and root="/projects", the + resulting module name will be "src.tests.test_foo". + """ + path = path.with_suffix("") + try: + relative_path = path.relative_to(root) + except ValueError: + # If we can't get a relative path to root, use the full path, except + # for the first part ("d:\\" or "/" depending on the platform, for example). + path_parts = path.parts[1:] + else: + # Use the parts for the relative path to the root path. + path_parts = relative_path.parts + + # Module name for packages do not contain the __init__ file, unless + # the `__init__.py` file is at the root. + if len(path_parts) >= 2 and path_parts[-1] == "__init__": + path_parts = path_parts[:-1] + + # Module names cannot contain ".", normalize them to "_". This prevents + # a directory having a "." in the name (".env.310" for example) causing extra intermediate modules. + # Also, important to replace "." at the start of paths, as those are considered relative imports. 
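+    # For example, the parts (".env.310", "lib", "mod") become
+    # ("_env_310", "lib", "mod"), i.e. the module name "_env_310.lib.mod".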
+ path_parts = tuple(x.replace(".", "_") for x in path_parts) + + return ".".join(path_parts) + + +def insert_missing_modules(modules: dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. + + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + while module_name: + parent_module_name, _, child_name = module_name.rpartition(".") + if parent_module_name: + parent_module = modules.get(parent_module_name) + if parent_module is None: + try: + # If sys.meta_path is empty, calling import_module will issue + # a warning and raise ModuleNotFoundError. To avoid the + # warning, we check sys.meta_path explicitly and raise the error + # ourselves to fall back to creating a dummy module. + if not sys.meta_path: + raise ModuleNotFoundError + parent_module = importlib.import_module(parent_module_name) + except ModuleNotFoundError: + parent_module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[parent_module_name] = parent_module + + # Add child attribute to the parent that can reference the child + # modules. + if not hasattr(parent_module, child_name): + setattr(parent_module, child_name, modules[module_name]) + + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Path | None: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it cannot be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not (parent / "__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def resolve_pkg_root_and_module_name( + path: Path, *, consider_namespace_packages: bool = False +) -> tuple[Path, str]: + """ + Return the path to the directory of the root package that contains the + given Python file, and its module name: + + src/ + app/ + __init__.py + core/ + __init__.py + models.py + + Passing the full path to `models.py` will yield Path("src") and "app.core.models". + + If consider_namespace_packages is True, then we additionally check upwards in the hierarchy + for namespace packages: + + https://packaging.python.org/en/latest/guides/packaging-namespace-packages + + Raises CouldNotResolvePathError if the given path does not belong to a package (missing any __init__.py files). + """ + pkg_root: Path | None = None + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + if consider_namespace_packages: + start = pkg_root if pkg_root is not None else path.parent + for candidate in (start, *start.parents): + module_name = compute_module_name(candidate, path) + if module_name and is_importable(module_name, path): + # Point the pkg_root to the root of the namespace package. 
+ pkg_root = candidate + break + + if pkg_root is not None: + module_name = compute_module_name(pkg_root, path) + if module_name: + return pkg_root, module_name + + raise CouldNotResolvePathError(f"Could not resolve for {path}") + + +def is_importable(module_name: str, module_path: Path) -> bool: + """ + Return if the given module path could be imported normally by Python, akin to the user + entering the REPL and importing the corresponding module name directly, and corresponds + to the module_path specified. + + :param module_name: + Full module name that we want to check if is importable. + For example, "app.models". + + :param module_path: + Full path to the python module/package we want to check if is importable. + For example, "/projects/src/app/models.py". + """ + try: + # Note this is different from what we do in ``_import_module_using_spec``, where we explicitly search through + # sys.meta_path to be able to pass the path of the module that we want to import (``meta_importer.find_spec``). + # Using importlib.util.find_spec() is different, it gives the same results as trying to import + # the module normally in the REPL. + spec = importlib.util.find_spec(module_name) + except (ImportError, ValueError, ImportWarning): + return False + else: + return spec_matches_module_path(spec, module_path) + + +def compute_module_name(root: Path, module_path: Path) -> str | None: + """Compute a module name based on a path and a root anchor.""" + try: + path_without_suffix = module_path.with_suffix("") + except ValueError: + # Empty paths (such as Path.cwd()) might break meta_path hooks (like our own assertion rewriter). + return None + + try: + relative = path_without_suffix.relative_to(root) + except ValueError: # pragma: no cover + return None + names = list(relative.parts) + if not names: + return None + if names[-1] == "__init__": + names.pop() + return ".".join(names) + + +class CouldNotResolvePathError(Exception): + """Custom exception raised by resolve_pkg_root_and_module_name.""" + + +def scandir( + path: str | os.PathLike[str], + sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name, +) -> list[os.DirEntry[str]]: + """Scan a directory recursively, in breadth-first order. + + The returned entries are sorted according to the given key. + The default is to sort by name. + """ + entries = [] + with os.scandir(path) as s: + # Skip entries with symlink loops and other brokenness, so the caller + # doesn't have to deal with it. + for entry in s: + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + raise + entries.append(entry) + entries.sort(key=sort_key) # type: ignore[arg-type] + return entries + + +def visit( + path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool] +) -> Iterator[os.DirEntry[str]]: + """Walk a directory recursively, in breadth-first order. + + The `recurse` predicate determines whether a directory is recursed. + + Entries at each directory level are sorted. + """ + entries = scandir(path) + yield from entries + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: str | os.PathLike[str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). 
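+
+    Example (result depends on the current working directory)::
+
+        absolutepath("src/../tests")  # e.g. Path("/home/user/proj/tests")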
+ """ + return Path(os.path.abspath(path)) + + +def commonpath(path1: Path, path2: Path) -> Path | None: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. + # Can be the case for two relative paths without common prefix. + # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. + *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) + + +def safe_exists(p: Path) -> bool: + """Like Path.exists(), but account for input arguments that might be too long (#11394).""" + try: + return p.exists() + except (ValueError, OSError): + # ValueError: stat: path too long for Windows + # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect + return False diff --git a/.venv/Lib/site-packages/_pytest/py.typed b/.venv/Lib/site-packages/_pytest/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/.venv/Lib/site-packages/_pytest/pytester.py b/.venv/Lib/site-packages/_pytest/pytester.py new file mode 100644 index 00000000..3f7520ee --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/pytester.py @@ -0,0 +1,1766 @@ +# mypy: allow-untyped-defs +"""(Disabled by default) support for testing pytest and pytest plugins. 
+ +PYTEST_DONT_REWRITE +""" + +from __future__ import annotations + +import collections.abc +import contextlib +from fnmatch import fnmatch +import gc +import importlib +from io import StringIO +import locale +import os +from pathlib import Path +import platform +import re +import shutil +import subprocess +import sys +import traceback +from typing import Any +from typing import Callable +from typing import Final +from typing import final +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Literal +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="Run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "Run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="Directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> list[tuple[str, str]]: + if sys.version_info >= (3, 11): + # New in Python 3.11, ignores utf-8 mode + encoding = locale.getencoding() + else: + encoding = locale.getpreferredencoding(False) + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + encoding=encoding, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not 
in line
+                and "cwd" not in line
+            )
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split("\0")
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename in IGNORE_PAM:
+                    continue
+                if filename.startswith("/"):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self) -> bool:
+        try:
+            subprocess.run(("lsof", "-v"), check=True)
+        except (OSError, subprocess.CalledProcessError):
+            return False
+        else:
+            return True
+
+    @hookimpl(wrapper=True, tryfirst=True)
+    def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]:
+        lines1 = self.get_open_files()
+        try:
+            return (yield)
+        finally:
+            if hasattr(sys, "pypy_version_info"):
+                gc.collect()
+            lines2 = self.get_open_files()
+
+            new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
+            leaked_files = [t for t in lines2 if t[0] in new_fds]
+            if leaked_files:
+                error = [
+                    f"***** {len(leaked_files)} FD leakage detected",
+                    *(str(f) for f in leaked_files),
+                    "*** Before:",
+                    *(str(f) for f in lines1),
+                    "*** After:",
+                    *(str(f) for f in lines2),
+                    f"***** {len(leaked_files)} FD leakage detected",
+                    "*** function {}:{}: {} ".format(*item.location),
+                    "See issue #2366",
+                ]
+                item.warn(PytestWarning("\n".join(error)))
+
+
+# used at least by pytest-xdist plugin
+
+
+@fixture
+def _pytest(request: FixtureRequest) -> PytestArg:
+    """Return a helper which offers a gethookrecorder(hook) method which
+    returns a HookRecorder instance which helps to make assertions about called
+    hooks."""
+    return PytestArg(request)
+
+
+class PytestArg:
+    def __init__(self, request: FixtureRequest) -> None:
+        self._request = request
+
+    def gethookrecorder(self, hook) -> HookRecorder:
+        hookrecorder = HookRecorder(hook._pm)
+        self._request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(values: Iterable[str]) -> list[str]:
+    """Only return names from iterator values without a leading underscore."""
+    return [x for x in values if x[0] != "_"]
+
+
+@final
+class RecordedHookCall:
+    """A recorded call to a hook.
+
+    The arguments to the hook call are set as attributes.
+    For example:
+
+    .. code-block:: python
+
+        calls = hook_recorder.getcalls("pytest_runtest_setup")
+        # Suppose pytest_runtest_setup was called once with `item=an_item`.
+        assert calls[0].item is an_item
+    """
+
+    def __init__(self, name: str, kwargs) -> None:
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self) -> str:
+        d = self.__dict__.copy()
+        del d["_name"]
+        return f"<RecordedHookCall {self._name!r}(**{d!r})>"
+
+    if TYPE_CHECKING:
+        # The class has undetermined attributes, this tells mypy about it.
+        def __getattr__(self, key: str): ...
+
+
+@final
+class HookRecorder:
+    """Record all hooks called in a plugin manager.
+
+    Hook recorders are created by :class:`Pytester`.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
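+
+    Example (a sketch; ``pytester`` is the fixture defined in this module)::
+
+        reprec = pytester.inline_run("-q")  # inline_run returns a HookRecorder
+        reports = reprec.getreports("pytest_runtest_logreport")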
+ """ + + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + + self._pluginmanager = pluginmanager + self.calls: list[RecordedHookCall] = [] + self.ret: int | ExitCode | None = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(RecordedHookCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + # Since Python 3.13, f_locals is not a dict, but eval requires a dict. + backlocals = dict(sys._getframe(1).f_locals) + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> RecordedHookCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([f" {x}" for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> RecordedHookCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getreports( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: str | Iterable[str] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: str | None = None, + ) -> CollectReport | TestReport: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + f"could not find test report matching {inamepart!r}: " + "no test reports at all!" 
+ ) + if len(values) > 1: + raise ValueError( + f"found 2 or more testreports matching {inamepart!r}: {values}" + ) + return values[0] + + @overload + def getfailures( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getfailures( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> list[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> LineComp: + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> type[LineMatcher]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. + """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. 
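+
+# Typical black-box use of the `pytester` fixture from a plugin's own test
+# suite (an illustrative sketch; the inner test module is made up):
+#
+#     def test_plugin_passes(pytester):
+#         pytester.makepyfile("def test_ok(): assert True")
+#         result = pytester.runpytest("-q")
+#         result.assert_outcomes(passed=1)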
+
+
+# Regex to match the session duration string in the summary: "74.34s".
+rex_session_duration = re.compile(r"\d+\.\d\ds")
+# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
+rex_outcome = re.compile(r"(\d+) (\w+)")
+
+
+@final
+class RunResult:
+    """The result of running a command from :class:`~pytest.Pytester`."""
+
+    def __init__(
+        self,
+        ret: int | ExitCode,
+        outlines: list[str],
+        errlines: list[str],
+        duration: float,
+    ) -> None:
+        try:
+            self.ret: int | ExitCode = ExitCode(ret)
+            """The return value."""
+        except ValueError:
+            self.ret = ret
+        self.outlines = outlines
+        """List of lines captured from stdout."""
+        self.errlines = errlines
+        """List of lines captured from stderr."""
+        self.stdout = LineMatcher(outlines)
+        """:class:`~pytest.LineMatcher` of stdout.
+
+        Use e.g. :func:`str(stdout) <str>` to reconstruct stdout, or the commonly used
+        :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines>` method.
+        """
+        self.stderr = LineMatcher(errlines)
+        """:class:`~pytest.LineMatcher` of stderr."""
+        self.duration = duration
+        """Duration in seconds."""
+
+    def __repr__(self) -> str:
+        return (
+            "<RunResult ret=%s len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
+            % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
+        )
+
+    def parseoutcomes(self) -> dict[str, int]:
+        """Return a dictionary of outcome noun -> count from parsing the terminal
+        output that the test process produced.
+
+        The returned nouns will always be in plural form::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        return self.parse_summary_nouns(self.outlines)
+
+    @classmethod
+    def parse_summary_nouns(cls, lines) -> dict[str, int]:
+        """Extract the nouns from a pytest terminal summary line.
+
+        It always returns the plural noun for consistency::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        for line in reversed(lines):
+            if rex_session_duration.search(line):
+                outcomes = rex_outcome.findall(line)
+                ret = {noun: int(count) for (count, noun) in outcomes}
+                break
+        else:
+            raise ValueError("Pytest terminal summary report not found")
+
+        to_plural = {
+            "warning": "warnings",
+            "error": "errors",
+        }
+        return {to_plural.get(k, k): v for k, v in ret.items()}
+
+    def assert_outcomes(
+        self,
+        passed: int = 0,
+        skipped: int = 0,
+        failed: int = 0,
+        errors: int = 0,
+        xpassed: int = 0,
+        xfailed: int = 0,
+        warnings: int | None = None,
+        deselected: int | None = None,
+    ) -> None:
+        """
+        Assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run.
+
+        ``warnings`` and ``deselected`` are only checked if not None.
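+
+        Example (an illustrative sketch; the counts are placeholders that
+        depend on the suite under test)::
+
+            result = pytester.runpytest()
+            result.assert_outcomes(passed=2, failed=1)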
+        """
+        __tracebackhide__ = True
+        from _pytest.pytester_assertions import assert_outcomes
+
+        outcomes = self.parseoutcomes()
+        assert_outcomes(
+            outcomes,
+            passed=passed,
+            skipped=skipped,
+            failed=failed,
+            errors=errors,
+            xpassed=xpassed,
+            xfailed=xfailed,
+            warnings=warnings,
+            deselected=deselected,
+        )
+
+
+class SysModulesSnapshot:
+    def __init__(self, preserve: Callable[[str], bool] | None = None) -> None:
+        self.__preserve = preserve
+        self.__saved = dict(sys.modules)
+
+    def restore(self) -> None:
+        if self.__preserve:
+            self.__saved.update(
+                (k, m) for k, m in sys.modules.items() if self.__preserve(k)
+            )
+        sys.modules.clear()
+        sys.modules.update(self.__saved)
+
+
+class SysPathsSnapshot:
+    def __init__(self) -> None:
+        self.__saved = list(sys.path), list(sys.meta_path)
+
+    def restore(self) -> None:
+        sys.path[:], sys.meta_path[:] = self.__saved
+
+
+@final
+class Pytester:
+    """
+    Facilities to write tests/configuration files, execute pytest in isolation, and match
+    against expected output, perfect for black-box testing of pytest plugins.
+
+    It attempts to isolate the test run from external factors as much as possible, modifying
+    the current working directory to :attr:`path` and environment variables during initialization.
+    """
+
+    __test__ = False
+
+    CLOSE_STDIN: Final = NOTSET
+
+    class TimeoutExpired(Exception):
+        pass
+
+    def __init__(
+        self,
+        request: FixtureRequest,
+        tmp_path_factory: TempPathFactory,
+        monkeypatch: MonkeyPatch,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._request = request
+        self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = (
+            WeakKeyDictionary()
+        )
+        if request.function:
+            name: str = request.function.__name__
+        else:
+            name = request.node.name
+        self._name = name
+        self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
+        #: A list of plugins to use with :py:meth:`parseconfig` and
+        #: :py:meth:`runpytest`. Initially this is an empty list but plugins can
+        #: be added to the list. The type of items to add to the list depends on
+        #: the method using them so refer to them for details.
+        self.plugins: list[str | _PluggyPlugin] = []
+        self._sys_path_snapshot = SysPathsSnapshot()
+        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
+        self._request.addfinalizer(self._finalize)
+        self._method = self._request.config.getoption("--runpytest")
+        self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)
+
+        self._monkeypatch = mp = monkeypatch
+        self.chdir()
+        mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
+        # Ensure no unexpected caching via tox.
+        mp.delenv("TOX_ENV_DIR", raising=False)
+        # Discard outer pytest options.
+        mp.delenv("PYTEST_ADDOPTS", raising=False)
+        # Ensure no user config is used.
+        tmphome = str(self.path)
+        mp.setenv("HOME", tmphome)
+        mp.setenv("USERPROFILE", tmphome)
+        # Do not use colors for inner runs by default.
+        mp.setenv("PY_COLORS", "0")
+
+    @property
+    def path(self) -> Path:
+        """Temporary directory path used to create files/run tests from, etc."""
+        return self._path
+
+    def __repr__(self) -> str:
+        return f"<Pytester {self.path!r}>"
+
+    def _finalize(self) -> None:
+        """
+        Clean up global state artifacts.
+
+        Some methods modify the global interpreter state and this tries to
+        clean this up. It does not remove the temporary directory however so
+        it can be looked at after the test run has finished.
+ """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined] + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. + """ + self._monkeypatch.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Any | bytes], + files: dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + if ext is None: + raise TypeError("ext must not be None") + + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + + def to_text(s: Any | bytes) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new text file(s) in the test directory. + + :param ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + :returns: + The first created file. + + Examples: + + .. code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a conftest.py file. + + :param source: The contents. + :returns: The conftest.py file. + """ + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file. + + :param source: The contents. + :returns: The tox.ini file. 
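+
+        Example (a minimal sketch; the ini content is illustrative only):
+
+        .. code-block:: python
+
+            pytester.makeini(
+                '''
+                [pytest]
+                addopts = -q
+                '''
+            )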
+        """
+        return self.makefile(".ini", tox=source)
+
+    def getinicfg(self, source: str) -> SectionWrapper:
+        """Return the pytest section from the tox.ini config file."""
+        p = self.makeini(source)
+        return IniConfig(str(p))["pytest"]
+
+    def makepyprojecttoml(self, source: str) -> Path:
+        """Write a pyproject.toml file.
+
+        :param source: The contents.
+        :returns: The pyproject.toml file.
+
+        .. versionadded:: 6.0
+        """
+        return self.makefile(".toml", pyproject=source)
+
+    def makepyfile(self, *args, **kwargs) -> Path:
+        r"""Shortcut for .makefile() with a .py extension.
+
+        Defaults to the test name with a '.py' extension, e.g. test_foobar.py, overwriting
+        existing files.
+
+        Examples:
+
+        .. code-block:: python
+
+            def test_something(pytester):
+                # Initial file is created test_something.py.
+                pytester.makepyfile("foobar")
+                # To create multiple files, pass kwargs accordingly.
+                pytester.makepyfile(custom="foobar")
+                # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
+
+        """
+        return self._makefile(".py", args, kwargs)
+
+    def maketxtfile(self, *args, **kwargs) -> Path:
+        r"""Shortcut for .makefile() with a .txt extension.
+
+        Defaults to the test name with a '.txt' extension, e.g. test_foobar.txt, overwriting
+        existing files.
+
+        Examples:
+
+        .. code-block:: python
+
+            def test_something(pytester):
+                # Initial file is created test_something.txt.
+                pytester.maketxtfile("foobar")
+                # To create multiple files, pass kwargs accordingly.
+                pytester.maketxtfile(custom="foobar")
+                # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
+
+        """
+        return self._makefile(".txt", args, kwargs)
+
+    def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None:
+        """Prepend a directory to sys.path, defaults to :attr:`path`.
+
+        This is undone automatically when this object dies at the end of each
+        test.
+
+        :param path:
+            The path.
+        """
+        if path is None:
+            path = self.path
+
+        self._monkeypatch.syspath_prepend(str(path))
+
+    def mkdir(self, name: str | os.PathLike[str]) -> Path:
+        """Create a new (sub)directory.
+
+        :param name:
+            The name of the directory, relative to the pytester path.
+        :returns:
+            The created directory.
+        :rtype: pathlib.Path
+        """
+        p = self.path / name
+        p.mkdir()
+        return p
+
+    def mkpydir(self, name: str | os.PathLike[str]) -> Path:
+        """Create a new python package.
+
+        This creates a (sub)directory with an empty ``__init__.py`` file so it
+        gets recognised as a Python package.
+        """
+        p = self.path / name
+        p.mkdir()
+        p.joinpath("__init__.py").touch()
+        return p
+
+    def copy_example(self, name: str | None = None) -> Path:
+        """Copy file from project's directory into the testdir.
+
+        :param name:
+            The name of the file to copy.
+        :return:
+            Path to the copied directory (inside ``self.path``).
+ :rtype: pathlib.Path + """ + example_dir_ = self._request.config.getini("pytester_example_dir") + if example_dir_ is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir: Path = self._request.config.rootpath / example_dir_ + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item: + """Get the collection node of a file. + + :param config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param arg: + Path to the file. + :returns: + The node. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item: + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: + Path to the file. + :returns: + The node. + """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + + :param colitems: + The collection nodes. + :returns: + The collected items. + """ + session = colitems[0].session + result: list[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + ``_pytest.runner.runtestprotocol``. 
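+
+        A rough sketch of the expected shape (all names below are
+        illustrative, not part of the API):
+
+        .. code-block:: python
+
+            class TestMyRunner:
+                def getrunner(self):
+                    from _pytest.runner import runtestprotocol
+
+                    return runtestprotocol
+
+                def test_example(self, pytester):
+                    reports = pytester.runitem("def test_func(): pass")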
+ """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = [*list(cmdlineargs), p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: + """Run ``pytest.main(['--collect-only'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: str | os.PathLike[str], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect: + def pytest_configure(x, config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins.append(Collect()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. 
+ if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: str | os.PathLike[str], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + now = timing.time() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now + ) + res.reprec = reprec # type: ignore + return res + + def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[str | os.PathLike[str]] + ) -> list[str | os.PathLike[str]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append( + "--basetemp={}".format(self.path.parent.joinpath("basetemp")) + ) + return new_args + + def parseconfig(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest :class:`pytest.Config` instance from given + commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create a + new :py:class:`pytest.PytestPluginManager` and call the + :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` + instance. + + If :attr:`plugins` has been populated they should be plugin modules + to be registered with the plugin manager. + """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` + hook. 
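+
+        Example (a minimal sketch; ``-q`` lowers the verbosity option to -1)::
+
+            config = pytester.parseconfigure("-q")
+            assert config.getoption("verbose") == -1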
+ """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: str | os.PathLike[str], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + :returns: + The test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" + + def getitems(self, source: str | os.PathLike[str]) -> list[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: str | os.PathLike[str], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[str | os.PathLike[str]], + stdout: int | TextIO = subprocess.PIPE, + stderr: int | TextIO = subprocess.PIPE, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. 
+ """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: str | os.PathLike[str], + timeout: float | None = None, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is ``CLOSE_STDIN`` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. + :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int + :returns: + The result. 
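+
+        Example (a minimal sketch; assumes ``sys`` is imported in the
+        calling test module)::
+
+            result = pytester.run(sys.executable, "--version")
+            assert result.ret == 0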
+ + """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + now = timing.time() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = f"{timeout} second timeout expired running: {cmdargs}" + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, timing.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: os.PathLike[str]) -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess( + self, *args: str | os.PathLike[str], timeout: float | None = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :returns: + The result. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = (f"--basetemp={p}", *args) + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0], *args) + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run a command using pexpect. + + The pexpect child is returned. 
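+
+        Example (a sketch; requires the ``pexpect`` package and assumes
+        ``sys`` is imported in the calling test module)::
+
+            child = pytester.spawn(f"{sys.executable} --version")
+            child.expect("Python")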
+        """
+        pexpect = importorskip("pexpect", "3.0")
+        if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
+            skip("pypy-64 bit not supported")
+        if not hasattr(pexpect, "spawn"):
+            skip("pexpect.spawn not available")
+        logfile = self.path.joinpath("spawn.out").open("wb")
+
+        child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout)
+        self._request.addfinalizer(logfile.close)
+        return child
+
+
+class LineComp:
+    def __init__(self) -> None:
+        self.stringio = StringIO()
+        """:class:`python:io.StringIO()` instance used for input."""
+
+    def assert_contains_lines(self, lines2: Sequence[str]) -> None:
+        """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value.
+
+        Lines are matched using :func:`LineMatcher.fnmatch_lines <pytest.LineMatcher.fnmatch_lines>`.
+        """
+        __tracebackhide__ = True
+        val = self.stringio.getvalue()
+        self.stringio.truncate(0)
+        self.stringio.seek(0)
+        lines1 = val.split("\n")
+        LineMatcher(lines1).fnmatch_lines(lines2)
+
+
+class LineMatcher:
+    """Flexible matching of text.
+
+    This is a convenience class to test large texts like the output of
+    commands.
+
+    The constructor takes a list of lines without their trailing newlines, i.e.
+    ``text.splitlines()``.
+    """
+
+    def __init__(self, lines: list[str]) -> None:
+        self.lines = lines
+        self._log_output: list[str] = []
+
+    def __str__(self) -> str:
+        """Return the entire original text.
+
+        .. versionadded:: 6.2
+            You can use :meth:`str` in older versions.
+        """
+        return "\n".join(self.lines)
+
+    def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]:
+        if isinstance(lines2, str):
+            lines2 = Source(lines2)
+        if isinstance(lines2, Source):
+            lines2 = lines2.strip().lines
+        return lines2
+
+    def fnmatch_lines_random(self, lines2: Sequence[str]) -> None:
+        """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`)."""
+        __tracebackhide__ = True
+        self._match_lines_random(lines2, fnmatch)
+
+    def re_match_lines_random(self, lines2: Sequence[str]) -> None:
+        """Check lines exist in the output in any order (using :func:`python:re.match`)."""
+        __tracebackhide__ = True
+        self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))
+
+    def _match_lines_random(
+        self, lines2: Sequence[str], match_func: Callable[[str, str], bool]
+    ) -> None:
+        __tracebackhide__ = True
+        lines2 = self._getlines(lines2)
+        for line in lines2:
+            for x in self.lines:
+                if line == x or match_func(x, line):
+                    self._log("matched: ", repr(line))
+                    break
+            else:
+                msg = f"line {line!r} not found in output"
+                self._log(msg)
+                self._fail(msg)
+
+    def get_lines_after(self, fnline: str) -> Sequence[str]:
+        """Return all lines following the given line in the text.
+
+        The given line can contain glob wildcards.
+        """
+        for i, line in enumerate(self.lines):
+            if fnline == line or fnmatch(line, fnline):
+                return self.lines[i + 1 :]
+        raise ValueError(f"line {fnline!r} not found in output")
+
+    def _log(self, *args) -> None:
+        self._log_output.append(" ".join(str(x) for x in args))
+
+    @property
+    def _log_text(self) -> str:
+        return "\n".join(self._log_output)
+
+    def fnmatch_lines(
+        self, lines2: Sequence[str], *, consecutive: bool = False
+    ) -> None:
+        """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`).
+
+        The argument is a list of lines which have to match and can use glob
+        wildcards. If they do not match a pytest.fail() is called. The
+        matches and non-matches are also shown as part of the error message.
+
+        :param lines2: String patterns to match.
+        :param consecutive: Match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
+
+    def re_match_lines(
+        self, lines2: Sequence[str], *, consecutive: bool = False
+    ) -> None:
+        """Check lines exist in the output (using :func:`python:re.match`).
+
+        The argument is a list of lines which have to match using ``re.match``.
+        If they do not match a pytest.fail() is called.
+
+        The matches and non-matches are also shown as part of the error message.
+
+        :param lines2: string patterns to match.
+        :param consecutive: match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(
+            lines2,
+            lambda name, pat: bool(re.match(pat, name)),
+            "re.match",
+            consecutive=consecutive,
+        )
+
+    def _match_lines(
+        self,
+        lines2: Sequence[str],
+        match_func: Callable[[str, str], bool],
+        match_nickname: str,
+        *,
+        consecutive: bool = False,
+    ) -> None:
+        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
+
+        :param Sequence[str] lines2:
+            List of string patterns to match. The actual format depends on
+            ``match_func``.
+        :param match_func:
+            A callable ``match_func(line, pattern)`` where line is the
+            captured line from stdout/stderr and pattern is the matching
+            pattern.
+        :param str match_nickname:
+            The nickname for the match function that will be logged to stdout
+            when a match occurs.
+        :param consecutive:
+            Match lines consecutively?
+        """
+        if not isinstance(lines2, collections.abc.Sequence):
+            raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
+        lines2 = self._getlines(lines2)
+        lines1 = self.lines[:]
+        extralines = []
+        __tracebackhide__ = True
+        wnick = len(match_nickname) + 1
+        started = False
+        for line in lines2:
+            nomatchprinted = False
+            while lines1:
+                nextline = lines1.pop(0)
+                if line == nextline:
+                    self._log("exact match:", repr(line))
+                    started = True
+                    break
+                elif match_func(nextline, line):
+                    self._log(f"{match_nickname}:", repr(line))
+                    self._log(
+                        "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                    )
+                    started = True
+                    break
+                else:
+                    if consecutive and started:
+                        msg = f"no consecutive match: {line!r}"
+                        self._log(msg)
+                        self._log(
+                            "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                        )
+                        self._fail(msg)
+                    if not nomatchprinted:
+                        self._log(
+                            "{:>{width}}".format("nomatch:", width=wnick), repr(line)
+                        )
+                        nomatchprinted = True
+                    self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
+                    extralines.append(nextline)
+            else:
+                msg = f"remains unmatched: {line!r}"
+                self._log(msg)
+                self._fail(msg)
+        self._log_output = []
+
+    def no_fnmatch_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
+
+        :param str pat: The pattern to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(pat, fnmatch, "fnmatch")
+
+    def no_re_match_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``re.match``.
+
+        :param str pat: The regular expression to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(
+            pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
+        )
+
+    def _no_match_line(
+        self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
+    ) -> None:
+        """Ensure captured lines do not match the given pattern.
+
+        :param str pat: The pattern to match lines.
+ """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/.venv/Lib/site-packages/_pytest/pytester_assertions.py b/.venv/Lib/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 00000000..d543798f --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,74 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" + +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. +from __future__ import annotations + +from typing import Sequence + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/.venv/Lib/site-packages/_pytest/python.py b/.venv/Lib/site-packages/_pytest/python.py new file mode 100644 index 00000000..3478c34c --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/python.py @@ -0,0 +1,1679 @@ +# mypy: allow-untyped-defs +"""Python test discovery, setup and run of test functions.""" + +from __future__ import annotations + +import abc +from collections import Counter +from collections import defaultdict +import dataclasses +import enum +import fnmatch 
+from functools import partial +import inspect +import itertools +import os +from pathlib import Path +import types +from typing import Any +from typing import Callable +from typing import Dict +from typing import final +from typing import Generator +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import Mapping +from typing import Pattern +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.compat import is_generator +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import FuncFixtureInfo +from _pytest.fixtures import get_scope_node +from _pytest.main import Session +from _pytest.mark import MARK_GEN +from _pytest.mark import ParameterSet +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import scandir +from _pytest.scope import _ScopeName +from _pytest.scope import Scope +from _pytest.stash import StashKey +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning +from _pytest.warning_types import PytestUnhandledCoroutineWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. 
+ default=["test_*.py", "*_test.py"], + help="Glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="Prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="Prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="Disable string escape non-ASCII characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_warn_and_skip(nodeid: str) -> None: + msg = "async def functions are not natively supported and have been skipped.\n" + msg += ( + "You need to install a suitable plugin for your async framework, for example:\n" + ) + msg += " - anyio\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-tornasync\n" + msg += " - pytest-trio\n" + msg += " - pytest-twisted" + warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) + skip(reason="async def function and no async plugin installed (see warnings)") + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_warn_and_skip(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_warn_and_skip(pyfuncitem.nodeid) + elif result is not None: + warnings.warn( + PytestReturnNotNoneWarning( + f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a " + "future version of pytest. Did you mean to use `assert` instead of `return`?" + ) + ) + return True + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + pkginit = path / "__init__.py" + try: + has_pkginit = pkginit.is_file() + except PermissionError: + # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096. 
+ return None + if has_pkginit: + return Package.from_parent(parent, path=path) + return None + + +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): + if not path_matches_patterns( + file_path, parent.config.getini("python_files") + ): + return None + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) + return module + return None + + +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(fnmatch_ex(pattern, path) for pattern in patterns) + + +def pytest_pycollect_makemodule(module_path: Path, parent) -> Module: + return Module.from_parent(parent, path=module_path) + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]: + assert isinstance(collector, (Class, Module)), type(collector) + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + return Class.from_parent(collector, name=name, obj=obj) + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. + obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + f"cannot collect {name!r} because it is not a function." + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res = Function.from_parent(collector, name=name) + reason = ( + f"yield tests were removed in pytest 4.0 - {name} will be ignored" + ) + res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) + res.warn(PytestCollectionWarning(reason)) + return res + else: + return list(collector._genfunctions(name, obj)) + return None + + +class PyobjMixin(nodes.Node): + """this mix-in inherits from Node to carry over the typing information + + as its intended to always mix in before a node + its position in the mro is unaffected""" + + _ALLOW_MARKERS = True + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a class or a module. + """ + # Overridden by Function. 
+ return None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + parts = [] + for node in self.iter_parents(): + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + # XXX caching? + path, lineno = getfslineno(self.obj) + modpath = self.getmodpath() + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector, abc.ABC): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, (staticmethod, classmethod)): + # staticmethods and classmethods need to be unwrapped. 
+ obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + if not (self.classnamefilter(name) or self.isnosetest(obj)): + return False + if inspect.isabstract(obj): + return False + return True + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: set[str] = set() + dict_values: list[list[nodes.Item | nodes.Collector]] = [] + ihook = self.ihook + for dic in dicts: + values: list[nodes.Item | nodes.Collector] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + dict_values.append(values) + + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. + result = [] + for values in reversed(dict_values): + result.extend(values) + return result + + def _genfunctions(self, name: str, funcobj) -> Iterator[Function]: + modulecol = self.getparent(Module) + assert modulecol is not None + module = modulecol.obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + + definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + fixtureinfo = definition._fixtureinfo + + # pytest_generate_tests impls call metafunc.parametrize() which fills + # metafunc._calls, the outcome of the hook. + metafunc = Metafunc( + definition=definition, + fixtureinfo=fixtureinfo, + config=self.config, + cls=cls, + module=module, + _ispytest=True, + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if cls is not None and hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) + + if not metafunc._calls: + yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) + else: + # Direct parametrizations taking place in module/class-specific + # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure + # we update what the function really needs a.k.a its fixture closure. 
+            # Note that direct parametrizations using `@pytest.mark.parametrize` have already been considered
+            # into making the closure using `ignore_args` arg to `getfixtureclosure`.
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = f"{name}[{callspec.id}]"
+                yield Function.from_parent(
+                    self,
+                    name=subname,
+                    callspec=callspec,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+def importtestmodule(
+    path: Path,
+    config: Config,
+):
+    # We assume we are only called once per module.
+    importmode = config.getoption("--import-mode")
+    try:
+        mod = import_path(
+            path,
+            mode=importmode,
+            root=config.rootpath,
+            consider_namespace_packages=config.getini("consider_namespace_packages"),
+        )
+    except SyntaxError as e:
+        raise nodes.Collector.CollectError(
+            ExceptionInfo.from_current().getrepr(style="short")
+        ) from e
+    except ImportPathMismatchError as e:
+        raise nodes.Collector.CollectError(
+            "import file mismatch:\n"
+            "imported module {!r} has this __file__ attribute:\n"
+            "  {}\n"
+            "which is not the same as the test file we want to collect:\n"
+            "  {}\n"
+            "HINT: remove __pycache__ / .pyc files and/or use a "
+            "unique basename for your test file modules".format(*e.args)
+        ) from e
+    except ImportError as e:
+        exc_info = ExceptionInfo.from_current()
+        if config.get_verbosity() < 2:
+            exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+        exc_repr = (
+            exc_info.getrepr(style="short")
+            if exc_info.traceback
+            else exc_info.exconly()
+        )
+        formatted_tb = str(exc_repr)
+        raise nodes.Collector.CollectError(
+            f"ImportError while importing test module '{path}'.\n"
+            "Hint: make sure your test modules/packages have valid Python names.\n"
+            "Traceback:\n"
+            f"{formatted_tb}"
+        ) from e
+    except skip.Exception as e:
+        if e.allow_module_level:
+            raise
+        raise nodes.Collector.CollectError(
+            "Using pytest.skip outside of a test will skip the entire module. "
+            "If that's your intention, pass `allow_module_level=True`. "
+            "If you want to skip a specific test or an entire class, "
+            "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
+        ) from e
+    config.pluginmanager.consider_module(mod)
+    return mod
+
+
+class Module(nodes.File, PyCollector):
+    """Collector for test classes and functions in a Python module."""
+
+    def _getobj(self):
+        return importtestmodule(self.path, self.config)
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        self._register_setup_module_fixture()
+        self._register_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super().collect()
+
+    def _register_setup_module_fixture(self) -> None:
+        """Register an autouse, module-scoped fixture for the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+ """ + setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + + if setup_module is None and teardown_module is None: + return + + def xunit_setup_module_fixture(request) -> Generator[None]: + module = request.module + if setup_module is not None: + _call_with_optional_argument(setup_module, module) + yield + if teardown_module is not None: + _call_with_optional_argument(teardown_module, module) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_module_fixture_{self.obj.__name__}", + func=xunit_setup_module_fixture, + nodeid=self.nodeid, + scope="module", + autouse=True, + ) + + def _register_setup_function_fixture(self) -> None: + """Register an autouse, function-scoped fixture for the collected module object + that invokes setup_function/teardown_function if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",)) + teardown_function = _get_first_non_fixture_func( + self.obj, ("teardown_function",) + ) + if setup_function is None and teardown_function is None: + return + + def xunit_setup_function_fixture(request) -> Generator[None]: + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + function = request.function + if setup_function is not None: + _call_with_optional_argument(setup_function, function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_function_fixture_{self.obj.__name__}", + func=xunit_setup_function_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class Package(nodes.Directory): + """Collector for files and directories in a Python packages -- directories + with an `__init__.py` file. + + .. note:: + + Directories without an `__init__.py` file are instead collected by + :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory` + collectors. + + .. versionchanged:: 8.0 + + Now inherits from :class:`~pytest.Directory`. + """ + + def __init__( + self, + fspath: LEGACY_PATH | None, + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + path: Path | None = None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # super().__init__(self, fspath, parent=parent) + session = parent.session + super().__init__( + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + ) + + def setup(self) -> None: + init_mod = importtestmodule(self.path / "__init__.py", self.config) + + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). 
+        setup_module = _get_first_non_fixture_func(
+            init_mod, ("setUpModule", "setup_module")
+        )
+        if setup_module is not None:
+            _call_with_optional_argument(setup_module, init_mod)
+
+        teardown_module = _get_first_non_fixture_func(
+            init_mod, ("tearDownModule", "teardown_module")
+        )
+        if teardown_module is not None:
+            func = partial(_call_with_optional_argument, teardown_module, init_mod)
+            self.addfinalizer(func)
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        # Always collect __init__.py first.
+        def sort_key(entry: os.DirEntry[str]) -> object:
+            return (entry.name != "__init__.py", entry.name)
+
+        config = self.config
+        col: nodes.Collector | None
+        cols: Sequence[nodes.Collector]
+        ihook = self.ihook
+        for direntry in scandir(self.path, sort_key):
+            if direntry.is_dir():
+                path = Path(direntry.path)
+                if not self.session.isinitpath(path, with_parents=True):
+                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
+                        continue
+                col = ihook.pytest_collect_directory(path=path, parent=self)
+                if col is not None:
+                    yield col
+
+            elif direntry.is_file():
+                path = Path(direntry.path)
+                if not self.session.isinitpath(path):
+                    if ihook.pytest_ignore_collect(collection_path=path, config=config):
+                        continue
+                cols = ihook.pytest_collect_file(file_path=path, parent=self)
+                yield from cols
+
+
+def _call_with_optional_argument(func, arg) -> None:
+    """Call the given function with the given argument if func accepts one argument,
+    otherwise call func without arguments."""
+    arg_count = func.__code__.co_argcount
+    if inspect.ismethod(func):
+        arg_count -= 1
+    if arg_count:
+        func(arg)
+    else:
+        func()
+
+
+def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None:
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture to avoid calling it twice.
+    """
+    for name in names:
+        meth: object | None = getattr(obj, name, None)
+        if meth is not None and fixtures.getfixturemarker(meth) is None:
+            return meth
+    return None
+
+
+class Class(PyCollector):
+    """Collector for test methods (and nested classes) in a Python class."""
+
+    @classmethod
+    def from_parent(cls, parent, *, name, obj=None, **kw) -> Self:  # type: ignore[override]
+        """The public constructor."""
+        return super().from_parent(name=name, parent=parent, **kw)
+
+    def newinstance(self):
+        return self.obj()
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
+        if hasinit(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    f"cannot collect test class {self.obj.__name__!r} because it has a "
+                    f"__init__ constructor (from: {self.parent.nodeid})"
+                )
+            )
+            return []
+        elif hasnew(self.obj):
+            assert self.parent is not None
+            self.warn(
+                PytestCollectionWarning(
+                    f"cannot collect test class {self.obj.__name__!r} because it has a "
+                    f"__new__ constructor (from: {self.parent.nodeid})"
+                )
+            )
+            return []
+
+        self._register_setup_class_fixture()
+        self._register_setup_method_fixture()
+
+        self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid)
+
+        return super().collect()
+
+    def _register_setup_class_fixture(self) -> None:
+        """Register an autouse, class-scoped fixture for the collected class object
+        that invokes setup_class/teardown_class if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
+        teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
+        if setup_class is None and teardown_class is None:
+            return
+
+        def xunit_setup_class_fixture(request) -> Generator[None]:
+            cls = request.cls
+            if setup_class is not None:
+                func = getimfunc(setup_class)
+                _call_with_optional_argument(func, cls)
+            yield
+            if teardown_class is not None:
+                func = getimfunc(teardown_class)
+                _call_with_optional_argument(func, cls)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
+            func=xunit_setup_class_fixture,
+            nodeid=self.nodeid,
+            scope="class",
+            autouse=True,
+        )
+
+    def _register_setup_method_fixture(self) -> None:
+        """Register an autouse, function-scoped fixture for the collected class object
+        that invokes setup_method/teardown_method if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_name = "setup_method"
+        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
+        teardown_name = "teardown_method"
+        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
+        if setup_method is None and teardown_method is None:
+            return
+
+        def xunit_setup_method_fixture(request) -> Generator[None]:
+            instance = request.instance
+            method = request.function
+            if setup_method is not None:
+                func = getattr(instance, setup_name)
+                _call_with_optional_argument(func, method)
+            yield
+            if teardown_method is not None:
+                func = getattr(instance, teardown_name)
+                _call_with_optional_argument(func, method)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
+            func=xunit_setup_method_fixture,
+            nodeid=self.nodeid,
+            scope="function",
+            autouse=True,
+        )
+
+
+def hasinit(obj: object) -> bool:
+    init: object = getattr(obj, "__init__", None)
+    if init:
+        return init != object.__init__
+    return False
+
+
+def hasnew(obj: object) -> bool:
+    new: object = getattr(obj, "__new__", None)
+    if new:
+        return new != object.__new__
+    return False
+
+
+@final
+@dataclasses.dataclass(frozen=True)
+class IdMaker:
+    """Make IDs for a parametrization."""
+
+    __slots__ = (
+        "argnames",
+        "parametersets",
+        "idfn",
+        "ids",
+        "config",
+        "nodeid",
+        "func_name",
+    )
+
+    # The argnames of the parametrization.
+    argnames: Sequence[str]
+    # The ParameterSets of the parametrization.
+    parametersets: Sequence[ParameterSet]
+    # Optionally, a user-provided callable to make IDs for parameters in a
+    # ParameterSet.
+    idfn: Callable[[Any], object | None] | None
+    # Optionally, explicit IDs for ParameterSets by index.
+    ids: Sequence[object | None] | None
+    # Optionally, the pytest config.
+    # Used for controlling ASCII escaping, and for calling the
+    # :hook:`pytest_make_parametrize_id` hook.
+    config: Config | None
+    # Optionally, the ID of the node being parametrized.
+    # Used only for clearer error messages.
+    nodeid: str | None
+    # Optionally, the ID of the function being parametrized.
+    # Used only for clearer error messages.
+    func_name: str | None
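+
+    # Illustrative note (not part of pytest): given argnames ("x",), parameter
+    # values 1 and 2 and no user-provided ids, make_unique_parameterset_ids()
+    # below resolves to ["1", "2"]; if two parameter sets resolve to the same
+    # string, counter suffixes are appended ("a0", "a1", ...).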
+
+    def make_unique_parameterset_ids(self) -> list[str]:
+        """Make a unique identifier for each ParameterSet, that may be used to
+        identify the parametrization in a node ID.
+
+        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
+        - user-provided id, if given
+        - else an id derived from the value, applicable for certain types
+        - else <argname><parameterset index>
+        The counter suffix is appended only in case a string wouldn't be unique
+        otherwise.
+        """
+        resolved_ids = list(self._resolve_ids())
+        # All IDs must be unique!
+        if len(resolved_ids) != len(set(resolved_ids)):
+            # Record the number of occurrences of each ID.
+            id_counts = Counter(resolved_ids)
+            # Map the ID to its next suffix.
+            id_suffixes: dict[str, int] = defaultdict(int)
+            # Suffix non-unique IDs to make them unique.
+            for index, id in enumerate(resolved_ids):
+                if id_counts[id] > 1:
+                    suffix = ""
+                    if id and id[-1].isdigit():
+                        suffix = "_"
+                    new_id = f"{id}{suffix}{id_suffixes[id]}"
+                    while new_id in set(resolved_ids):
+                        id_suffixes[id] += 1
+                        new_id = f"{id}{suffix}{id_suffixes[id]}"
+                    resolved_ids[index] = new_id
+                    id_suffixes[id] += 1
+        assert len(resolved_ids) == len(
+            set(resolved_ids)
+        ), f"Internal error: {resolved_ids=}"
+        return resolved_ids
+
+    def _resolve_ids(self) -> Iterable[str]:
+        """Resolve IDs for all ParameterSets (may contain duplicates)."""
+        for idx, parameterset in enumerate(self.parametersets):
+            if parameterset.id is not None:
+                # ID provided directly - pytest.param(..., id="...")
+                yield parameterset.id
+            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
+                # ID provided in the IDs list - parametrize(..., ids=[...]).
+                yield self._idval_from_value_required(self.ids[idx], idx)
+            else:
+                # ID not provided - generate it.
+                yield "-".join(
+                    self._idval(val, argname, idx)
+                    for val, argname in zip(parameterset.values, self.argnames)
+                )
+
+    def _idval(self, val: object, argname: str, idx: int) -> str:
+        """Make an ID for a parameter in a ParameterSet."""
+        idval = self._idval_from_function(val, argname, idx)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_hook(val, argname)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_value(val)
+        if idval is not None:
+            return idval
+        return self._idval_from_argname(argname, idx)
+
+    def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None:
+        """Try to make an ID for a parameter in a ParameterSet using the
+        user-provided id callable, if given."""
+        if self.idfn is None:
+            return None
+        try:
+            id = self.idfn(val)
+        except Exception as e:
+            prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
+            msg = "error raised while trying to determine id of parameter '{}' at position {}"
+            msg = prefix + msg.format(argname, idx)
+            raise ValueError(msg) from e
+        if id is None:
+            return None
+        return self._idval_from_value(id)
+
+    def _idval_from_hook(self, val: object, argname: str) -> str | None:
+        """Try to make an ID for a parameter in a ParameterSet by calling the
+        :hook:`pytest_make_parametrize_id` hook."""
+        if self.config:
+            id: str | None = self.config.hook.pytest_make_parametrize_id(
+                config=self.config, val=val, argname=argname
+            )
+            return id
+        return None
+
+    def _idval_from_value(self, val: object) -> str | None:
+        """Try to make an ID for a parameter in a ParameterSet from its value,
+        if the value type is supported."""
+        if isinstance(val, (str, bytes)):
+            return _ascii_escaped_by_config(val, self.config)
+        elif val is None or isinstance(val, (float, int, bool, complex)):
+            return str(val)
+        elif isinstance(val, Pattern):
+            return ascii_escaped(val.pattern)
+        elif val is NOTSET:
+            # Fallback to default. Note that NOTSET is an enum.Enum.
+            pass
+        elif isinstance(val, enum.Enum):
+            return str(val)
+        elif isinstance(getattr(val, "__name__", None), str):
+            # Name of a class, function, module, etc.
+            name: str = getattr(val, "__name__")
+            return name
+        return None
+
+    def _idval_from_value_required(self, val: object, idx: int) -> str:
+        """Like _idval_from_value(), but fails if the type is not supported."""
+        id = self._idval_from_value(val)
+        if id is not None:
+            return id
+
+        # Fail.
+        if self.func_name is not None:
+            prefix = f"In {self.func_name}: "
+        elif self.nodeid is not None:
+            prefix = f"In {self.nodeid}: "
+        else:
+            prefix = ""
+        msg = (
+            f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. "
+            "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__."
+        )
+        fail(msg, pytrace=False)
+
+    @staticmethod
+    def _idval_from_argname(argname: str, idx: int) -> str:
+        """Make an ID for a parameter in a ParameterSet from the argument name
+        and the index of the ParameterSet."""
+        return str(argname) + str(idx)
+
+
+@final
+@dataclasses.dataclass(frozen=True)
+class CallSpec2:
+    """A planned parameterized invocation of a test function.
+
+    Calculated during collection for a given test function's Metafunc.
+    Once collection is over, each callspec is turned into a single Item
+    and stored in item.callspec.
+    """
+
+    # arg name -> arg value which will be passed to a fixture or pseudo-fixture
+    # of the same name. (indirect or direct parametrization respectively)
+    params: dict[str, object] = dataclasses.field(default_factory=dict)
+    # arg name -> arg index.
+    indices: dict[str, int] = dataclasses.field(default_factory=dict)
+    # Used for sorting parametrized resources.
+    _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict)
+    # Parts which will be added to the item's name in `[..]` separated by "-".
+    _idlist: Sequence[str] = dataclasses.field(default_factory=tuple)
+    # Marks which will be applied to the item.
+    marks: list[Mark] = dataclasses.field(default_factory=list)
+
+    def setmulti(
+        self,
+        *,
+        argnames: Iterable[str],
+        valset: Iterable[object],
+        id: str,
+        marks: Iterable[Mark | MarkDecorator],
+        scope: Scope,
+        param_index: int,
+    ) -> CallSpec2:
+        params = self.params.copy()
+        indices = self.indices.copy()
+        arg2scope = dict(self._arg2scope)
+        for arg, val in zip(argnames, valset):
+            if arg in params:
+                raise ValueError(f"duplicate parametrization of {arg!r}")
+            params[arg] = val
+            indices[arg] = param_index
+            arg2scope[arg] = scope
+        return CallSpec2(
+            params=params,
+            indices=indices,
+            _arg2scope=arg2scope,
+            _idlist=[*self._idlist, id],
+            marks=[*self.marks, *normalize_mark_list(marks)],
+        )
+
+    def getparam(self, name: str) -> object:
+        try:
+            return self.params[name]
+        except KeyError as e:
+            raise ValueError(name) from e
+
+    @property
+    def id(self) -> str:
+        return "-".join(self._idlist)
+
+
+def get_direct_param_fixture_func(request: FixtureRequest) -> Any:
+    return request.param
+
+
+# Used for storing pseudo fixturedefs for direct parametrization.
+name2pseudofixturedef_key = StashKey[Dict[str, FixtureDef[Any]]]()
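+
+# Illustrative note (not part of pytest; the decorator below is hypothetical):
+# a direct parametrization such as
+#     @pytest.mark.parametrize("x", [0, 1])
+# gets an artificial FixtureDef for "x" whose func is
+# get_direct_param_fixture_func above, so the test receives request.param as
+# the value of `x` at setup time.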
+
+
+@final
+class Metafunc:
+    """Objects passed to the :hook:`pytest_generate_tests` hook.
+
+    They help to inspect a test function and to generate tests according to
+    test configuration or values specified in the class or module where a
+    test function is defined.
+    """
+
+    def __init__(
+        self,
+        definition: FunctionDefinition,
+        fixtureinfo: fixtures.FuncFixtureInfo,
+        config: Config,
+        cls=None,
+        module=None,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+
+        #: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
+        self.definition = definition
+
+        #: Access to the :class:`pytest.Config` object for the test session.
+        self.config = config
+
+        #: The module object where the test function is defined in.
+        self.module = module
+
+        #: Underlying Python test function.
+        self.function = definition.obj
+
+        #: Set of fixture names required by the test function.
+        self.fixturenames = fixtureinfo.names_closure
+
+        #: Class object where the test function is defined in or ``None``.
+        self.cls = cls
+
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+
+        # Result of parametrize().
+        self._calls: list[CallSpec2] = []
+
+    def parametrize(
+        self,
+        argnames: str | Sequence[str],
+        argvalues: Iterable[ParameterSet | Sequence[object] | object],
+        indirect: bool | Sequence[str] = False,
+        ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
+        scope: _ScopeName | None = None,
+        *,
+        _param_mark: Mark | None = None,
+    ) -> None:
+        """Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames. Parametrization is performed
+        during the collection phase. If you need to set up expensive resources,
+        see about setting ``indirect`` to do it at test setup time rather than
+        at collection time.
+
+        Can be called multiple times per test function (but only on different
+        argument names), in which case each call parametrizes all previous
+        parametrizations, e.g.
+
+        ::
+
+            unparametrized:         t
+            parametrize ["x", "y"]: t[x], t[y]
+            parametrize [1, 2]:     t[x-1], t[x-2], t[y-1], t[y-2]
+
+        :param argnames:
+            A comma-separated string denoting one or more argument names, or
+            a list/tuple of argument strings.
+
+        :param argvalues:
+            The list of argvalues determines how often a test is invoked with
+            different argument values.
+
+            If only one argname was specified argvalues is a list of values.
+            If N argnames were specified, argvalues must be a list of
+            N-tuples, where each tuple-element specifies a value for its
+            respective argname.
+        :type argvalues: Iterable[_pytest.mark.structures.ParameterSet | Sequence[object] | object]
+        :param indirect:
+            A list of arguments' names (subset of argnames) or a boolean.
+            If True the list contains all names from the argnames. Each
+            argvalue corresponding to an argname in this list will
+            be passed as request.param to its respective argname fixture
+            function so that it can perform more expensive setups during the
+            setup phase of a test rather than at collection time.
+
+        :param ids:
+            Sequence of (or generator for) ids for ``argvalues``,
+            or a callable to return part of the id for each argvalue.
+
+            With sequences (and generators like ``itertools.count()``) the
+            returned ids should be of type ``string``, ``int``, ``float``,
+            ``bool``, or ``None``.
+            They are mapped to the corresponding index in ``argvalues``.
+            ``None`` means to use the auto-generated id.
+
+            If it is a callable it will be called for each entry in
+            ``argvalues``, and the return value is used as part of the
+            auto-generated id for the whole set (where parts are joined with
+            dashes ("-")).
+            This is useful to provide more specific ids for certain items, e.g.
+            dates. Returning ``None`` will use an auto-generated id.
+
+            If no ids are provided they will be generated automatically from
+            the argvalues.
+
+        :param scope:
+            If specified it denotes the scope of the parameters.
+            The scope is used for grouping tests by parameter instances.
+            It will also override any fixture-function defined scope, allowing
+            you to set a dynamic scope using test context or configuration.
+        """
+        argnames, parametersets = ParameterSet._for_parametrize(
+            argnames,
+            argvalues,
+            self.function,
+            self.config,
+            nodeid=self.definition.nodeid,
+        )
+        del argvalues
+
+        if "request" in argnames:
+            fail(
+                "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
+                pytrace=False,
+            )
+
+        if scope is not None:
+            scope_ = Scope.from_user(
+                scope, descr=f"parametrize() call in {self.function.__name__}"
+            )
+        else:
+            scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
+
+        self._validate_if_using_arg_names(argnames, indirect)
+
+        # Use any already (possibly) generated ids with parametrize Marks.
+        if _param_mark and _param_mark._param_ids_from:
+            generated_ids = _param_mark._param_ids_from._param_ids_generated
+            if generated_ids is not None:
+                ids = generated_ids
+
+        ids = self._resolve_parameter_set_ids(
+            argnames, ids, parametersets, nodeid=self.definition.nodeid
+        )
+
+        # Store used (possibly generated) ids with parametrize Marks.
+        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
+            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
+
+        # Add funcargs as fixturedefs to fixtureinfo.arg2fixturedefs by registering
+        # artificial "pseudo" FixtureDef's so that later at test execution time we can
+        # rely on a proper FixtureDef to exist for fixture setup.
+        node = None
+        # If we have a scope that is higher than function, we need
+        # to make sure we only ever create an according fixturedef on
+        # a per-scope basis. We thus store and cache the fixturedef on the
+        # node related to the scope.
+        if scope_ is not Scope.Function:
+            collector = self.definition.parent
+            assert collector is not None
+            node = get_scope_node(collector, scope_)
+            if node is None:
+                # If used class scope and there is no class, use module-level
+                # collector (for now).
+                if scope_ is Scope.Class:
+                    assert isinstance(collector, Module)
+                    node = collector
+                # If used package scope and there is no package, use session
+                # (for now).
+                elif scope_ is Scope.Package:
+                    node = collector.session
+                else:
+                    assert False, f"Unhandled missing scope: {scope}"
+        if node is None:
+            name2pseudofixturedef = None
+        else:
+            default: dict[str, FixtureDef[Any]] = {}
+            name2pseudofixturedef = node.stash.setdefault(
+                name2pseudofixturedef_key, default
+            )
+        arg_directness = self._resolve_args_directness(argnames, indirect)
+        for argname in argnames:
+            if arg_directness[argname] == "indirect":
+                continue
+            if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
+                fixturedef = name2pseudofixturedef[argname]
+            else:
+                fixturedef = FixtureDef(
+                    config=self.config,
+                    baseid="",
+                    argname=argname,
+                    func=get_direct_param_fixture_func,
+                    scope=scope_,
+                    params=None,
+                    ids=None,
+                    _ispytest=True,
+                )
+                if name2pseudofixturedef is not None:
+                    name2pseudofixturedef[argname] = fixturedef
+            self._arg2fixturedefs[argname] = [fixturedef]
+
+        # Create the new calls: if parametrize() was applied multiple times
+        # (by stacking the decorator) we accumulate those calls, generating
+        # the cartesian product of all calls.
+        newcalls = []
+        for callspec in self._calls or [CallSpec2()]:
+            for param_index, (param_id, param_set) in enumerate(
+                zip(ids, parametersets)
+            ):
+                newcallspec = callspec.setmulti(
+                    argnames=argnames,
+                    valset=param_set.values,
+                    id=param_id,
+                    marks=param_set.marks,
+                    scope=scope_,
+                    param_index=param_index,
+                )
+                newcalls.append(newcallspec)
+        self._calls = newcalls
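+
+    # Illustrative note (not part of pytest; the test below is hypothetical):
+    # stacking parametrize marks multiplies the callspecs, e.g.
+    #     @pytest.mark.parametrize("y", ["a", "b"])
+    #     @pytest.mark.parametrize("x", [1, 2])
+    # produces test IDs [1-a], [1-b], [2-a], [2-b].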
+ """ + if ids is None: + idfn = None + ids_ = None + elif callable(ids): + idfn = ids + ids_ = None + else: + idfn = None + ids_ = self._validate_ids(ids, parametersets, self.function.__name__) + id_maker = IdMaker( + argnames, + parametersets, + idfn, + ids_, + self.config, + nodeid=nodeid, + func_name=self.function.__name__, + ) + return id_maker.make_unique_parameterset_ids() + + def _validate_ids( + self, + ids: Iterable[object | None], + parametersets: Sequence[ParameterSet], + func_name: str, + ) -> list[object | None]: + try: + num_ids = len(ids) # type: ignore[arg-type] + except TypeError: + try: + iter(ids) + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e + num_ids = len(parametersets) + + # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 + if num_ids != len(parametersets) and num_ids != 0: + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False) + + return list(itertools.islice(ids, num_ids)) + + def _resolve_args_directness( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> dict[str, Literal["indirect", "direct"]]: + """Resolve if each parametrized argument must be considered an indirect + parameter to a fixture of the same name, or a direct parameter to the + parametrized function, based on the ``indirect`` parameter of the + parametrized() call. + + :param argnames: + List of argument names passed to ``parametrize()``. + :param indirect: + Same as the ``indirect`` parameter of ``parametrize()``. + :returns + A dict mapping each arg name to either "indirect" or "direct". + """ + arg_directness: dict[str, Literal["indirect", "direct"]] + if isinstance(indirect, bool): + arg_directness = dict.fromkeys( + argnames, "indirect" if indirect else "direct" + ) + elif isinstance(indirect, Sequence): + arg_directness = dict.fromkeys(argnames, "direct") + for arg in indirect: + if arg not in argnames: + fail( + f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist", + pytrace=False, + ) + arg_directness[arg] = "indirect" + else: + fail( + f"In {self.function.__name__}: expected Sequence or boolean" + f" for indirect, got {type(indirect).__name__}", + pytrace=False, + ) + return arg_directness + + def _validate_if_using_arg_names( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. 
+ """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + f"In {func_name}: function already takes an argument '{arg}' with a default value", + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: bool | Sequence[str], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[-1]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +class Function(PyobjMixin, nodes.Item): + """Item responsible for setting up and executing a Python test function. + + :param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + :param parent: + The parent Node. + :param config: + The pytest Config object. + :param callspec: + If given, this function has been parametrized and the callspec contains + meta information about the parametrization. + :param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + :param keywords: + Keywords bound to the function object for "-k" matching. + :param session: + The pytest Session object. + :param fixtureinfo: + Fixture information already resolved at this fixture node.. + :param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. 
+
+    def __init__(
+        self,
+        name: str,
+        parent,
+        config: Config | None = None,
+        callspec: CallSpec2 | None = None,
+        callobj=NOTSET,
+        keywords: Mapping[str, Any] | None = None,
+        session: Session | None = None,
+        fixtureinfo: FuncFixtureInfo | None = None,
+        originalname: str | None = None,
+    ) -> None:
+        super().__init__(name, parent, config=config, session=session)
+
+        if callobj is not NOTSET:
+            self._obj = callobj
+            self._instance = getattr(callobj, "__self__", None)
+
+        #: Original function name, without any decorations (for example
+        #: parametrization adds a ``"[...]"`` suffix to function names), used to access
+        #: the underlying function object from ``parent`` (in case ``callobj`` is not given
+        #: explicitly).
+        #:
+        #: .. versionadded:: 3.0
+        self.originalname = originalname or name
+
+        # Note: when FunctionDefinition is introduced, we should change ``originalname``
+        # to a readonly property that returns FunctionDefinition.name.
+
+        self.own_markers.extend(get_unpacked_marks(self.obj))
+        if callspec:
+            self.callspec = callspec
+            self.own_markers.extend(callspec.marks)
+
+        # todo: this is a hell of a hack
+        # https://github.com/pytest-dev/pytest/issues/4569
+        # Note: the order of the updates is important here; indicates what
+        # takes priority (ctor argument over function attributes over markers).
+        # Take own_markers only; NodeKeywords handles parent traversal on its own.
+        self.keywords.update((mark.name, mark) for mark in self.own_markers)
+        self.keywords.update(self.obj.__dict__)
+        if keywords:
+            self.keywords.update(keywords)
+
+        if fixtureinfo is None:
+            fm = self.session._fixturemanager
+            fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls)
+        self._fixtureinfo: FuncFixtureInfo = fixtureinfo
+        self.fixturenames = fixtureinfo.names_closure
+        self._initrequest()
+
+    # todo: determine sound type limitations
+    @classmethod
+    def from_parent(cls, parent, **kw) -> Self:
+        """The public constructor."""
+        return super().from_parent(parent=parent, **kw)
+
+    def _initrequest(self) -> None:
+        self.funcargs: dict[str, object] = {}
+        self._request = fixtures.TopRequest(self, _ispytest=True)
+
+    @property
+    def function(self):
+        """Underlying python 'function' object."""
+        return getimfunc(self.obj)
+
+    @property
+    def instance(self):
+        try:
+            return self._instance
+        except AttributeError:
+            if isinstance(self.parent, Class):
+                # Each Function gets a fresh class instance.
+                self._instance = self._getinstance()
+            else:
+                self._instance = None
+        return self._instance
+
+    def _getinstance(self):
+        if isinstance(self.parent, Class):
+            # Each Function gets a fresh class instance.
+            return self.parent.newinstance()
+        else:
+            return None
+
+    def _getobj(self):
+        instance = self.instance
+        if instance is not None:
+            parent_obj = instance
+        else:
+            assert self.parent is not None
+            parent_obj = self.parent.obj  # type: ignore[attr-defined]
+        return getattr(parent_obj, self.originalname)
+
+    @property
+    def _pyfuncitem(self):
+        """(compatonly) for code expecting pytest-2.2 style request objects."""
+        return self
+
+    def runtest(self) -> None:
+        """Execute the underlying test function."""
+        self.ihook.pytest_pyfunc_call(pyfuncitem=self)
+
+    def setup(self) -> None:
+        self._request._fillfixtures()
+
+    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
+        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
+            code = _pytest._code.Code.from_function(get_real_func(self.obj))
+            path, firstlineno = code.path, code.firstlineno
+            traceback = excinfo.traceback
+            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+            if ntraceback == traceback:
+                ntraceback = ntraceback.cut(path=path)
+                if ntraceback == traceback:
+                    ntraceback = ntraceback.filter(filter_traceback)
+                    if not ntraceback:
+                        ntraceback = traceback
+            ntraceback = ntraceback.filter(excinfo)
+
+            # issue364: mark all but first and last frames to
+            # only show a single-line message for each frame.
+            if self.config.getoption("tbstyle", "auto") == "auto":
+                if len(ntraceback) > 2:
+                    ntraceback = Traceback(
+                        (
+                            ntraceback[0],
+                            *(t.with_repr_style("short") for t in ntraceback[1:-1]),
+                            ntraceback[-1],
+                        )
+                    )
+
+            return ntraceback
+        return excinfo.traceback
+
+    # TODO: Type ignored -- breaks Liskov Substitution.
+    def repr_failure(  # type: ignore[override]
+        self,
+        excinfo: ExceptionInfo[BaseException],
+    ) -> str | TerminalRepr:
+        style = self.config.getoption("tbstyle", "auto")
+        if style == "auto":
+            style = "long"
+        return self._repr_failure_py(excinfo, style=style)
+
+
+class FunctionDefinition(Function):
+    """This class is a stop gap solution until we evolve to have actual function
+    definition nodes and manage to get rid of ``metafunc``."""
+
+    def runtest(self) -> None:
+        raise RuntimeError("function definitions are not supposed to be run as tests")
+
+    setup = runtest
diff --git a/.venv/Lib/site-packages/_pytest/python_api.py b/.venv/Lib/site-packages/_pytest/python_api.py
new file mode 100644
index 00000000..4174a55b
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/python_api.py
@@ -0,0 +1,1020 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+from collections.abc import Collection
+from collections.abc import Sized
+from decimal import Decimal
+import math
+from numbers import Complex
+import pprint
+import re
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import ContextManager
+from typing import final
+from typing import Mapping
+from typing import overload
+from typing import Pattern
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+
+import _pytest._code
+from _pytest.outcomes import fail
+
+
+if TYPE_CHECKING:
+    from numpy import ndarray
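+
+
+# Illustrative note (not part of pytest): this module implements pytest.approx
+# and pytest.raises; e.g. `0.1 + 0.2 == approx(0.3)` holds because approx
+# compares with a default relative tolerance of 1e-6 and a default absolute
+# tolerance of 1e-12.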
+
+
+def _compare_approx(
+    full_object: object,
+    message_data: Sequence[tuple[str, str, str]],
+    number_of_elements: int,
+    different_ids: Sequence[object],
+    max_abs_diff: float,
+    max_rel_diff: float,
+) -> list[str]:
+    message_list = list(message_data)
+    message_list.insert(0, ("Index", "Obtained", "Expected"))
+    max_sizes = [0, 0, 0]
+    for index, obtained, expected in message_list:
+        max_sizes[0] = max(max_sizes[0], len(index))
+        max_sizes[1] = max(max_sizes[1], len(obtained))
+        max_sizes[2] = max(max_sizes[2], len(expected))
+    explanation = [
+        f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:",
+        f"Max absolute difference: {max_abs_diff}",
+        f"Max relative difference: {max_rel_diff}",
+    ] + [
+        f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}"
+        for indexes, obtained, expected in message_list
+    ]
+    return explanation
+
+
+# builtin pytest.approx helper
+
+
+class ApproxBase:
+    """Provide shared utilities for making approximate comparisons between
+    numbers or sequences of numbers."""
+
+    # Tell numpy to use our `__eq__` operator instead of its own.
+    __array_ufunc__ = None
+    __array_priority__ = 100
+
+    def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
+        __tracebackhide__ = True
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+        self.nan_ok = nan_ok
+        self._check_type()
+
+    def __repr__(self) -> str:
+        raise NotImplementedError
+
+    def _repr_compare(self, other_side: Any) -> list[str]:
+        return [
+            "comparison failed",
+            f"Obtained: {other_side}",
+            f"Expected: {self}",
+        ]
+
+    def __eq__(self, actual) -> bool:
+        return all(
+            a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
+        )
+
+    def __bool__(self):
+        __tracebackhide__ = True
+        raise AssertionError(
+            "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?"
+        )
+
+    # Ignore type because of https://github.com/python/mypy/issues/4266.
+    __hash__ = None  # type: ignore
+
+    def __ne__(self, actual) -> bool:
+        return not (actual == self)
+
+    def _approx_scalar(self, x) -> ApproxScalar:
+        if isinstance(x, Decimal):
+            return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
+        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
+
+    def _yield_comparisons(self, actual):
+        """Yield all the pairs of numbers to be compared.
+
+        This is used to implement the `__eq__` method.
+        """
+        raise NotImplementedError
+
+    def _check_type(self) -> None:
+        """Raise a TypeError if the expected value is not a valid type."""
+        # This is only a concern if the expected value is a sequence. In every
+        # other case, the approx() function ensures that the expected value has
+        # a numeric type. For this reason, the default is to do nothing. The
+        # classes that deal with sequences should reimplement this method to
+        # raise if there are any non-numeric elements in the sequence.
+
+
+def _recursive_sequence_map(f, x):
+    """Recursively map a function over a sequence of arbitrary depth"""
+    if isinstance(x, (list, tuple)):
+        seq_type = type(x)
+        return seq_type(_recursive_sequence_map(f, xi) for xi in x)
+    elif _is_sequence_like(x):
+        return [_recursive_sequence_map(f, xi) for xi in x]
+    else:
+        return f(x)
+
+
+class ApproxNumpy(ApproxBase):
+    """Perform approximate comparisons where the expected value is a numpy array."""
+
+    def __repr__(self) -> str:
+        list_scalars = _recursive_sequence_map(
+            self._approx_scalar, self.expected.tolist()
+        )
+        return f"approx({list_scalars!r})"
+
+    def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]:
+        import itertools
+        import math
+
+        def get_value_from_nested_list(
+            nested_list: list[Any], nd_index: tuple[Any, ...]
+        ) -> Any:
+            """
+            Helper function to get the value out of a nested list, given an n-dimensional index.
+            This mimics numpy's indexing, but for raw nested python lists.
+            """
+            value: Any = nested_list
+            for i in nd_index:
+                value = value[i]
+            return value
+
+        np_array_shape = self.expected.shape
+        approx_side_as_seq = _recursive_sequence_map(
+            self._approx_scalar, self.expected.tolist()
+        )
+
+        # convert other_side to numpy array to ensure shape attribute is available
+        other_side_as_array = _as_numpy_array(other_side)
+        assert other_side_as_array is not None
+
+        if np_array_shape != other_side_as_array.shape:
+            return [
+                "Impossible to compare arrays with different shapes.",
+                f"Shapes: {np_array_shape} and {other_side_as_array.shape}",
+            ]
+
+        number_of_elements = self.expected.size
+        max_abs_diff = -math.inf
+        max_rel_diff = -math.inf
+        different_ids = []
+        for index in itertools.product(*(range(i) for i in np_array_shape)):
+            approx_value = get_value_from_nested_list(approx_side_as_seq, index)
+            other_value = get_value_from_nested_list(other_side_as_array, index)
+            if approx_value != other_value:
+                abs_diff = abs(approx_value.expected - other_value)
+                max_abs_diff = max(max_abs_diff, abs_diff)
+                if other_value == 0.0:
+                    max_rel_diff = math.inf
+                else:
+                    max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
+                different_ids.append(index)
+
+        message_data = [
+            (
+                str(index),
+                str(get_value_from_nested_list(other_side_as_array, index)),
+                str(get_value_from_nested_list(approx_side_as_seq, index)),
+            )
+            for index in different_ids
+        ]
+        return _compare_approx(
+            self.expected,
+            message_data,
+            number_of_elements,
+            different_ids,
+            max_abs_diff,
+            max_rel_diff,
+        )
+
+    def __eq__(self, actual) -> bool:
+        import numpy as np
+
+        # self.expected is supposed to always be an array here.
+
+        if not np.isscalar(actual):
+            try:
+                actual = np.asarray(actual)
+            except Exception as e:
+                raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
+
+        if not np.isscalar(actual) and actual.shape != self.expected.shape:
+            return False
+
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        import numpy as np
+
+        # `actual` can either be a numpy array or a scalar, it is treated in
+        # `__eq__` before being passed to `ApproxBase.__eq__`, which is the
+        # only method that calls this one.
+
+        if np.isscalar(actual):
+            for i in np.ndindex(self.expected.shape):
+                yield actual, self.expected[i].item()
+        else:
+            for i in np.ndindex(self.expected.shape):
+                yield actual[i].item(), self.expected[i].item()
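+
+
+# Illustrative note (not part of pytest): when the actual side is a scalar it
+# is broadcast against every element of the expected array, so a hypothetical
+# `3.0 == approx(numpy.array([3.0, 3.0]))` compares 3.0 to each element.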
+
+
+class ApproxMapping(ApproxBase):
+    """Perform approximate comparisons where the expected value is a mapping
+    with numeric values (the keys can be anything)."""
+
+    def __repr__(self) -> str:
+        return f"approx({({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})"
+
+    def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]:
+        import math
+
+        approx_side_as_map = {
+            k: self._approx_scalar(v) for k, v in self.expected.items()
+        }
+
+        number_of_elements = len(approx_side_as_map)
+        max_abs_diff = -math.inf
+        max_rel_diff = -math.inf
+        different_ids = []
+        for (approx_key, approx_value), other_value in zip(
+            approx_side_as_map.items(), other_side.values()
+        ):
+            if approx_value != other_value:
+                if approx_value.expected is not None and other_value is not None:
+                    max_abs_diff = max(
+                        max_abs_diff, abs(approx_value.expected - other_value)
+                    )
+                    if approx_value.expected == 0.0:
+                        max_rel_diff = math.inf
+                    else:
+                        max_rel_diff = max(
+                            max_rel_diff,
+                            abs(
+                                (approx_value.expected - other_value)
+                                / approx_value.expected
+                            ),
+                        )
+                different_ids.append(approx_key)
+
+        message_data = [
+            (str(key), str(other_side[key]), str(approx_side_as_map[key]))
+            for key in different_ids
+        ]
+
+        return _compare_approx(
+            self.expected,
+            message_data,
+            number_of_elements,
+            different_ids,
+            max_abs_diff,
+            max_rel_diff,
+        )
+
+    def __eq__(self, actual) -> bool:
+        try:
+            if set(actual.keys()) != set(self.expected.keys()):
+                return False
+        except AttributeError:
+            return False
+
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        for k in self.expected.keys():
+            yield actual[k], self.expected[k]
+
+    def _check_type(self) -> None:
+        __tracebackhide__ = True
+        for key, value in self.expected.items():
+            if isinstance(value, type(self.expected)):
+                msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n  full mapping={}"
+                raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
+
+
+class ApproxSequenceLike(ApproxBase):
+    """Perform approximate comparisons where the expected value is a sequence of numbers."""
+
+    def __repr__(self) -> str:
+        seq_type = type(self.expected)
+        if seq_type not in (tuple, list):
+            seq_type = list
+        return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})"
+
+    def _repr_compare(self, other_side: Sequence[float]) -> list[str]:
+        import math
+
+        if len(self.expected) != len(other_side):
+            return [
+                "Impossible to compare lists with different sizes.",
+                f"Lengths: {len(self.expected)} and {len(other_side)}",
+            ]
+
+        approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected)
+
+        number_of_elements = len(approx_side_as_map)
+        max_abs_diff = -math.inf
+        max_rel_diff = -math.inf
+        different_ids = []
+        for i, (approx_value, other_value) in enumerate(
+            zip(approx_side_as_map, other_side)
+        ):
+            if approx_value != other_value:
+                abs_diff = abs(approx_value.expected - other_value)
+                max_abs_diff = max(max_abs_diff, abs_diff)
+                if other_value == 0.0:
+                    max_rel_diff = math.inf
+                else:
+                    max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value))
+                different_ids.append(i)
+
+        message_data = [
+            (str(i), str(other_side[i]), str(approx_side_as_map[i]))
+            for i in different_ids
+        ]
+
+        return _compare_approx(
+            self.expected,
+            message_data,
+            number_of_elements,
+            different_ids,
+            max_abs_diff,
+            max_rel_diff,
+        )
+
+    def __eq__(self, actual) -> bool:
+        try:
+            if len(actual) != len(self.expected):
+                return False
+        except TypeError:
+            return False
+        return super().__eq__(actual)
+
+    def _yield_comparisons(self, actual):
+        return zip(actual, self.expected)
+
+    def _check_type(self) -> None:
+        __tracebackhide__ = True
+        for index, x in enumerate(self.expected):
+            if isinstance(x, type(self.expected)):
+                msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n  full sequence: {}"
+                raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
+
+
+class ApproxScalar(ApproxBase):
+    """Perform approximate comparisons where the expected value is a single number."""
+
+    # Using Real should be better than this Union, but not possible yet:
+    # https://github.com/python/typeshed/pull/3108
+    DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12
+    DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6
+
+    def __repr__(self) -> str:
+        """Return a string communicating both the expected value and the
+        tolerance for the comparison being made.
+
+        For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
+        """
+        # Don't show a tolerance for values that aren't compared using
+        # tolerances, i.e. non-numerics and infinities. Need to call abs to
+        # handle complex numbers, e.g. (inf + 1j).
+        if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf(
+            abs(self.expected)
+        ):
+            return str(self.expected)
+
+        # If a sensible tolerance can't be calculated, self.tolerance will
+        # raise a ValueError. In this case, display '???'.
+        try:
+            vetted_tolerance = f"{self.tolerance:.1e}"
+            if (
+                isinstance(self.expected, Complex)
+                and self.expected.imag
+                and not math.isinf(self.tolerance)
+            ):
+                vetted_tolerance += " ∠ ±180°"
+        except ValueError:
+            vetted_tolerance = "???"
+
+        return f"{self.expected} ± {vetted_tolerance}"
+
+    def __eq__(self, actual) -> bool:
+        """Return whether the given value is equal to the expected value
+        within the pre-specified tolerance."""
+        asarray = _as_numpy_array(actual)
+        if asarray is not None:
+            # Call ``__eq__()`` manually to prevent infinite-recursion with
+            # numpy<1.13. See #3748.
+            return all(self.__eq__(a) for a in asarray.flat)
+
+        # Short-circuit exact equality.
+        if actual == self.expected:
+            return True
+
+        # If either type is non-numeric, fall back to strict equality.
+        # NB: we need Complex, rather than just Number, to ensure that __abs__,
+        # __sub__, and __float__ are defined.
+        if not (
+            isinstance(self.expected, (Complex, Decimal))
+            and isinstance(actual, (Complex, Decimal))
+        ):
+            return False
+
+        # Allow the user to control whether NaNs are considered equal to each
+        # other or not. The abs() calls are for compatibility with complex
+        # numbers.
+        if math.isnan(abs(self.expected)):
+            return self.nan_ok and math.isnan(abs(actual))
+
+        # Infinity shouldn't be approximately equal to anything but itself, but
+        # if there's a relative tolerance, it will be infinite and infinity
+        # will seem approximately equal to everything. The equal-to-itself
+        # case would have been short circuited above, so here we can just
+        # return false if the expected value is infinite. The abs() call is
+        # for compatibility with complex numbers.
+        if math.isinf(abs(self.expected)):
+            return False
+
+        # Return true if the two numbers are within the tolerance.
+        result: bool = abs(self.expected - actual) <= self.tolerance
+        return result
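+
+    # Illustrative note (not part of pytest): with the default tolerances the
+    # check above amounts to
+    #     abs(expected - actual) <= max(1e-6 * abs(expected), 1e-12)
+    # so, for example, approx(100.0) accepts any value within 1e-4 of 100.0.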
+
+    # Ignore type because of https://github.com/python/mypy/issues/4266.
+    __hash__ = None  # type: ignore
+
+    @property
+    def tolerance(self):
+        """Return the tolerance for the comparison.
+
+        This could be either an absolute tolerance or a relative tolerance,
+        depending on what the user specified or which would be larger.
+        """
+
+        def set_default(x, default):
+            return x if x is not None else default
+
+        # Figure out what the absolute tolerance should be. ``self.abs`` is
+        # either None or a value specified by the user.
+        absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
+
+        if absolute_tolerance < 0:
+            raise ValueError(
+                f"absolute tolerance can't be negative: {absolute_tolerance}"
+            )
+        if math.isnan(absolute_tolerance):
+            raise ValueError("absolute tolerance can't be NaN.")
+
+        # If the user specified an absolute tolerance but not a relative one,
+        # just return the absolute tolerance.
+        if self.rel is None:
+            if self.abs is not None:
+                return absolute_tolerance
+
+        # Figure out what the relative tolerance should be. ``self.rel`` is
+        # either None or a value specified by the user. This is done after
+        # we've made sure the user didn't ask for an absolute tolerance only,
+        # because we don't want to raise errors about the relative tolerance if
+        # we aren't even going to use it.
+        relative_tolerance = set_default(
+            self.rel, self.DEFAULT_RELATIVE_TOLERANCE
+        ) * abs(self.expected)
+
+        if relative_tolerance < 0:
+            raise ValueError(
+                f"relative tolerance can't be negative: {relative_tolerance}"
+            )
+        if math.isnan(relative_tolerance):
+            raise ValueError("relative tolerance can't be NaN.")
+
+        # Return the larger of the relative and absolute tolerances.
+        return max(relative_tolerance, absolute_tolerance)
+
+
+class ApproxDecimal(ApproxScalar):
+    """Perform approximate comparisons where the expected value is a Decimal."""
+
+    DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
+    DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
+
+
+def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
+    """Assert that two numbers (or two ordered sequences of numbers) are equal to each other
+    within some tolerance.
+
+    Due to the :doc:`python:tutorial/floatingpoint`, numbers that we
+    would intuitively expect to be equal are not always so::
+
+        >>> 0.1 + 0.2 == 0.3
+        False
+
+    This problem is commonly encountered when writing tests, e.g. when making
+    sure that floating-point values are what you expect them to be. One way to
+    deal with this problem is to assert that two floating-point numbers are
+    equal to within some appropriate tolerance::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
+        True
+
+    However, comparisons like this are tedious to write and difficult to
+    understand. Furthermore, absolute comparisons like the one above are
+    usually discouraged because there's no tolerance that works well for all
+    situations. ``1e-6`` is good for numbers around ``1``, but too small for
+    very big numbers and too big for very small ones. It's better to express
+    the tolerance as a fraction of the expected value, but relative comparisons
+    like that are even more difficult to write correctly and concisely.
+
+    The ``approx`` class performs floating-point comparisons using a syntax
+    that's as intuitive as possible::
+
+        >>> from pytest import approx
+        >>> 0.1 + 0.2 == approx(0.3)
+        True
+
+    The same syntax also works for ordered sequences of numbers::
+
+        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
+        True
+
+    ``numpy`` arrays::
+
+        >>> import numpy as np  # doctest: +SKIP
+        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6]))  # doctest: +SKIP
+        True
+
+    And for a ``numpy`` array against a scalar::
+
+        >>> import numpy as np  # doctest: +SKIP
+        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3)  # doctest: +SKIP
+        True
+
+    Only ordered sequences are supported, because ``approx`` needs
+    to infer the relative position of the sequences without ambiguity. This means
+    ``sets`` and other unordered sequences are not supported.
+
+    Finally, dictionary *values* can also be compared::
+
+        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
+        True
+
+    The comparison will be true if both mappings have the same keys and their
+    respective values match the expected tolerances.
+
+    **Tolerances**
+
+    By default, ``approx`` considers numbers within a relative tolerance of
+    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
+    This treatment would lead to surprising results if the expected value was
+    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
+    To handle this case less surprisingly, ``approx`` also considers numbers
+    within an absolute tolerance of ``1e-12`` of its expected value to be
+    equal. Infinity and NaN are special cases. Infinity is only considered
+    equal to itself, regardless of the relative tolerance. NaN is not
+    considered equal to anything by default, but you can make it be equal to
+    itself by setting the ``nan_ok`` argument to True. (This is meant to
+    facilitate comparing arrays that use NaN to mean "no data".)
+
+    Both the relative and absolute tolerances can be changed by passing
+    arguments to the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all. In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance. If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    You can also use ``approx`` to compare nonnumeric types, or dicts and
+    sequences containing nonnumeric types, in which case it falls back to
+    strict equality. This can be useful for comparing dicts and sequences that
+    can contain optional values::
+
+        >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None})
+        True
+        >>> [None, 1.0000005] == approx([None,1])
+        True
+        >>> ["foo", 1.0000005] == approx([None,1])
+        False
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers.
+    All of these algorithms are based on relative and absolute tolerances and
+    should agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met. Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. More information: :py:func:`math.isclose`.
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by :py:func:`numpy.allclose`. More information:
+      :std:doc:`numpy:reference/generated/numpy.isclose`.
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered, so this function is not appropriate for very large or very
+      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
+      and it's ugly because it doesn't follow PEP8. More information:
+      :py:meth:`unittest.TestCase.assertAlmostEqual`.
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. note::
+
+        ``approx`` can handle numpy arrays, but we recommend the
+        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
+        if you need support for comparisons, NaNs, or ULP-based tolerances.
+
+        To match strings using regex, you can use ``Matches`` from the
+        `re_assert package <https://github.com/asottile/re-assert>`_.
+
+    .. warning::
+
+        .. versionchanged:: 3.2
+
+        In order to avoid inconsistent behavior, :py:exc:`TypeError` is
+        raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+        The example below illustrates the problem::
+
+            assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+            assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+        In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+        to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+        for the comparison. This is because the call hierarchy of rich comparisons
+        follows a fixed behavior. More information: :py:meth:`object.__ge__`
+
+    .. versionchanged:: 3.7.1
+        ``approx`` raises ``TypeError`` when it encounters a dict value or
+        sequence element of nonnumeric type.
+
+    .. versionchanged:: 6.1.0
+        ``approx`` falls back to strict equality for nonnumeric types instead
+        of raising ``TypeError``.
+    """
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # "actual" value is equivalent to the given expected value within the
+    # allowed tolerance. The latter is used to show the user the expected
+    # value and tolerance, in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+
+    __tracebackhide__ = True
+
+    if isinstance(expected, Decimal):
+        cls: type[ApproxBase] = ApproxDecimal
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif _is_numpy_array(expected):
+        expected = _as_numpy_array(expected)
+        cls = ApproxNumpy
+    elif _is_sequence_like(expected):
+        cls = ApproxSequenceLike
+    elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)):
+        msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
+        raise TypeError(msg)
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_sequence_like(expected: object) -> bool:
+    return (
+        hasattr(expected, "__getitem__")
+        and isinstance(expected, Sized)
+        and not isinstance(expected, (str, bytes))
+    )
+
+
+def _is_numpy_array(obj: object) -> bool:
+    """
+    Return true if the given object is implicitly convertible to ndarray,
+    and numpy is already imported.
+    """
+    return _as_numpy_array(obj) is not None
+
+
+def _as_numpy_array(obj: object) -> ndarray | None:
+    """
+    Return an ndarray if the given object is implicitly convertible to ndarray,
+    and numpy is already imported, otherwise None.
+    """
+    import sys
+
+    np: Any = sys.modules.get("numpy")
+    if np is not None:
+        # avoid infinite recursion on numpy scalars, which have __array__
+        if np.isscalar(obj):
+            return None
+        elif isinstance(obj, np.ndarray):
+            return obj
+        elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
+            return np.asarray(obj)
+    return None
+
+
+# builtin pytest.raises helper
+
+E = TypeVar("E", bound=BaseException)
+
+
+@overload
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...],
+    *,
+    match: str | Pattern[str] | None = ...,
+) -> RaisesContext[E]: ...
+
+
+@overload
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...],
+    func: Callable[..., Any],
+    *args: Any,
+    **kwargs: Any,
+) -> _pytest._code.ExceptionInfo[E]: ...
+
+
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...], *args: Any, **kwargs: Any
+) -> RaisesContext[E] | _pytest._code.ExceptionInfo[E]:
+    r"""Assert that a code block/function call raises an exception type, or one of its subclasses.
+
+    :param expected_exception:
+        The expected exception type, or a tuple if one of multiple possible
+        exception types are expected. Note that subclasses of the passed exceptions
+        will also match.
+
+    :kwparam str | re.Pattern[str] | None match:
+        If specified, a string containing a regular expression,
+        or a regular expression object, that is tested against the string
+        representation of the exception and its :pep:`678` `__notes__`
+        using :func:`re.search`.
+
+        To match a literal string that may contain :ref:`special characters
+        <python:re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
+
+        (This is only used when ``pytest.raises`` is used as a context manager,
+        and passed through to the function otherwise.
+        When using ``pytest.raises`` as a function, you can use:
+        ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
+
+    Use ``pytest.raises`` as a context manager, which will capture the exception of the given
+    type, or any of its subclasses::
+
+        >>> import pytest
+        >>> with pytest.raises(ZeroDivisionError):
+        ...    1/0
+
+    If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example
+    above), or no exception at all, the check will fail instead.
+
+    You can also use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with pytest.raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with pytest.raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
+    The ``match`` argument searches the formatted exception string, which includes any
+    `PEP-678 <https://peps.python.org/pep-0678/>`__ ``__notes__``:
+
+        >>> with pytest.raises(ValueError, match=r"had a note added"):  # doctest: +SKIP
+        ...     e = ValueError("value must be 42")
+        ...     e.add_note("had a note added")
+        ...     raise e
+
+    The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
+    details of the captured exception::
+
+        >>> with pytest.raises(ValueError) as exc_info:
+        ...     raise ValueError("value must be 42")
+        >>> assert exc_info.type is ValueError
+        >>> assert exc_info.value.args[0] == "value must be 42"
+
+    .. warning::
+
+       Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this::
+
+           with pytest.raises(Exception):  # Careful, this will catch ANY exception raised.
+               some_function()
+
+       Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide
+       real bugs, where the user wrote this expecting a specific exception, but some other exception is being
+       raised due to a bug introduced during a refactoring.
+
+       Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch
+       **any** exception raised.
+
+    .. note::
+
+       When using ``pytest.raises`` as a context manager, it's worthwhile to
+       note that normal context manager rules apply and that the exception
+       raised *must* be the final line in the scope of the context manager.
+       Lines of code after that, within the scope of the context manager will
+       not be executed. For example::
+
+           >>> value = 15
+           >>> with pytest.raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...     assert exc_info.type is ValueError  # This will not execute.
+
+       Instead, the following approach must be taken (note the difference in
+       scope)::
+
+           >>> with pytest.raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...
+           >>> assert exc_info.type is ValueError
+
+    **Using with** ``pytest.mark.parametrize``
+
+    When using :ref:`pytest.mark.parametrize ref`
+    it is possible to parametrize tests such that
+    some runs raise an exception and others do not.
+
+    See :ref:`parametrizing_conditional_raising` for an example.
+
+    .. seealso::
+
+        :ref:`assertraises` for more examples and detailed discussion.
+
+    **Legacy form**
+
+    It is possible to specify a callable by passing a to-be-called lambda::
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ...>
+
+    or you can specify an arbitrary callable with arguments::
+
+        >>> def f(x): return 1/x
+        ...
+        >>> raises(ZeroDivisionError, f, 0)
+        <ExceptionInfo ...>
+        >>> raises(ZeroDivisionError, f, x=0)
+        <ExceptionInfo ...>
+
+    The form above is fully supported but discouraged for new code because the
+    context manager form is regarded as more readable and less error-prone.
+
+    .. note::
+        Similar to caught exception objects in Python, explicitly clearing
+        local references to returned ``ExceptionInfo`` objects can
+        help the Python interpreter speed up its garbage collection.
+
+        Clearing those references breaks a reference cycle
+        (``ExceptionInfo`` --> caught exception --> frame stack raising
+        the exception --> current frame stack --> local variables -->
+        ``ExceptionInfo``) which makes Python keep all objects referenced
+        from that cycle (including all local variables in the current
+        frame) alive until the next cyclic garbage collection run.
+        More detailed information can be found in the official Python
+        documentation for :ref:`the try statement <python:try>`.
+    """
+    __tracebackhide__ = True
+
+    if not expected_exception:
+        raise ValueError(
+            f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. "
+            f"Raising exceptions is already understood as failing the test, so you don't need "
+            f"any special code to say 'this should never raise an exception'."
+        )
+    if isinstance(expected_exception, type):
+        expected_exceptions: tuple[type[E], ...] = (expected_exception,)
+    else:
+        expected_exceptions = expected_exception
+    for exc in expected_exceptions:
+        if not isinstance(exc, type) or not issubclass(exc, BaseException):
+            msg = "expected exception must be a BaseException type, not {}"  # type: ignore[unreachable]
+            not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
+            raise TypeError(msg.format(not_a))
+
+    message = f"DID NOT RAISE {expected_exception}"
+
+    if not args:
+        match: str | Pattern[str] | None = kwargs.pop("match", None)
+        if kwargs:
+            msg = "Unexpected keyword arguments passed to pytest.raises: "
+            msg += ", ".join(sorted(kwargs))
+            msg += "\nUse context-manager form instead?"
+            raise TypeError(msg)
+        return RaisesContext(expected_exception, message, match)
+    else:
+        func = args[0]
+        if not callable(func):
+            raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
+        try:
+            func(*args[1:], **kwargs)
+        except expected_exception as e:
+            return _pytest._code.ExceptionInfo.from_exception(e)
+    fail(message)
+
+
+# This doesn't work with mypy for now. Use fail.Exception instead.
+raises.Exception = fail.Exception # type: ignore + + +@final +class RaisesContext(ContextManager[_pytest._code.ExceptionInfo[E]]): + def __init__( + self, + expected_exception: type[E] | tuple[type[E], ...], + message: str, + match_expr: str | Pattern[str] | None = None, + ) -> None: + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo: _pytest._code.ExceptionInfo[E] | None = None + if self.match_expr is not None: + re_error = None + try: + re.compile(self.match_expr) + except re.error as e: + re_error = e + if re_error is not None: + fail(f"Invalid regex pattern provided to 'match': {re_error}") + + def __enter__(self) -> _pytest._code.ExceptionInfo[E]: + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(self.message) + assert self.excinfo is not None + if not issubclass(exc_type, self.expected_exception): + return False + # Cast to narrow the exception type now that it's verified. + exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb)) + self.excinfo.fill_unfilled(exc_info) + if self.match_expr is not None: + self.excinfo.match(self.match_expr) + return True diff --git a/.venv/Lib/site-packages/_pytest/python_path.py b/.venv/Lib/site-packages/_pytest/python_path.py new file mode 100644 index 00000000..6e33c8a3 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/python_path.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import sys + +import pytest +from pytest import Config +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[]) + + +@pytest.hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(early_config.getini("pythonpath")): + sys.path.insert(0, str(path)) + + +@pytest.hookimpl(trylast=True) +def pytest_unconfigure(config: Config) -> None: + for path in config.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) diff --git a/.venv/Lib/site-packages/_pytest/recwarn.py b/.venv/Lib/site-packages/_pytest/recwarn.py new file mode 100644 index 00000000..85d8de84 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/recwarn.py @@ -0,0 +1,365 @@ +# mypy: allow-untyped-defs +"""Record warnings during test function execution.""" + +from __future__ import annotations + +from pprint import pformat +import re +from types import TracebackType +from typing import Any +from typing import Callable +from typing import final +from typing import Generator +from typing import Iterator +from typing import overload +from typing import Pattern +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import Self + +import warnings + +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.outcomes import Exit +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator[WarningsRecorder]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. 
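+
+    A minimal usage sketch (our own illustration, not part of the upstream
+    docstring; the test function name is hypothetical)::
+
+        def test_hello(recwarn):
+            warnings.warn("hello", UserWarning)
+            assert len(recwarn) == 1
+            w = recwarn.pop(UserWarning)
+            assert issubclass(w.category, UserWarning)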
+ + See https://docs.pytest.org/en/latest/how-to/capture-warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call(*, match: str | Pattern[str] | None = ...) -> WarningsRecorder: ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ... + + +def deprecated_call( + func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any +) -> WarningsRecorder | Any: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func, *args) + return warns( + (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs + ) + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = ..., + *, + match: str | Pattern[str] | None = ..., +) -> WarningsChecker: ... + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: ... + + +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + *args: Any, + match: str | Pattern[str] | None = None, + **kwargs: Any, +) -> WarningsChecker | Any: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or tuple + of warning classes, and the code inside the ``with`` block must issue at least one + warning of that class or classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, one for + each warning emitted (regardless of whether it is an ``expected_warning`` or not). + Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. + + This function can be used as a context manager:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning): # catch re-emitted warning + ... with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... 
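+
+    The checker additionally records the warnings it sees, so they can be
+    inspected after the block (a sketch of our own; ``record`` is an
+    illustrative name)::
+
+        >>> with pytest.warns(UserWarning) as record:
+        ...     warnings.warn("value must be 0", UserWarning)
+        >>> len(record)
+        1
+        >>> str(record[0].message)
+        'value must be 0'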
+ + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests + such that some runs raise a warning and others do not. + + This could be achieved in the same way as with exceptions, see + :ref:`parametrizing_conditional_raising` for an example. + + """ + __tracebackhide__ = True + if not args: + if kwargs: + argnames = ", ".join(sorted(kwargs)) + raise TypeError( + f"Unexpected keyword arguments passed to pytest.warns: {argnames}" + "\nUse context-manager form instead?" + ) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] + """A context manager to record raised warnings. + + Each recorded warning is an instance of :class:`warnings.WarningMessage`. + + Adapted from `warnings.catch_warnings`. + + .. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + super().__init__(record=True) + self._entered = False + self._list: list[warnings.WarningMessage] = [] + + @property + def list(self) -> list[warnings.WarningMessage]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> warnings.WarningMessage: + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator[warnings.WarningMessage]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: + """Pop the first recorded warning which is an instance of ``cls``, + but not an instance of a child class of any other match. + Raises ``AssertionError`` if there is no match. + """ + best_idx: int | None = None + for i, w in enumerate(self._list): + if w.category == cls: + return self._list.pop(i) # exact match, stop looking + if issubclass(w.category, cls) and ( + best_idx is None + or not issubclass(w.category, self._list[best_idx].category) + ): + best_idx = i + if best_idx is not None: + return self._list.pop(best_idx) + __tracebackhide__ = True + raise AssertionError(f"{cls!r} not found in warning list") + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self) -> Self: + if self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot enter {self!r} twice") + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot exit {self!r} without entering first") + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. 
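+        # An illustrative sketch (not from the original source): because the
+        # flag is reset below, one recorder instance can be entered twice in
+        # sequence:
+        #
+        #     rec = WarningsRecorder(_ispytest=True)
+        #     with rec:
+        #         warnings.warn("first", UserWarning)
+        #     with rec:  # would raise RuntimeError without the reset below
+        #         warnings.warn("second", UserWarning)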
+ self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + match_expr: str | Pattern[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif isinstance(expected_warning, type) and issubclass( + expected_warning, Warning + ): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def matches(self, warning: warnings.WarningMessage) -> bool: + assert self.expected_warning is not None + return issubclass(warning.category, self.expected_warning) and bool( + self.match_expr is None or re.search(self.match_expr, str(warning.message)) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within + # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed + # when the warning doesn't happen. Control-flow exceptions should always + # propagate. + if exc_val is not None and ( + not isinstance(exc_val, Exception) + # Exit is an Exception, not a BaseException, for some reason. + or isinstance(exc_val, Exit) + ): + return + + def found_str() -> str: + return pformat([record.message for record in self], indent=2) + + try: + if not any(issubclass(w.category, self.expected_warning) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f" Emitted warnings: {found_str()}." + ) + elif not any(self.matches(w) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n" + f" Regex: {self.match_expr}\n" + f" Emitted warnings: {found_str()}." + ) + finally: + # Whether or not any warnings matched, we want to re-emit all unmatched warnings. + for w in self: + if not self.matches(w): + warnings.warn_explicit( + message=w.message, + category=w.category, + filename=w.filename, + lineno=w.lineno, + module=w.__module__, + source=w.source, + ) + + # Currently in Python it is possible to pass other types than an + # `str` message when creating `Warning` instances, however this + # causes an exception when :func:`warnings.filterwarnings` is used + # to filter those warnings. See + # https://github.com/python/cpython/issues/103577 for a discussion. + # While this can be considered a bug in CPython, we put guards in + # pytest as the error message produced without this check in place + # is confusing (#10865). + for w in self: + if type(w.message) is not UserWarning: + # If the warning was of an incorrect type then `warnings.warn()` + # creates a UserWarning. Any other warning must have been specified + # explicitly. + continue + if not w.message.args: + # UserWarning() without arguments must have been specified explicitly. + continue + msg = w.message.args[0] + if isinstance(msg, str): + continue + # It's possible that UserWarning was explicitly specified, and + # its first argument was not a string. 
But that case can't be
+            # distinguished from an invalid type.
+            raise TypeError(
+                f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})"
+            )
diff --git a/.venv/Lib/site-packages/_pytest/reports.py b/.venv/Lib/site-packages/_pytest/reports.py
new file mode 100644
index 00000000..77cbf773
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/reports.py
@@ -0,0 +1,636 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import dataclasses
+from io import StringIO
+import os
+from pprint import pprint
+from typing import Any
+from typing import cast
+from typing import final
+from typing import Iterable
+from typing import Iterator
+from typing import Literal
+from typing import Mapping
+from typing import NoReturn
+from typing import Sequence
+from typing import TYPE_CHECKING
+
+from _pytest._code.code import ExceptionChainRepr
+from _pytest._code.code import ExceptionInfo
+from _pytest._code.code import ExceptionRepr
+from _pytest._code.code import ReprEntry
+from _pytest._code.code import ReprEntryNative
+from _pytest._code.code import ReprExceptionInfo
+from _pytest._code.code import ReprFileLocation
+from _pytest._code.code import ReprFuncArgs
+from _pytest._code.code import ReprLocals
+from _pytest._code.code import ReprTraceback
+from _pytest._code.code import TerminalRepr
+from _pytest._io import TerminalWriter
+from _pytest.config import Config
+from _pytest.nodes import Collector
+from _pytest.nodes import Item
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+
+
+if TYPE_CHECKING:
+    from typing_extensions import Self
+
+    from _pytest.runner import CallInfo
+
+
+def getworkerinfoline(node):
+    try:
+        return node._workerinfocache
+    except AttributeError:
+        d = node.workerinfo
+        ver = "{}.{}.{}".format(*d["version_info"][:3])
+        node._workerinfocache = s = "[{}] {} -- Python {} {}".format(
+            d["id"], d["sysplatform"], ver, d["executable"]
+        )
+        return s
+
+
+class BaseReport:
+    when: str | None
+    location: tuple[str, int | None, str] | None
+    longrepr: (
+        None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr
+    )
+    sections: list[tuple[str, str]]
+    nodeid: str
+    outcome: Literal["passed", "failed", "skipped"]
+
+    def __init__(self, **kw: Any) -> None:
+        self.__dict__.update(kw)
+
+    if TYPE_CHECKING:
+        # Can have arbitrary fields given to __init__().
+        def __getattr__(self, key: str) -> Any: ...
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        if hasattr(self, "node"):
+            worker_info = getworkerinfoline(self.node)
+            if worker_info:
+                out.line(worker_info)
+
+        longrepr = self.longrepr
+        if longrepr is None:
+            return
+
+        if hasattr(longrepr, "toterminal"):
+            longrepr_terminal = cast(TerminalRepr, longrepr)
+            longrepr_terminal.toterminal(out)
+        else:
+            try:
+                s = str(longrepr)
+            except UnicodeEncodeError:
+                s = "<unprintable longrepr>"
+            out.line(s)
+
+    def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]:
+        for name, content in self.sections:
+            if name.startswith(prefix):
+                yield prefix, content
+
+    @property
+    def longreprtext(self) -> str:
+        """Read-only property that returns the full string representation of
+        ``longrepr``.
+
+        .. versionadded:: 3.0
+        """
+        file = StringIO()
+        tw = TerminalWriter(file)
+        tw.hasmarkup = False
+        self.toterminal(tw)
+        exc = file.getvalue()
+        return exc.strip()
+
+    @property
+    def caplog(self) -> str:
+        """Return captured log lines, if log capturing is enabled.
+
+        ..
versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" + + @property + def fspath(self) -> str: + """The path portion of the reported node, as a string.""" + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> str | None: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + return None + + def _get_verbose_word_with_markup( + self, config: Config, default_markup: Mapping[str, bool] + ) -> tuple[str, Mapping[str, bool]]: + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + + if isinstance(verbose, str): + return verbose, default_markup + + if isinstance(verbose, Sequence) and len(verbose) == 2: + word, markup = verbose + if isinstance(word, str) and isinstance(markup, Mapping): + return word, markup + + fail( # pragma: no cover + "pytest_report_teststatus() hook (from a plugin) returned " + f"an invalid verbose value: {verbose!r}.\nExpected either a string " + "or a tuple of (word, markup)." + ) + + def _to_json(self) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls, reportdict: dict[str, object]) -> Self: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. 
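+
+        A round-trip sketch (our own example; assumes ``report`` was produced
+        by ``_to_json`` of the same class)::
+
+            data = report._to_json()
+            restored = type(report)._from_json(data)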
+ """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: type[BaseReport], reportdict +) -> NoReturn: + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream) + pprint(f"report_name: {report_class}", stream=stream) + pprint(reportdict, stream=stream) + pprint(f"Please report this bug at {url}", stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. + """ + + __test__ = False + # Defined by skipping plugin. + # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish. + wasxfail: str + + def __init__( + self, + nodeid: str, + location: tuple[str, int | None, str], + keywords: Mapping[str, Any], + outcome: Literal["passed", "failed", "skipped"], + longrepr: None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr, + when: Literal["setup", "call", "teardown"], + sections: Iterable[tuple[str, str]] = (), + duration: float = 0, + start: float = 0, + stop: float = 0, + user_properties: Iterable[tuple[str, object]] | None = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + #: The filesystempath may be relative to ``config.rootdir``. + #: The line number is 0-based. + self.location: tuple[str, int | None, str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords: Mapping[str, Any] = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration: float = duration + + #: The system time when the call started, in seconds since the epoch. + self.start: float = start + #: The system time when the call ended, in seconds since the epoch. + self.stop: float = stop + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>" + + @classmethod + def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport: + """Create and fill a TestReport with standard item and call info. + + :param item: The item. + :param call: The call info. + """ + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. 
+        assert when != "collect"
+        duration = call.duration
+        start = call.start
+        stop = call.stop
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome: Literal["passed", "failed", "skipped"] = "passed"
+            longrepr: (
+                None
+                | ExceptionInfo[BaseException]
+                | tuple[str, int, str]
+                | str
+                | TerminalRepr
+            ) = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif isinstance(excinfo.value, skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                assert (
+                    r is not None
+                ), "There should always be a traceback entry for skipping a test."
+                if excinfo.value._use_item_location:
+                    path, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = os.fspath(path), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.getoption("tbstyle", "auto")
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append((f"Captured {key} {rwhen}", content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            start,
+            stop,
+            user_properties=item.user_properties,
+        )
+
+
+@final
+class CollectReport(BaseReport):
+    """Collection report object.
+
+    Reports can contain arbitrary extra attributes.
+    """
+
+    when = "collect"
+
+    def __init__(
+        self,
+        nodeid: str,
+        outcome: Literal["passed", "failed", "skipped"],
+        longrepr: None
+        | ExceptionInfo[BaseException]
+        | tuple[str, int, str]
+        | str
+        | TerminalRepr,
+        result: list[Item | Collector] | None,
+        sections: Iterable[tuple[str, str]] = (),
+        **extra,
+    ) -> None:
+        #: Normalized collection nodeid.
+        self.nodeid = nodeid
+
+        #: Test outcome, always one of "passed", "failed", "skipped".
+        self.outcome = outcome
+
+        #: None or a failure representation.
+        self.longrepr = longrepr
+
+        #: The collected items and collection nodes.
+        self.result = result or []
+
+        #: Tuples of str ``(heading, content)`` with extra information
+        #: for the test report. Used by pytest to add text captured
+        #: from ``stdout``, ``stderr``, and intercepted logging events. May
+        #: be used by other plugins to add arbitrary information to reports.
+        self.sections = list(sections)
+
+        self.__dict__.update(extra)
+
+    @property
+    def location(  # type:ignore[override]
+        self,
+    ) -> tuple[str, int | None, str] | None:
+        return (self.fspath, None, self.fspath)
+
+    def __repr__(self) -> str:
+        return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>"
+
+
+class CollectErrorRepr(TerminalRepr):
+    def __init__(self, msg: str) -> None:
+        self.longrepr = msg
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(
+    report: CollectReport | TestReport,
+) -> dict[str, Any] | None:
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["$report_type"] = report.__class__.__name__
+        return data
+    # TODO: Check if this is actually reachable.
+ return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: dict[str, Any], +) -> CollectReport | TestReport | None: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def serialize_repr_entry( + entry: ReprEntry | ReprEntryNative, + ) -> dict[str, Any]: + data = dataclasses.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = dataclasses.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]: + result = dataclasses.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: ReprFileLocation | None, + ) -> dict[str, Any] | None: + if reprcrash is not None: + return dataclasses.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
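+
+    A sketch of the intended use (names here are illustrative)::
+
+        kwargs = _report_kwargs_from_json(data)
+        report = TestReport(**kwargs)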
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: ReprEntry | ReprEntryNative = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr( + chain + ) + else: + exception_info = ReprExceptionInfo( + reprtraceback=reprtraceback, + reprcrash=reprcrash, + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/.venv/Lib/site-packages/_pytest/runner.py b/.venv/Lib/site-packages/_pytest/runner.py new file mode 100644 index 00000000..0b60301b --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/runner.py @@ -0,0 +1,571 @@ +# mypy: allow-untyped-defs +"""Basic collect and runtest protocol implementations.""" + +from __future__ import annotations + +import bdb +import dataclasses +import os +import sys +import types +from typing import Callable +from typing import cast +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Directory +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + + +if sys.version_info < (3, 11): + from exceptiongroup import 
BaseExceptionGroup + +if TYPE_CHECKING: + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="Show N slowest setup/test durations (N=0 for all)", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=0.005, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. " + "Default: 0.005.", + ) + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.get_verbosity() + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", f"slowest {durations} durations") + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if verbose < 2 and rep.duration < durations_min: + tr.write_line("") + tr.write_line( + f"({len(dlist) - i} durations < {durations_min:g}s hidden. Use -vv to show these durations.)" + ) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: Session) -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: Session) -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Item | None = None +) -> list[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + # If the session is about to fail or stop, teardown everything - this is + # necessary to correctly report fixture teardown errors (see #11706) + if item.session.shouldfail or item.session.shouldstop: + nextitem = None + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + if sys.version_info >= (3, 12, 0): + del sys.last_exc # type:ignore[attr-defined] + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + if sys.version_info >= (3, 12, 0): + sys.last_exc = e # type:ignore[attr-defined] + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Literal["setup", "call", "teardown"] | None +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds +) -> TestReport: + ihook = item.ihook + if when == "setup": + runtest_hook: Callable[..., None] = ihook.pytest_runtest_setup + elif when == "call": + runtest_hook = ihook.pytest_runtest_call + elif when == "teardown": + runtest_hook = ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: tuple[type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( + lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + ) + report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) + if log: + ihook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + ihook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. 
+        return False
+    if hasattr(report, "wasxfail"):
+        # Exception was expected.
+        return False
+    if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
+        # Special control flow exception.
+        return False
+    return True
+
+
+TResult = TypeVar("TResult", covariant=True)
+
+
+@final
+@dataclasses.dataclass
+class CallInfo(Generic[TResult]):
+    """Result/Exception info of a function invocation."""
+
+    _result: TResult | None
+    #: The captured exception of the call, if it raised.
+    excinfo: ExceptionInfo[BaseException] | None
+    #: The system time when the call started, in seconds since the epoch.
+    start: float
+    #: The system time when the call ended, in seconds since the epoch.
+    stop: float
+    #: The call duration, in seconds.
+    duration: float
+    #: The context of invocation: "collect", "setup", "call" or "teardown".
+    when: Literal["collect", "setup", "call", "teardown"]
+
+    def __init__(
+        self,
+        result: TResult | None,
+        excinfo: ExceptionInfo[BaseException] | None,
+        start: float,
+        stop: float,
+        duration: float,
+        when: Literal["collect", "setup", "call", "teardown"],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._result = result
+        self.excinfo = excinfo
+        self.start = start
+        self.stop = stop
+        self.duration = duration
+        self.when = when
+
+    @property
+    def result(self) -> TResult:
+        """The return value of the call, if it didn't raise.
+
+        Can only be accessed if excinfo is None.
+        """
+        if self.excinfo is not None:
+            raise AttributeError(f"{self!r} has no valid result")
+        # The cast is safe because an exception wasn't raised, hence
+        # _result has the expected function return type (which may be
+        # None, that's why a cast and not an assert).
+        return cast(TResult, self._result)
+
+    @classmethod
+    def from_call(
+        cls,
+        func: Callable[[], TResult],
+        when: Literal["collect", "setup", "call", "teardown"],
+        reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
+    ) -> CallInfo[TResult]:
+        """Call func, wrapping the result in a CallInfo.
+
+        :param func:
+            The function to call. Called without arguments.
+        :type func: Callable[[], _pytest.runner.TResult]
+        :param when:
+            The phase in which the function is called.
+        :param reraise:
+            Exception or exceptions that shall propagate if raised by the
+            function, instead of being wrapped in the CallInfo.
+        """
+        excinfo = None
+        start = timing.time()
+        precise_start = timing.perf_counter()
+        try:
+            result: TResult | None = func()
+        except BaseException:
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and isinstance(excinfo.value, reraise):
+                raise
+            result = None
+        # use the perf counter
+        precise_stop = timing.perf_counter()
+        duration = precise_stop - precise_start
+        stop = timing.time()
+        return cls(
+            start=start,
+            stop=stop,
+            duration=duration,
+            when=when,
+            result=result,
+            excinfo=excinfo,
+            _ispytest=True,
+        )
+
+    def __repr__(self) -> str:
+        if self.excinfo is None:
+            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
+
+
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
+    def collect() -> list[Item | Collector]:
+        # Before collecting, if this is a Directory, load the conftests.
+        # If a conftest import fails to load, it is considered a collection
+        # error of the Directory collector. This is why it's done inside of the
+        # CallInfo wrapper.
+        #
+        # Note: initial conftests are loaded early, not here.
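+        # Illustration (our own note): when ``collector`` is a Directory for,
+        # say, ``tests/``, the call below imports ``tests/conftest.py``; an
+        # ImportError raised there is reported as a collection error for the
+        # directory instead of crashing the session.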
+        if isinstance(collector, Directory):
+            collector.config.pluginmanager._loadconftestmodules(
+                collector.path,
+                collector.config.getoption("importmode"),
+                rootpath=collector.config.rootpath,
+                consider_namespace_packages=collector.config.getini(
+                    "consider_namespace_packages"
+                ),
+            )
+
+        return list(collector.collect())
+
+    call = CallInfo.from_call(
+        collect, "collect", reraise=(KeyboardInterrupt, SystemExit)
+    )
+    longrepr: None | tuple[str, int, str] | str | TerminalRepr = None
+    if not call.excinfo:
+        outcome: Literal["passed", "skipped", "failed"] = "passed"
+    else:
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            skip_exceptions.append(unittest.SkipTest)
+        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
+            outcome = "skipped"
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionChainRepr), repr(r_)
+            r = r_.reprcrash
+            assert r
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                assert isinstance(errorinfo, str)
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    result = call.result if not call.excinfo else None
+    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
+    rep.call = call  # type: ignore # see collect_one_node
+    return rep
+
+
+class SetupState:
+    """Shared state for setting up/tearing down test items or collectors
+    in a session.
+
+    Suppose we have a collection tree as follows:
+
+    <Session session>
+        <Module mod1>
+            <Item item1>
+        <Module mod2>
+            <Item item2>
+
+    The SetupState maintains a stack. The stack starts out empty:
+
+        []
+
+    During the setup phase of item1, setup(item1) is called. What it does
+    is:
+
+        push session to stack, run session.setup()
+        push mod1 to stack, run mod1.setup()
+        push item1 to stack, run item1.setup()
+
+    The stack is:
+
+        [session, mod1, item1]
+
+    While the stack is in this shape, it is allowed to add finalizers to
+    each of session, mod1, item1 using addfinalizer().
+
+    During the teardown phase of item1, teardown_exact(item2) is called,
+    where item2 is the next item to item1. What it does is:
+
+        pop item1 from stack, run its teardowns
+        pop mod1 from stack, run its teardowns
+
+    mod1 was popped because it ended its purpose with item1. The stack is:
+
+        [session]
+
+    During the setup phase of item2, setup(item2) is called. What it does
+    is:
+
+        push mod2 to stack, run mod2.setup()
+        push item2 to stack, run item2.setup()
+
+    Stack:
+
+        [session, mod2, item2]
+
+    During the teardown phase of item2, teardown_exact(None) is called,
+    because item2 is the last item. What it does is:
+
+        pop item2 from stack, run its teardowns
+        pop mod2 from stack, run its teardowns
+        pop session from stack, run its teardowns
+
+    Stack:
+
+        []
+
+    The end!
+    """
+
+    def __init__(self) -> None:
+        # The stack is in the dict insertion order.
+        self.stack: dict[
+            Node,
+            tuple[
+                # Node's finalizers.
+                list[Callable[[], object]],
+                # Node's exception and original traceback, if its setup raised.
+                tuple[OutcomeException | Exception, types.TracebackType | None] | None,
+            ],
+        ] = {}
+
+    def setup(self, item: Item) -> None:
+        """Setup objects along the collector chain to the item."""
+        needed_collectors = item.listchain()
+
+        # If a collector fails its setup, fail its entire subtree of items.
+        # The setup is not retried for each item - the same exception is used.
+ for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc[0].with_traceback(exc[1]) + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__)) + raise + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Item | None) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = nextitem and nextitem.listchain() or [] + exceptions: list[BaseException] = [] + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + these_exceptions = [] + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + these_exceptions.append(e) + + if len(these_exceptions) == 1: + exceptions.extend(these_exceptions) + elif these_exceptions: + msg = f"errors while tearing down {node!r}" + exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1])) + + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup("errors during test teardown", exceptions[::-1]) + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/.venv/Lib/site-packages/_pytest/scope.py b/.venv/Lib/site-packages/_pytest/scope.py new file mode 100644 index 00000000..976a3ba2 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" + +from __future__ import annotations + +from enum import Enum +from functools import total_ordering +from typing import Literal + + +_ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. 
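+    # For example (illustrative, relying on the ordering defined below):
+    #     Scope.Function < Scope.Module     -> True
+    #     Scope.Session.next_lower()        -> Scope.Package
+    #     Scope("module") is Scope.Module   -> True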
+ Function: _ScopeName = "function" + Class: _ScopeName = "class" + Module: _ScopeName = "module" + Package: _ScopeName = "package" + Session: _ScopeName = "session" + + def next_lower(self) -> Scope: + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> Scope: + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: Scope) -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: _ScopeName, descr: str, where: str | None = None + ) -> Scope: + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/.venv/Lib/site-packages/_pytest/setuponly.py b/.venv/Lib/site-packages/_pytest/setuponly.py new file mode 100644 index 00000000..de297f40 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/setuponly.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from typing import Generator + +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="Only setup fixtures, do not execute tests", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="Show setup of fixtures while executing tests", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, object, object]: + try: + return (yield) + finally: + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
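+                # [Editor's note -- illustrative, not pytest source] `ids`
+                # comes from @pytest.fixture(params=..., ids=...): either a
+                # callable mapping each param to its id, or a sequence indexed
+                # by the param's position, e.g.
+                #     @pytest.fixture(params=[0, 1], ids=["zero", "one"])
+                #     def num(request):
+                #         return request.param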
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, request.config, "SETUP") + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[object], request: SubRequest +) -> None: + if fixturedef.cached_result is not None: + config = request.config + if config.option.setupshow: + _show_fixture_action(fixturedef, request.config, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action( + fixturedef: FixtureDef[object], config: Config, msg: str +) -> None: + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + tw.write( + "{step} {scope} {fixture}".format( # noqa: UP032 (Readability) + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/.venv/Lib/site-packages/_pytest/setupplan.py b/.venv/Lib/site-packages/_pytest/setupplan.py new file mode 100644 index 00000000..4e124cce --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/setupplan.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="Show what fixtures and tests would be executed but " + "don't execute anything", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> object | None: + # Will return a dummy fixture if the setuponly option is provided. 
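+    # [Editor's note -- not pytest source] Despite the comment above, the flag
+    # checked below is --setup-plan (setupplan); pytest_cmdline_main further
+    # down then enables setuponly/setupshow as well. cached_result is the
+    # usual (result, cache_key, exception_info) triple, here with a None
+    # result so the fixture body never actually runs.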
+ if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/.venv/Lib/site-packages/_pytest/skipping.py b/.venv/Lib/site-packages/_pytest/skipping.py new file mode 100644 index 00000000..9818be2a --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/skipping.py @@ -0,0 +1,301 @@ +# mypy: allow-untyped-defs +"""Support for skip/xfail functions and markers.""" + +from __future__ import annotations + +from collections.abc import Mapping +import dataclasses +import os +import platform +import sys +import traceback +from typing import Generator +from typing import Optional + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.reports import BaseReport +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.stash import StashKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="Report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "xfail_strict", + "Default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. 
If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}" + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition as a boolean", + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + f"Error evaluating {mark.name!r}: " + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@dataclasses.dataclass(frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason: str = "unconditional skip" + + +def evaluate_skip_marks(item: Item) -> Skip | None: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None + + return None + + +@dataclasses.dataclass(frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + __slots__ = ("reason", "run", "strict", "raises") + + reason: str + run: bool + strict: bool + raises: tuple[type[BaseException], ...] | None + + +def evaluate_xfail_marks(item: Item) -> Xfail | None: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. 
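+        # [Editor's note -- illustrative, not pytest source] This branch
+        # covers marks without a condition, e.g.:
+        #     @pytest.mark.xfail
+        #     @pytest.mark.xfail(reason="flaky on CI")
+        # A conditional mark such as
+        #     @pytest.mark.xfail(sys.platform == "win32", reason="...")
+        # is handled by the loop below via evaluate_condition().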
+ if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StashKey[Optional[Xfail]]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + if skipped: + raise skip.Exception(skipped.reason, _use_item_location=True) + + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(wrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None]: + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + try: + return (yield) + finally: + # The test run may have added an xfail mark dynamically. + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(wrapper=True) +def pytest_runtest_makereport( + item: Item, call: CallInfo[None] +) -> Generator[None, TestReport, TestReport]: + rep = yield + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + return rep + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/.venv/Lib/site-packages/_pytest/stash.py b/.venv/Lib/site-packages/_pytest/stash.py new file mode 100644 index 00000000..6a9ff884 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/stash.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Generic +from typing import TypeVar + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + + .. versionadded:: 7.0 + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. 
+ + Usually you will be given an object which has a ``Stash``, for example + :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`: + + .. code-block:: python + + stash: Stash = some_object.stash + + If a module or plugin wants to store data in this ``Stash``, it creates + :class:`StashKey`\s for its keys (at the module level): + + .. code-block:: python + + # At the top-level of the module + some_str_key = StashKey[str]() + some_bool_key = StashKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + stash[some_str_key] = "value" + stash[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = stash[some_str_key] + # The static type of some_bool is bool. + some_bool = stash[some_bool_key] + + .. versionadded:: 7.0 + """ + + __slots__ = ("_storage",) + + def __init__(self) -> None: + self._storage: dict[StashKey[Any], object] = {} + + def __setitem__(self, key: StashKey[T], value: T) -> None: + """Set a value for key.""" + self._storage[key] = value + + def __getitem__(self, key: StashKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._storage[key]) + + def get(self, key: StashKey[T], default: D) -> T | D: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StashKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StashKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/.venv/Lib/site-packages/_pytest/stepwise.py b/.venv/Lib/site-packages/_pytest/stepwise.py new file mode 100644 index 00000000..c7860808 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/stepwise.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +from _pytest import nodes +from _pytest.cacheprovider import Cache +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="Exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="Ignore the first failing test but stop on the next failing test. " + "Implicitly enables --stepwise.", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.stepwise_skip: + # allow --stepwise-skip to work on its own merits. 
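+        # [Editor's note -- illustrative, not pytest source] i.e. running
+        #     pytest --sw-skip
+        # alone behaves like `pytest --sw --sw-skip`: the first failure is
+        # skipped over and the session stops at the next one.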
+ config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + if hasattr(session.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + # Clear the list of failing tests if the plugin is not active. + session.config.cache.set(STEPWISE_CACHE_DIR, []) + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Session | None = None + self.report_status = "" + assert config.cache is not None + self.cache: Cache = config.cache + self.lastfailed: str | None = self.cache.get(STEPWISE_CACHE_DIR, None) + self.skip: bool = config.getoption("stepwise_skip") + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> None: + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + # check all item nodes until we find a match on last failed + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.lastfailed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status = "previously failed test not found, not skipping." + else: + self.report_status = f"skipping {failed_index} already passed items." + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self) -> str | None: + if self.config.get_verbosity() >= 0 and self.report_status: + return f"stepwise: {self.report_status}" + return None + + def pytest_sessionfinish(self) -> None: + if hasattr(self.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed) diff --git a/.venv/Lib/site-packages/_pytest/terminal.py b/.venv/Lib/site-packages/_pytest/terminal.py new file mode 100644 index 00000000..ed267bf5 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/terminal.py @@ -0,0 +1,1577 @@ +# mypy: allow-untyped-defs +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. 
+""" + +from __future__ import annotations + +import argparse +from collections import Counter +import dataclasses +import datetime +from functools import partial +import inspect +from pathlib import Path +import platform +import sys +import textwrap +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import final +from typing import Generator +from typing import Literal +from typing import Mapping +from typing import NamedTuple +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io import TerminalWriter +from _pytest._io.wcwidth import wcswidth +import _pytest._version +from _pytest.assertion.util import running_on_ci +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +if TYPE_CHECKING: + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: str | None = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[object] | None, + option_string: str | None = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +class TestShortLogReport(NamedTuple): + """Used to store the test status result category, shortletter and verbose word. + For example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :ivar category: + The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. + + :ivar letter: + The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. + + :ivar word: + Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, + ``"ERROR"``, or the empty string. 
+ """ + + category: str + letter: str + word: str | tuple[str, Mapping[str, bool]] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="Increase verbosity", + ) + group._addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="Disable header", + ) + group._addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="Disable summary", + ) + group._addoption( + "--no-fold-skipped", + action="store_false", + dest="fold_skipped", + default=True, + help="Do not fold skipped tests in short summary.", + ) + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="Decrease verbosity", + ) + group._addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="Set verbosity. Default: 0.", + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="Show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="Disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="Show locals in tracebacks (disabled by default)", + ) + group._addoption( + "--no-showlocals", + action="store_false", + dest="showlocals", + help="Hide locals in tracebacks (negate --showlocals passed through addopts)", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="Traceback print mode (auto/long/short/line/native/no)", + ) + group._addoption( + "--xfail-tb", + action="store_true", + dest="xfail_tb", + default=False, + help="Show tracebacks for xfail (as long as --tb != no)", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default: all.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="Don't cut any tracebacks (default is to cut)", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="Color terminal output (yes/no/auto)", + ) + group._addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled). 
" + "Default: yes.", + ) + + parser.addini( + "console_output_style", + help='Console output: "classic", or with additional progress information ' + '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces ' + "progress even when capture=no)", + default="progress", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_TEST_CASES, + help=( + "Specify a verbosity level for test case execution, overriding the main level. " + "Higher levels will provide more detailed information about each test case executed." + ), + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@dataclasses.dataclass +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. + + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple fslocation: + File system location of the source of the warning (see ``get_location``). 
+ """ + + message: str + nodeid: str | None = None + fslocation: tuple[str, int] | None = None + + count_towards_summary: ClassVar = True + + def get_location(self, config: Config) -> str | None: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: TextIO | None = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Session | None = None + self._showfspath: bool | None = None + + self.stats: dict[str, list[Any]] = {} + self._main_color: str | None = None + self._known_types: list[str] | None = None + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: None | Path | str | int = None + self.reportchars = getreportopt(config) + self.foldskipped = config.option.fold_skipped + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported: set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write: float | None = None + self._already_displayed_warnings: int | None = None + self._keyboardinterrupt_memo: ExceptionRepr | None = None + + def _determine_show_progress_info(self) -> Literal["progress", "count", False]: + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) unless explicitly + # overridden by progress-even-when-capture-no + if ( + self.config.getoption("capture", "no") == "no" + and self.config.getini("console_output_style") + != "progress-even-when-capture-no" + ): + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg in {"progress", "progress-even-when-capture-no"}: + return "progress" + elif cfg == "count": + return "count" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: bool | None) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0 + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + 
self._write_progress_information_filling_space() + self.currentfspath = fspath + relfspath = bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def wrap_write( + self, + content: str, + *, + flush: bool = False, + margin: int = 8, + line_sep: str = "\n", + **markup: bool, + ) -> None: + """Wrap message with margin for progress info.""" + width_of_current_line = self._tw.width_of_current_line + wrapped = line_sep.join( + textwrap.wrap( + " " * width_of_current_line + content, + width=self._screen_width - margin, + drop_whitespace=True, + replace_whitespace=False, + ), + ) + wrapped = wrapped[width_of_current_line:] + self._tw.write(wrapped, flush=flush, **markup) + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: str | bytes, **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, + warning_message: warnings.WarningMessage, + nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. 
+ self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: tuple[str, int | None, str] + ) -> None: + fspath, lineno, domain = location + # Ensure that the path is printed before the + # 1st test of a module starts running. + if self.showlongtestinfo: + line = self._locationline(nodeid, fspath, lineno, domain) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + + res = TestShortLogReport( + *self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + ) + category, letter, word = res.category, res.letter, res.word + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + self._progress_nodeids_reported.add(rep.nodeid) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0: + self._tw.write(letter, **markup) + # When running in xdist, the logreport and logfinish of multiple + # items are interspersed, e.g. `logreport`, `logreport`, + # `logfinish`, `logfinish`. To avoid the "past edge" calculation + # from getting confused and overflowing (#7166), do the past edge + # printing here and not in logfinish, except for the 100% which + # should only be printed after all teardowns are finished. + if self._show_progress_info and not self._is_last_item: + self._write_progress_information_if_past_edge() + else: + line = self._locationline(rep.nodeid, *rep.location) + running_xdist = hasattr(rep, "node") + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + reason = _get_raw_skip_reason(rep) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2: + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + formatted_reason = _format_trimmed( + " ({})", reason, available_width + ) + else: + formatted_reason = f" ({reason})" + + if reason and formatted_reason is not None: + self.wrap_write(formatted_reason) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write(f"[{rep.node.gateway.id}]") + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return len(self._progress_nodeids_reported) == self._session.testscollected + + @hookimpl(wrapper=True) + def pytest_runtestloop(self) -> Generator[None, object, object]: + result = yield + + # Write the final/100% progress -- deferred until the loop is complete. 
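+        # [Editor's note -- not pytest source] The guard below mirrors
+        # pytest_runtest_logreport: per-test progress is only printed at
+        # test-case verbosity <= 0, so the final fill is skipped in verbose
+        # mode or when no test ever reported progress.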
+ if ( + self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0 + and self._show_progress_info + and self._progress_nodeids_reported + ): + self._write_progress_information_filling_space() + + return result + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = len(self._progress_nodeids_reported) + counter_format = f"{{:{len(str(collected))}d}}" + format_string = f" [{counter_format}/{{}}]" + return format_string.format(progress, collected) + return f" [ {collected} / {collected} ]" + else: + if collected: + return ( + f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]" + ) + return " [100%]" + + def _write_progress_information_if_past_edge(self) -> None: + w = self._width_of_current_line + if self._show_progress_info == "count": + assert self._session + num_tests = self._session.testscollected + progress_length = len(f" [{num_tests}/{num_tests}]") + else: + progress_length = len(" [100%]") + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + main_color, _ = self._get_main_color() + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + self._collect_report_last_write = timing.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. 
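+            # [Editor's note -- not pytest source] 0.5 is
+            # REPORT_COLLECTING_RESOLUTION from the top of this module; the
+            # throttle keeps fast collections from redrawing the terminal
+            # line for every single collected item.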
+ t = timing.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - deselected + line = "collected " if final else "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d error%s" % (errors, "s" if errors != 1 else "") + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: Session) -> None: + self._session = session + self._sessionstarttime = timing.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}" + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, start_path=self.startpath + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[str | Sequence[str]] + ) -> None: + for line_or_lines in reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> list[str]: + result = [f"rootdir: {config.rootpath}"] + + if config.inipath: + result.append("configfile: " + bestrelpath(config.rootpath, config.inipath)) + + if config.args_source == Config.ArgsSource.TESTPATHS: + testpaths: list[str] = config.getini("testpaths") + result.append("testpaths: {}".format(", ".join(testpaths))) + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append( + "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo))) + ) + return result + + def pytest_collection_finish(self, session: Session) -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, + start_path=self.startpath, + items=session.items, + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) + if test_cases_verbosity < 0: + if test_cases_verbosity < -1: + counts = Counter(item.nodeid.split("::", 1)[0] 
for item in items) + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: list[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if test_cases_verbosity >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(wrapper=True) + def pytest_sessionfinish( + self, session: Session, exitstatus: int | ExitCode + ) -> Generator[None]: + result = yield + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + return result + + @hookimpl(wrapper=True) + def pytest_terminal_summary(self) -> Generator[None]: + self.summary_errors() + self.summary_failures() + self.summary_xfailures() + self.summary_warnings() + self.summary_passes() + self.summary_xpasses() + try: + return (yield) + finally: + self.short_test_summary() + # Display any extra warnings from teardown here (if any). + self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline( + self, nodeid: str, fspath: str, lineno: int | None, domain: str + ) -> str: + def mkrel(nodeid: str) -> str: + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # fspath comes from testid which has a "/"-normalized path. + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, Path(fspath)) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? 
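+    # [Editor's note -- illustrative, not pytest source] rep.head_line is the
+    # test's "domain", e.g. "TestClass.test_method"; the "test session"
+    # fallback above is only reached for reports without a location, such as
+    # internal errors.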
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. + # + def getreports(self, name: str): + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: list[WarningReport] | None = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: dict[str, list[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: list[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) + + def summary_passes(self) -> None: + self.summary_passes_combined("passed", "PASSES", "P") + + def summary_xpasses(self) -> None: + self.summary_passes_combined("xpassed", "XPASSES", "X") + + def summary_passes_combined( + self, which_reports: str, sep_title: str, needed_opt: str + ) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt(needed_opt): + reports: list[TestReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> list[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + style = self.config.option.tbstyle + 
self.summary_failures_combined("failed", "FAILURES", style=style) + + def summary_xfailures(self) -> None: + show_tb = self.config.option.xfail_tb + style = self.config.option.tbstyle if show_tb else "no" + self.summary_failures_combined("xfailed", "XFAILURES", style=style) + + def summary_failures_combined( + self, + which_reports: str, + sep_title: str, + *, + style: str, + needed_opt: str | None = None, + ) -> None: + if style != "no": + if not needed_opt or self.hasopt(needed_opt): + reports: list[BaseReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + if style == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + reports: list[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = timing.time() - self._sessionstarttime + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = f" in {format_session_duration(session_duration)}" + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg += markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(lines: list[str], *, stat: str) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + config = self.config + for rep in failed: + color = _color_for_type.get(stat, _color_for_type_default) + line = _get_line_with_reprcrash_message( + config, rep, self._tw, {color: True} + ) + lines.append(line) + + def show_xfailed(lines: list[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = 
self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + + lines.append(line) + + def show_xpassed(lines: list[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped_folded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + prefix = "Skipped: " + for num, fspath, lineno, reason in fskips: + if reason.startswith(prefix): + reason = reason[len(prefix) :] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason)) + + def show_skipped_unfolded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + + for rep in skipped: + assert rep.longrepr is not None + assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr) + assert len(rep.longrepr) == 3, (rep, rep.longrepr) + + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.longrepr[2] + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped(lines: list[str]) -> None: + if self.foldskipped: + show_skipped_folded(lines) + else: + show_skipped_unfolded(lines) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, stat="failed"), + "s": show_skipped, + "p": partial(show_simple, stat="passed"), + "E": partial(show_simple, stat="error"), + } + + lines: list[str] = [] + for char in self.reportchars: + action = REPORTCHAR_ACTIONS.get(char) + if action: # skipping e.g. "P" (passed with output) here. 
+                action(lines)
+
+        if lines:
+            self.write_sep("=", "short test summary info", cyan=True, bold=True)
+            for line in lines:
+                self.write_line(line)
+
+    def _get_main_color(self) -> tuple[str, list[str]]:
+        if self._main_color is None or self._known_types is None or self._is_last_item:
+            self._set_main_color()
+            assert self._main_color
+            assert self._known_types
+        return self._main_color, self._known_types
+
+    def _determine_main_color(self, unknown_type_seen: bool) -> str:
+        stats = self.stats
+        if "failed" in stats or "error" in stats:
+            main_color = "red"
+        elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
+            main_color = "yellow"
+        elif "passed" in stats or not self._is_last_item:
+            main_color = "green"
+        else:
+            main_color = "yellow"
+        return main_color
+
+    def _set_main_color(self) -> None:
+        unknown_types: list[str] = []
+        for found_type in self.stats:
+            if found_type:  # setup/teardown reports have an empty key, ignore them
+                if found_type not in KNOWN_TYPES and found_type not in unknown_types:
+                    unknown_types.append(found_type)
+        self._known_types = list(KNOWN_TYPES) + unknown_types
+        self._main_color = self._determine_main_color(bool(unknown_types))
+
+    def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]:
+        """
+        Build the parts used in the last summary stats line.
+
+        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
+
+        This function builds a list of the "parts" that make up the text in that line, in
+        the example above it would be:
+
+            [
+                ("12 passed", {"green": True}),
+                ("2 errors", {"red": True}),
+            ]
+
+        The last dict in each pair is a "markup dictionary", used by TerminalWriter to
+        color output.
+
+        The final color of the line is also determined by this function, and is the second
+        element of the returned tuple.
+ """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> list[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] + + return parts, main_color + + +def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + path, *parts = nodeid.split("::") + if parts: + parts_markup = tw.markup("::".join(parts), bold=True) + return path + "::" + parts_markup + else: + return path + + +def _format_trimmed(format: str, msg: str, available_width: int) -> str | None: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." 
+    format_width = wcswidth(format.format(""))
+    if format_width + len(ellipsis) > available_width:
+        return None
+
+    if format_width + wcswidth(msg) > available_width:
+        available_width -= len(ellipsis)
+        msg = msg[:available_width]
+        while format_width + wcswidth(msg) > available_width:
+            msg = msg[:-1]
+        msg += ellipsis
+
+    return format.format(msg)
+
+
+def _get_line_with_reprcrash_message(
+    config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool]
+) -> str:
+    """Get summary line for a report, trying to add reprcrash message."""
+    verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
+        config, word_markup
+    )
+    word = tw.markup(verbose_word, **verbose_markup)
+    node = _get_node_id_with_markup(tw, config, rep)
+
+    line = f"{word} {node}"
+    line_width = wcswidth(line)
+
+    try:
+        # Type ignored intentionally -- possible AttributeError expected.
+        msg = rep.longrepr.reprcrash.message  # type: ignore[union-attr]
+    except AttributeError:
+        pass
+    else:
+        if running_on_ci() or config.option.verbose >= 2:
+            msg = f" - {msg}"
+        else:
+            available_width = tw.fullwidth - line_width
+            msg = _format_trimmed(" - {}", msg, available_width)
+        if msg is not None:
+            line += msg
+
+    return line
+
+
+def _folded_skips(
+    startpath: Path,
+    skipped: Sequence[CollectReport],
+) -> list[tuple[int, str, int | None, str]]:
+    d: dict[tuple[str, int | None, str], list[CollectReport]] = {}
+    for event in skipped:
+        assert event.longrepr is not None
+        assert isinstance(event.longrepr, tuple), (event, event.longrepr)
+        assert len(event.longrepr) == 3, (event, event.longrepr)
+        fspath, lineno, reason = event.longrepr
+        # For consistency, report all fspaths in relative form.
+        fspath = bestrelpath(startpath, Path(fspath))
+        keywords = getattr(event, "keywords", {})
+        # Folding reports with global pytestmark variable.
+        # This is a workaround, because for now we cannot identify the scope of a skip marker.
+        # TODO: Revisit once the scope of marks is fixed.
+        if (
+            event.when == "setup"
+            and "skip" in keywords
+            and "pytestmark" not in keywords
+        ):
+            key: tuple[str, int | None, str] = (fspath, None, reason)
+        else:
+            key = (fspath, lineno, reason)
+        d.setdefault(key, []).append(event)
+    values: list[tuple[int, str, int | None, str]] = []
+    for key, events in d.items():
+        values.append((len(events), *key))
+    return values
+
+
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
+def pluralize(count: int, noun: str) -> tuple[int, str]:
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings", "test"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
+def _plugin_nameversions(plugininfo) -> list[str]:
+    values: list[str] = []
+    for plugin, dist in plugininfo:
+        # Gets us name and version!
+        name = f"{dist.project_name}-{dist.version}"
+        # Questionable convenience, but it keeps things short.
+        if name.startswith("pytest-"):
+            name = name[7:]
+        # We decided to print python package names because they can have more than one plugin.
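+        # (a single distribution may register several plugin modules.)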
+ if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. + """ + if hasattr(report, "wasxfail"): + reason = report.wasxfail + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/.venv/Lib/site-packages/_pytest/threadexception.py b/.venv/Lib/site-packages/_pytest/threadexception.py new file mode 100644 index 00000000..c1ed8038 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/threadexception.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import threading +import traceback +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import TYPE_CHECKING +import warnings + +import pytest + + +if TYPE_CHECKING: + from typing_extensions import Self + + +# Copied from cpython/Lib/test/support/threading_helper.py, with modifications. +class catch_threading_exception: + """Context manager catching threading.Thread exception using + threading.excepthook. + + Storing exc_value using a custom hook can create a reference cycle. The + reference cycle is broken explicitly when the context manager exits. + + Storing thread using a custom hook can resurrect it if it is set to an + object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with threading_helper.catch_threading_exception() as cm: + # code spawning a thread which raises an exception + ... + # check the thread exception: use cm.args + ... 
+        # cm.args attribute no longer exists at this point
+        # (to break a reference cycle)
+    """
+
+    def __init__(self) -> None:
+        self.args: threading.ExceptHookArgs | None = None
+        self._old_hook: Callable[[threading.ExceptHookArgs], Any] | None = None
+
+    def _hook(self, args: threading.ExceptHookArgs) -> None:
+        self.args = args
+
+    def __enter__(self) -> Self:
+        self._old_hook = threading.excepthook
+        threading.excepthook = self._hook
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        assert self._old_hook is not None
+        threading.excepthook = self._old_hook
+        self._old_hook = None
+        del self.args
+
+
+def thread_exception_runtest_hook() -> Generator[None]:
+    with catch_threading_exception() as cm:
+        try:
+            yield
+        finally:
+            if cm.args:
+                thread_name = (
+                    "<unknown>" if cm.args.thread is None else cm.args.thread.name
+                )
+                msg = f"Exception in thread {thread_name}\n\n"
+                msg += "".join(
+                    traceback.format_exception(
+                        cm.args.exc_type,
+                        cm.args.exc_value,
+                        cm.args.exc_traceback,
+                    )
+                )
+                warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))
+
+
+@pytest.hookimpl(wrapper=True, trylast=True)
+def pytest_runtest_setup() -> Generator[None]:
+    yield from thread_exception_runtest_hook()
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_call() -> Generator[None]:
+    yield from thread_exception_runtest_hook()
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_teardown() -> Generator[None]:
+    yield from thread_exception_runtest_hook()
diff --git a/.venv/Lib/site-packages/_pytest/timing.py b/.venv/Lib/site-packages/_pytest/timing.py
new file mode 100644
index 00000000..b23c7f69
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/timing.py
@@ -0,0 +1,16 @@
+"""Indirection for time functions.
+
+We intentionally grab some "time" functions internally to prevent tests that mock
+"time" from affecting pytest runtime information (issue #185).
+
+Fixture "mock_timing" also interacts with this module for pytest's own tests.
+""" + +from __future__ import annotations + +from time import perf_counter +from time import sleep +from time import time + + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/.venv/Lib/site-packages/_pytest/tmpdir.py b/.venv/Lib/site-packages/_pytest/tmpdir.py new file mode 100644 index 00000000..de0cbcfe --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/tmpdir.py @@ -0,0 +1,322 @@ +# mypy: allow-untyped-defs +"""Support for providing temporary directories to test functions.""" + +from __future__ import annotations + +import dataclasses +import os +from pathlib import Path +import re +from shutil import rmtree +import tempfile +from typing import Any +from typing import Dict +from typing import final +from typing import Generator +from typing import Literal + +from .pathlib import cleanup_dead_symlinks +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import get_user_id +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.stash import StashKey + + +tmppath_result_key = StashKey[Dict[str, bool]]() +RetentionType = Literal["all", "failed", "none"] + + +@final +@dataclasses.dataclass +class TempPathFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + + _given_basetemp: Path | None + # pluggy TagTracerSub, not currently exposed, so Any. + _trace: Any + _basetemp: Path | None + _retention_count: int + _retention_policy: RetentionType + + def __init__( + self, + given_basetemp: Path | None, + retention_count: int, + retention_policy: RetentionType, + trace, + basetemp: Path | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._retention_count = retention_count + self._retention_policy = retention_policy + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> TempPathFactory: + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + count = int(config.getini("tmp_path_retention_count")) + if count < 0: + raise ValueError( + f"tmp_path_retention_count must be >= 0. Current input: {count}." + ) + + policy = config.getini("tmp_path_retention_policy") + if policy not in ("all", "failed", "none"): + raise ValueError( + f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." 
+            )
+
+        return cls(
+            given_basetemp=config.option.basetemp,
+            trace=config.trace.get("tmpdir"),
+            retention_count=count,
+            retention_policy=policy,
+            _ispytest=True,
+        )
+
+    def _ensure_relative_to_basetemp(self, basename: str) -> str:
+        basename = os.path.normpath(basename)
+        if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
+            raise ValueError(f"{basename} is not a normalized and relative path")
+        return basename
+
+    def mktemp(self, basename: str, numbered: bool = True) -> Path:
+        """Create a new temporary directory managed by the factory.
+
+        :param basename:
+            Directory base name, must be a relative path.
+
+        :param numbered:
+            If ``True``, ensure the directory is unique by adding a numbered
+            suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
+            means that this function will create directories named ``"foo-0"``,
+            ``"foo-1"``, ``"foo-2"`` and so on.
+
+        :returns:
+            The path to the new directory.
+        """
+        basename = self._ensure_relative_to_basetemp(basename)
+        if not numbered:
+            p = self.getbasetemp().joinpath(basename)
+            p.mkdir(mode=0o700)
+        else:
+            p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
+            self._trace("mktemp", p)
+        return p
+
+    def getbasetemp(self) -> Path:
+        """Return the base temporary directory, creating it if needed.
+
+        :returns:
+            The base temporary directory.
+        """
+        if self._basetemp is not None:
+            return self._basetemp
+
+        if self._given_basetemp is not None:
+            basetemp = self._given_basetemp
+            if basetemp.exists():
+                rm_rf(basetemp)
+            basetemp.mkdir(mode=0o700)
+            basetemp = basetemp.resolve()
+        else:
+            from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
+            temproot = Path(from_env or tempfile.gettempdir()).resolve()
+            user = get_user() or "unknown"
+            # Use a sub-directory in the temproot to speed up the
+            # make_numbered_dir() call.
+            rootdir = temproot.joinpath(f"pytest-of-{user}")
+            try:
+                rootdir.mkdir(mode=0o700, exist_ok=True)
+            except OSError:
+                # getuser() likely returned characters that are illegal on this
+                # platform, so fall back to a generic "unknown" directory.
+                rootdir = temproot.joinpath("pytest-of-unknown")
+                rootdir.mkdir(mode=0o700, exist_ok=True)
+            # Because we use exist_ok=True with a predictable name, make sure
+            # we are the owners, to prevent any funny business (on unix, where
+            # temproot is usually shared).
+            # Also, to keep things private, fixup any world-readable temp
+            # rootdir's permissions. Historically 0o755 was used, so we can't
+            # just error out on this, at least for a while.
+            uid = get_user_id()
+            if uid is not None:
+                rootdir_stat = rootdir.stat()
+                if rootdir_stat.st_uid != uid:
+                    raise OSError(
+                        f"The temporary directory {rootdir} is not owned by the current user. "
+                        "Fix this and try again."
+                    )
+                if (rootdir_stat.st_mode & 0o077) != 0:
+                    os.chmod(rootdir, rootdir_stat.st_mode & ~0o077)
+            keep = self._retention_count
+            if self._retention_policy == "none":
+                keep = 0
+            basetemp = make_numbered_dir_with_cleanup(
+                prefix="pytest-",
+                root=rootdir,
+                keep=keep,
+                lock_timeout=LOCK_TIMEOUT,
+                mode=0o700,
+            )
+        assert basetemp is not None, basetemp
+        self._basetemp = basetemp
+        self._trace("new basetemp", basetemp)
+        return basetemp
+
+
+def get_user() -> str | None:
+    """Return the current user name, or None if getuser() does not work
+    in the current environment (see #1010)."""
+    try:
+        # In some exotic environments, getpass may not be importable.
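+        # getuser() itself may also raise OSError or KeyError when the current
+        # uid has no passwd entry, hence the broad except below.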
+ import getpass + + return getpass.getuser() + except (ImportError, OSError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "tmp_path_retention_count", + help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", + default=3, + ) + + parser.addini( + "tmp_path_retention_policy", + help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. " + "(all/failed/none)", + default="all", + ) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmp_path( + request: FixtureRequest, tmp_path_factory: TempPathFactory +) -> Generator[Path]: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. + This behavior can be configured with :confval:`tmp_path_retention_count` and + :confval:`tmp_path_retention_policy`. + If ``--basetemp`` is used then it is cleared each session. See + :ref:`temporary directory location and retention`. + + The returned object is a :class:`pathlib.Path` object. + """ + path = _mk_tmp(request, tmp_path_factory) + yield path + + # Remove the tmpdir if the policy is "failed" and the test passed. + tmp_path_factory: TempPathFactory = request.session.config._tmp_path_factory # type: ignore + policy = tmp_path_factory._retention_policy + result_dict = request.node.stash[tmppath_result_key] + + if policy == "failed" and result_dict.get("call", True): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(path, ignore_errors=True) + + del request.node.stash[tmppath_result_key] + + +def pytest_sessionfinish(session, exitstatus: int | ExitCode): + """After each session, remove base directory if all the tests passed, + the policy is "failed", and the basetemp is not specified by a user. 
+ """ + tmp_path_factory: TempPathFactory = session.config._tmp_path_factory + basetemp = tmp_path_factory._basetemp + if basetemp is None: + return + + policy = tmp_path_factory._retention_policy + if ( + exitstatus == 0 + and policy == "failed" + and tmp_path_factory._given_basetemp is None + ): + if basetemp.is_dir(): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(basetemp, ignore_errors=True) + + # Remove dead symlinks. + if basetemp.is_dir(): + cleanup_dead_symlinks(basetemp) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport( + item: Item, call +) -> Generator[None, TestReport, TestReport]: + rep = yield + assert rep.when is not None + empty: dict[str, bool] = {} + item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed + return rep diff --git a/.venv/Lib/site-packages/_pytest/unittest.py b/.venv/Lib/site-packages/_pytest/unittest.py new file mode 100644 index 00000000..8cecd4f9 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/unittest.py @@ -0,0 +1,435 @@ +# mypy: allow-untyped-defs +"""Discover and run std-library "unittest" style tests.""" + +from __future__ import annotations + +import inspect +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.runner import CallInfo +import pytest + + +if sys.version_info[:2] < (3, 11): + from exceptiongroup import ExceptionGroup + +if TYPE_CHECKING: + import unittest + + import twisted.trial.unittest + + +_SysExcInfoType = Union[ + Tuple[Type[BaseException], BaseException, types.TracebackType], + Tuple[None, None, None], +] + + +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> UnitTestCase | None: + try: + # Has unittest been imported? + ut = sys.modules["unittest"] + # Is obj a subclass of unittest.TestCase? + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Is obj a concrete class? + # Abstract classes can't be instantiated so no point collecting them. + if inspect.isabstract(obj): + return None + # Yes, so let's collect it. + return UnitTestCase.from_parent(collector, name=name, obj=obj) + + +class UnitTestCase(Class): + # Marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs. + nofuncargs = True + + def newinstance(self): + # TestCase __init__ takes the method (test) name. The TestCase + # constructor treats the name "runTest" as a special no-op, so it can be + # used when a dummy instance is needed. While unittest.TestCase has a + # default, some subclasses omit the default (#9610), so always supply + # it. 
+ return self.obj("runTest") + + def collect(self) -> Iterable[Item | Collector]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._register_unittest_setup_method_fixture(cls) + self._register_unittest_setup_class_fixture(cls) + self._register_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + yield TestCaseFunction.from_parent(self, name=name) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction.from_parent(self, name="runTest") + + def _register_unittest_setup_class_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setUpClass and + tearDownClass (#517).""" + setup = getattr(cls, "setUpClass", None) + teardown = getattr(cls, "tearDownClass", None) + if setup is None and teardown is None: + return None + cleanup = getattr(cls, "doClassCleanups", lambda: None) + + def process_teardown_exceptions() -> None: + # tearDown_exceptions is a list set in the class containing exc_infos for errors during + # teardown for the class. + exc_infos = getattr(cls, "tearDown_exceptions", None) + if not exc_infos: + return + exceptions = [exc for (_, exc, _) in exc_infos] + # If a single exception, raise it directly as this provides a more readable + # error (hopefully this will improve in #12255). + if len(exceptions) == 1: + raise exceptions[0] + else: + raise ExceptionGroup("Unittest class cleanup errors", exceptions) + + def unittest_setup_class_fixture( + request: FixtureRequest, + ) -> Generator[None]: + cls = request.cls + if _is_skipped(cls): + reason = cls.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + cleanup() + process_teardown_exceptions() + raise + yield + try: + if teardown is not None: + teardown() + finally: + cleanup() + process_teardown_exceptions() + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_unittest_setUpClass_fixture_{cls.__qualname__}", + func=unittest_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_unittest_setup_method_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setup_method and + teardown_method (#517).""" + setup = getattr(cls, "setup_method", None) + teardown = getattr(cls, "teardown_method", None) + if setup is None and teardown is None: + return None + + def unittest_setup_method_fixture( + request: FixtureRequest, + ) -> Generator[None]: + self = request.instance + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + setup(self, request.function) + yield + if teardown is not None: + teardown(self, request.function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. 
+ name=f"_unittest_setup_method_fixture_{cls.__qualname__}", + func=unittest_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None + + def _getinstance(self): + assert isinstance(self.parent, UnitTestCase) + return self.parent.obj(self.name) + + # Backward compat for pytest-django; can be removed after pytest-django + # updates + some slack. + @property + def _testcase(self): + return self.instance + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Callable[[], None] | None = None + super().setup() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._obj = None + del self._instance + super().teardown() + + def startTest(self, testcase: unittest.TestCase) -> None: + pass + + def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None: + # Unwrap potential exception info (see twisted trial support below). + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( + rawexcinfo # type: ignore[arg-type] + ) + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + _ = excinfo.value + _ = excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + f"representation:\n{rawexcinfo!r}", + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: + try: + raise pytest.skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: unittest.TestCase, + rawexcinfo: _SysExcInfoType, + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: unittest.TestCase, + reason: twisted.trial.unittest.Todo | None = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. 
+ try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: unittest.TestCase) -> None: + pass + + def stopTest(self, testcase: unittest.TestCase) -> None: + pass + + def addDuration(self, testcase: unittest.TestCase, elapsed: float) -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + testcase = self.instance + assert testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + testcase(result=self) + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + # We need to consider if the test itself is skipped, or the whole class. + assert isinstance(self.parent, UnitTestCase) + skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj) + if self.config.getoption("usepdb") and not skipped: + self._explicit_tearDown = testcase.tearDown + setattr(testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(testcase, self.name, self.obj) + try: + testcase(result=self) + finally: + delattr(testcase, self.name) + + def _traceback_filter( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> _pytest._code.Traceback: + traceback = super()._traceback_filter(excinfo) + ntraceback = traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest"), + ) + if not ntraceback: + ntraceback = traceback + return ntraceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. + unittest = sys.modules.get("unittest") + if unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest): + excinfo = call.excinfo + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +# Twisted trial support. 
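+# The wrapper hook below registers TestCaseFunction as a trial IReporter and
+# patches twisted.python.failure.Failure.__init__ to stash the raw exc_info
+# pytest needs for reporting; the original __init__ is restored afterwards.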
+classImplements_has_run = False + + +@hookimpl(wrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut: Any = sys.modules["twisted.python.failure"] + global classImplements_has_run + Failure__init__ = ut.Failure.__init__ + if not classImplements_has_run: + from twisted.trial.itrial import IReporter + from zope.interface import classImplements + + classImplements(TestCaseFunction, IReporter) + classImplements_has_run = True + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + try: + res = yield + finally: + ut.Failure.__init__ = Failure__init__ + else: + res = yield + return res + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) diff --git a/.venv/Lib/site-packages/_pytest/unraisableexception.py b/.venv/Lib/site-packages/_pytest/unraisableexception.py new file mode 100644 index 00000000..77a2de20 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import sys +import traceback +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import TYPE_CHECKING +import warnings + +import pytest + + +if TYPE_CHECKING: + from typing_extensions import Self + + +# Copied from cpython/Lib/test/support/__init__.py, with modifications. +class catch_unraisable_exception: + """Context manager catching unraisable exception using sys.unraisablehook. + + Storing the exception value (cm.unraisable.exc_value) creates a reference + cycle. The reference cycle is broken explicitly when the context manager + exits. + + Storing the object (cm.unraisable.object) can resurrect it if it is set to + an object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with catch_unraisable_exception() as cm: + # code creating an "unraisable exception" + ... + # check the unraisable exception: use cm.unraisable + ... + # cm.unraisable attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.unraisable: sys.UnraisableHookArgs | None = None + self._old_hook: Callable[[sys.UnraisableHookArgs], Any] | None = None + + def _hook(self, unraisable: sys.UnraisableHookArgs) -> None: + # Storing unraisable.object can resurrect an object which is being + # finalized. Storing unraisable.exc_value creates a reference cycle. 
+        self.unraisable = unraisable
+
+    def __enter__(self) -> Self:
+        self._old_hook = sys.unraisablehook
+        sys.unraisablehook = self._hook
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        assert self._old_hook is not None
+        sys.unraisablehook = self._old_hook
+        self._old_hook = None
+        del self.unraisable
+
+
+def unraisable_exception_runtest_hook() -> Generator[None]:
+    with catch_unraisable_exception() as cm:
+        try:
+            yield
+        finally:
+            if cm.unraisable:
+                if cm.unraisable.err_msg is not None:
+                    err_msg = cm.unraisable.err_msg
+                else:
+                    err_msg = "Exception ignored in"
+                msg = f"{err_msg}: {cm.unraisable.object!r}\n\n"
+                msg += "".join(
+                    traceback.format_exception(
+                        cm.unraisable.exc_type,
+                        cm.unraisable.exc_value,
+                        cm.unraisable.exc_traceback,
+                    )
+                )
+                warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_setup() -> Generator[None]:
+    yield from unraisable_exception_runtest_hook()
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_call() -> Generator[None]:
+    yield from unraisable_exception_runtest_hook()
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_teardown() -> Generator[None]:
+    yield from unraisable_exception_runtest_hook()
diff --git a/.venv/Lib/site-packages/_pytest/warning_types.py b/.venv/Lib/site-packages/_pytest/warning_types.py
new file mode 100644
index 00000000..4ab14e48
--- /dev/null
+++ b/.venv/Lib/site-packages/_pytest/warning_types.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+import dataclasses
+import inspect
+from types import FunctionType
+from typing import Any
+from typing import final
+from typing import Generic
+from typing import TypeVar
+import warnings
+
+
+class PytestWarning(UserWarning):
+    """Base class for all warnings emitted by pytest."""
+
+    __module__ = "pytest"
+
+
+@final
+class PytestAssertRewriteWarning(PytestWarning):
+    """Warning emitted by the pytest assert rewrite module."""
+
+    __module__ = "pytest"
+
+
+@final
+class PytestCacheWarning(PytestWarning):
+    """Warning emitted by the cache plugin in various situations."""
+
+    __module__ = "pytest"
+
+
+@final
+class PytestConfigWarning(PytestWarning):
+    """Warning emitted for configuration issues."""
+
+    __module__ = "pytest"
+
+
+@final
+class PytestCollectionWarning(PytestWarning):
+    """Warning emitted when pytest is not able to collect a file or symbol in a module."""
+
+    __module__ = "pytest"
+
+
+class PytestDeprecationWarning(PytestWarning, DeprecationWarning):
+    """Warning class for features that will be removed in a future version."""
+
+    __module__ = "pytest"
+
+
+class PytestRemovedIn9Warning(PytestDeprecationWarning):
+    """Warning class for features that will be removed in pytest 9."""
+
+    __module__ = "pytest"
+
+
+class PytestReturnNotNoneWarning(PytestWarning):
+    """Warning emitted when a test function returns a value other than None."""
+
+    __module__ = "pytest"
+
+
+@final
+class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
+    """Warning category used to denote experiments in pytest.
+
+    Use sparingly as the API might change or even be removed completely in a
+    future version.
+ """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> PytestExperimentalApiWarning: + return cls(f"{apiname} is an experimental api that may change over time") + + +@final +class PytestUnhandledCoroutineWarning(PytestReturnNotNoneWarning): + """Warning emitted for an unhandled coroutine. + + A coroutine was encountered when collecting test functions, but was not + handled by any async-aware plugin. + Coroutine test functions are not natively supported. + """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@dataclasses.dataclass +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category: type[_W] + template: str + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) + + +def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None: + """ + Issue the warning :param:`message` for the definition of the given :param:`method` + + this helps to log warnings for functions defined prior to finding an issue with them + (like hook wrappers being marked in a legacy mechanism) + """ + lineno = method.__code__.co_firstlineno + filename = inspect.getfile(method) + module = method.__module__ + mod_globals = method.__globals__ + try: + warnings.warn_explicit( + message, + type(message), + filename=filename, + module=module, + registry=mod_globals.setdefault("__warningregistry__", {}), + lineno=lineno, + ) + except Warning as w: + # If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message. + raise type(w)(f"{w}\n at {filename}:{lineno}") from None diff --git a/.venv/Lib/site-packages/_pytest/warnings.py b/.venv/Lib/site-packages/_pytest/warnings.py new file mode 100644 index 00000000..eeb47726 --- /dev/null +++ b/.venv/Lib/site-packages/_pytest/warnings.py @@ -0,0 +1,151 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from contextlib import contextmanager +import sys +from typing import Generator +from typing import Literal +import warnings + +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter +import pytest + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. 
" + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: Literal["config", "collect", "runtest"], + item: Item | None, +) -> Generator[None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=True) as log: + # mypy can't infer that record=True means log is not None; help it. + assert log is not None + + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + # To be enabled in pytest 9.0.0. + # warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + try: + yield + finally: + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + warn_msg = warning_message.message + msg = warnings.formatwarning( + str(warn_msg), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if warning_message.source is not None: + try: + import tracemalloc + except ImportError: + pass + else: + tb = tracemalloc.get_object_traceback(warning_message.source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + msg += f"\nObject allocated at:\n{formatted_tb}" + else: + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + msg += "Enable tracemalloc to get traceback where the object was allocated.\n" + msg += f"See {url} for more info." 
+ return msg + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + return (yield) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, object, object]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + return (yield) + + +@pytest.hookimpl(wrapper=True) +def pytest_load_initial_conftests( + early_config: Config, +) -> Generator[None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + return (yield) diff --git a/.venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA new file mode 100644 index 00000000..a1b5c575 --- /dev/null +++ b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA @@ -0,0 +1,441 @@ +Metadata-Version: 2.1 +Name: colorama +Version: 0.4.6 +Summary: Cross-platform colored terminal text. +Project-URL: Homepage, https://github.com/tartley/colorama +Author-email: Jonathan Hartley +License-File: LICENSE.txt +Keywords: ansi,color,colour,crossplatform,terminal,text,windows,xplatform +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Terminals +Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7 +Description-Content-Type: text/x-rst + +.. image:: https://img.shields.io/pypi/v/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/pyversions/colorama.svg + :target: https://pypi.org/project/colorama/ + :alt: Supported Python versions + +.. 
image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg + :target: https://github.com/tartley/colorama/actions/workflows/test.yml + :alt: Build Status + +Colorama +======== + +Makes ANSI escape character sequences (for producing colored terminal text and +cursor positioning) work under MS Windows. + +.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif + :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama¤cy_code=USD + :alt: Donate with Paypal + +`PyPI for releases `_ | +`Github for source `_ | +`Colorama for enterprise on Tidelift `_ + +If you find Colorama useful, please |donate| to the authors. Thank you! + +Installation +------------ + +Tested on CPython 2.7, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.8. + +No requirements other than the standard library. + +.. code-block:: bash + + pip install colorama + # or + conda install -c anaconda colorama + +Description +----------- + +ANSI escape character sequences have long been used to produce colored terminal +text and cursor positioning on Unix and Macs. Colorama makes this work on +Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which +would appear as gobbledygook in the output), and converting them into the +appropriate win32 calls to modify the state of the terminal. On other platforms, +Colorama does nothing. + +This has the upshot of providing a simple cross-platform API for printing +colored terminal text from Python, and has the happy side-effect that existing +applications or libraries which use ANSI sequences to produce colored output on +Linux or Macs can now also work on Windows, simply by calling +``colorama.just_fix_windows_console()`` (since v0.4.6) or ``colorama.init()`` +(all versions, but may have other side-effects – see below). + +An alternative approach is to install ``ansi.sys`` on Windows machines, which +provides the same behaviour for all applications running in terminals. Colorama +is intended for situations where that isn't easy (e.g., maybe your app doesn't +have an installer.) + +Demo scripts in the source code repository print some colored text using +ANSI sequences. Compare their output under Gnome-terminal's built in ANSI +handling, versus on Windows Command-Prompt using Colorama: + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png + :width: 661 + :height: 357 + :alt: ANSI sequences on Ubuntu under gnome-terminal. + +.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png + :width: 668 + :height: 325 + :alt: Same ANSI sequences on Windows, using Colorama. + +These screenshots show that, on Windows, Colorama does not support ANSI 'dim +text'; it looks the same as 'normal text'. + +Usage +----- + +Initialisation +.............. + +If the only thing you want from Colorama is to get ANSI escapes to work on +Windows, then run: + +.. code-block:: python + + from colorama import just_fix_windows_console + just_fix_windows_console() + +If you're on a recent version of Windows 10 or better, and your stdout/stderr +are pointing to a Windows console, then this will flip the magic configuration +switch to enable Windows' built-in ANSI support. + +If you're on an older version of Windows, and your stdout/stderr are pointing to +a Windows console, then this will wrap ``sys.stdout`` and/or ``sys.stderr`` in a +magic file object that intercepts ANSI escape sequences and issues the +appropriate Win32 calls to emulate them. 
+
+In all other circumstances, it does nothing whatsoever. Basically the idea is
+that this makes Windows act like Unix with respect to ANSI escape handling.
+
+It's safe to call this function multiple times. It's safe to call this function
+on non-Windows platforms, but it won't do anything. It's safe to call this
+function when one or both of your stdout/stderr are redirected to a file – it
+won't do anything to those streams.
+
+Alternatively, you can use the older interface with more features (but also more
+potential footguns):
+
+.. code-block:: python
+
+    from colorama import init
+    init()
+
+This does the same thing as ``just_fix_windows_console``, except for the
+following differences:
+
+- It's not safe to call ``init`` multiple times; you can end up with multiple
+  layers of wrapping and broken ANSI support.
+
+- Colorama will apply a heuristic to guess whether stdout/stderr support ANSI,
+  and if it thinks they don't, then it will wrap ``sys.stdout`` and
+  ``sys.stderr`` in a magic file object that strips out ANSI escape sequences
+  before printing them. This happens on all platforms, and can be convenient if
+  you want to write your code to emit ANSI escape sequences unconditionally, and
+  let Colorama decide whether they should actually be output. But note that
+  Colorama's heuristic is not particularly clever.
+
+- ``init`` also accepts explicit keyword args to enable/disable various
+  functionality – see below.
+
+To stop using Colorama before your program exits, simply call ``deinit()``.
+This will restore ``stdout`` and ``stderr`` to their original values, so that
+Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is
+cheaper than calling ``init()`` again (but does the same thing).
+
+Most users should depend on ``colorama >= 0.4.6``, and use
+``just_fix_windows_console``. The old ``init`` interface will be supported
+indefinitely for backwards compatibility, but we don't plan to fix any issues
+with it, also for backwards compatibility.
+
+Colored Output
+..............
+
+Cross-platform printing of colored text can then be done using Colorama's
+constant shorthand for ANSI escape sequences. These are deliberately
+rudimentary; see below.
+
+.. code-block:: python
+
+    from colorama import Fore, Back, Style
+    print(Fore.RED + 'some red text')
+    print(Back.GREEN + 'and with a green background')
+    print(Style.DIM + 'and in dim text')
+    print(Style.RESET_ALL)
+    print('back to normal now')
+
+...or simply by manually printing ANSI sequences from your own code:
+
+.. code-block:: python
+
+    print('\033[31m' + 'some red text')
+    print('\033[39m') # and reset to default color
+
+...or, Colorama can be used in conjunction with existing ANSI libraries
+such as the venerable `Termcolor `_, the fabulous `Blessings `_,
+or the incredible `Rich `_.
+
+If you wish Colorama's Fore, Back and Style constants were more capable,
+then consider using one of the above highly capable libraries to generate
+colors, etc, and use Colorama just for its primary purpose: to convert
+those ANSI sequences to also work on Windows:
+
+SIMILARLY, do not send PRs adding the generation of new ANSI types to Colorama.
+We are only interested in converting ANSI codes to win32 API calls, not
+shortcuts like the above to generate ANSI characters.
+
+.. code-block:: python
code-block:: python
+
+    from colorama import just_fix_windows_console
+    from termcolor import colored
+
+    # use Colorama to make Termcolor work on Windows too
+    just_fix_windows_console()
+
+    # then use Termcolor for all colored text output
+    print(colored('Hello, World!', 'green', 'on_red'))
+
+Available formatting constants are::
+
+    Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+    Style: DIM, NORMAL, BRIGHT, RESET_ALL
+
+``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will
+perform this reset automatically on program exit.
+
+These are fairly well supported, but not part of the standard::
+
+    Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+    Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+
+Cursor Positioning
+..................
+
+ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for
+an example of how to generate them.
+
+Init Keyword Args
+.................
+
+``init()`` accepts some ``**kwargs`` to override default behaviour.
+
+init(autoreset=False):
+    If you find yourself repeatedly sending reset sequences to turn off color
+    changes at the end of every print, then ``init(autoreset=True)`` will
+    automate that:
+
+    .. code-block:: python
+
+        from colorama import init, Fore
+        init(autoreset=True)
+        print(Fore.RED + 'some red text')
+        print('automatically back to default color again')
+
+init(strip=None):
+    Pass ``True`` or ``False`` to override whether ANSI codes should be
+    stripped from the output. The default behaviour is to strip if on Windows
+    or if output is redirected (not a tty).
+
+init(convert=None):
+    Pass ``True`` or ``False`` to override whether to convert ANSI codes in the
+    output into win32 calls. The default behaviour is to convert if on Windows
+    and output is to a tty (terminal).
+
+init(wrap=True):
+    On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr``
+    with proxy objects, which override the ``.write()`` method to do their work.
+    If this wrapping causes you problems, then this can be disabled by passing
+    ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or
+    ``strip`` or ``convert`` are True.
+
+    When wrapping is disabled, colored printing on non-Windows platforms will
+    continue to work as normal. To do cross-platform colored output, you can
+    use Colorama's ``AnsiToWin32`` proxy directly:
+
+    .. code-block:: python
+
+        import sys
+        from colorama import init, Fore, AnsiToWin32
+        init(wrap=False)
+        stream = AnsiToWin32(sys.stderr).stream
+
+        # Python 2
+        print >>stream, Fore.BLUE + 'blue text on stderr'
+
+        # Python 3
+        print(Fore.BLUE + 'blue text on stderr', file=stream)
+
+Recognised ANSI Sequences
+.........................
+
+ANSI sequences generally take the form::
+
+    ESC [ <param> ; <param> ... <command>
+
+Where ``<param>`` is an integer, and ``<command>`` is a single letter. Zero or
+more params are passed to a ``<command>``. If no params are passed, it is
+generally synonymous with passing a single zero. No spaces exist in the
+sequence; they have been inserted here simply to read more easily.
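+
+The constants shown earlier are just these sequences spelled out. A minimal
+sketch (asserting only the documented values of the public constants) to make
+the structure concrete:
+
+.. code-block:: python
+
+    from colorama import Fore, Style
+
+    CSI = '\033['                            # "ESC [", the introducer
+    assert Fore.RED == CSI + '31' + 'm'      # one param (31), command 'm'
+    assert Style.BRIGHT == CSI + '1' + 'm'   # one param (1), command 'm'
+    assert Fore.RESET == CSI + '39' + 'm'    # param 39 resets the foreground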
+
+The only ANSI sequences that Colorama converts into win32 calls are::
+
+    ESC [ 0 m       # reset all (colors and brightness)
+    ESC [ 1 m       # bright
+    ESC [ 2 m       # dim (looks same as normal brightness)
+    ESC [ 22 m      # normal brightness
+
+    # FOREGROUND:
+    ESC [ 30 m      # black
+    ESC [ 31 m      # red
+    ESC [ 32 m      # green
+    ESC [ 33 m      # yellow
+    ESC [ 34 m      # blue
+    ESC [ 35 m      # magenta
+    ESC [ 36 m      # cyan
+    ESC [ 37 m      # white
+    ESC [ 39 m      # reset
+
+    # BACKGROUND
+    ESC [ 40 m      # black
+    ESC [ 41 m      # red
+    ESC [ 42 m      # green
+    ESC [ 43 m      # yellow
+    ESC [ 44 m      # blue
+    ESC [ 45 m      # magenta
+    ESC [ 46 m      # cyan
+    ESC [ 47 m      # white
+    ESC [ 49 m      # reset
+
+    # cursor positioning
+    ESC [ y;x H     # position cursor at x across, y down
+    ESC [ y;x f     # position cursor at x across, y down
+    ESC [ n A       # move cursor n lines up
+    ESC [ n B       # move cursor n lines down
+    ESC [ n C       # move cursor n characters forward
+    ESC [ n D       # move cursor n characters backward
+
+    # clear the screen
+    ESC [ mode J    # clear the screen
+
+    # clear the line
+    ESC [ mode K    # clear the line
+
+Multiple numeric params to the ``'m'`` command can be combined into a single
+sequence::
+
+    ESC [ 36 ; 45 ; 1 m     # bright cyan text on magenta background
+
+All other ANSI sequences of the form ``ESC [ <param> ; <param> ... <command>``
+are silently stripped from the output on Windows.
+
+Any other form of ANSI sequence, such as single-character codes or alternative
+initial characters, is not recognised or stripped. It would be cool to add
+them though. Let me know if it would be useful for you, via the Issues on
+GitHub.
+
+Status & Known Problems
+-----------------------
+
+I've personally only tested it on Windows XP (CMD, Console2), Ubuntu
+(gnome-terminal, xterm), and OS X.
+
+Some valid ANSI sequences aren't recognised.
+
+If you're hacking on the code, see `README-hacking.md`_. ESPECIALLY, see the
+explanation there of why we do not want PRs that allow Colorama to generate new
+types of ANSI codes.
+
+See outstanding issues and wish-list:
+https://github.com/tartley/colorama/issues
+
+If anything doesn't work for you, or doesn't do what you expected or hoped for,
+I'd love to hear about it on that issues list, would be delighted by patches,
+and would be happy to grant commit access to anyone who submits a working patch
+or two.
+
+.. _README-hacking.md: README-hacking.md
+
+License
+-------
+
+Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see
+LICENSE file.
+
+Professional support
+--------------------
+
+.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png
+   :alt: Tidelift
+   :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+.. list-table::
+   :widths: 10 100
+
+   * - |tideliftlogo|
+     - Professional support for colorama is available as part of the
+       `Tidelift Subscription`_.
+       Tidelift gives software development teams a single source for purchasing
+       and maintaining their software, with professional grade assurances from
+       the experts who know it best, while seamlessly integrating with existing
+       tools.
+
+.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+Thanks
+------
+
+See the CHANGELOG for more thanks!
+
+* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5.
+* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``, + providing a solution to issue #7's setuptools/distutils debate, + and other fixes. +* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``. +* Matthew McCormick for politely pointing out a longstanding crash on non-Win. +* Ben Hoyt, for a magnificent fix under 64-bit Windows. +* Jesse at Empty Square for submitting a fix for examples in the README. +* User 'jamessp', an observant documentation fix for cursor positioning. +* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7 + fix. +* Julien Stuyck, for wisely suggesting Python3 compatible updates to README. +* Daniel Griffith for multiple fabulous patches. +* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty + output. +* Roger Binns, for many suggestions, valuable feedback, & bug reports. +* Tim Golden for thought and much appreciated feedback on the initial idea. +* User 'Zearin' for updates to the README file. +* John Szakmeister for adding support for light colors +* Charles Merriam for adding documentation to demos +* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes +* Florian Bruhin for a fix when stdout or stderr are None +* Thomas Weininger for fixing ValueError on Windows +* Remi Rampin for better Github integration and fixes to the README file +* Simeon Visser for closing a file handle using 'with' and updating classifiers + to include Python 3.3 and 3.4 +* Andy Neff for fixing RESET of LIGHT_EX colors. +* Jonathan Hartley for the initial idea and implementation. diff --git a/.venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD new file mode 100644 index 00000000..cd6b130d --- /dev/null +++ b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD @@ -0,0 +1,31 @@ +colorama-0.4.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +colorama-0.4.6.dist-info/METADATA,sha256=e67SnrUMOym9sz_4TjF3vxvAV4T3aF7NyqRHHH3YEMw,17158 +colorama-0.4.6.dist-info/RECORD,, +colorama-0.4.6.dist-info/WHEEL,sha256=cdcF4Fbd0FPtw2EMIOwH-3rSOTUdTCeOSXRMD1iLUb8,105 +colorama-0.4.6.dist-info/licenses/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491 +colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266 +colorama/__pycache__/__init__.cpython-312.pyc,, +colorama/__pycache__/ansi.cpython-312.pyc,, +colorama/__pycache__/ansitowin32.cpython-312.pyc,, +colorama/__pycache__/initialise.cpython-312.pyc,, +colorama/__pycache__/win32.cpython-312.pyc,, +colorama/__pycache__/winterm.cpython-312.pyc,, +colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128 +colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325 +colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75 +colorama/tests/__pycache__/__init__.cpython-312.pyc,, +colorama/tests/__pycache__/ansi_test.cpython-312.pyc,, +colorama/tests/__pycache__/ansitowin32_test.cpython-312.pyc,, +colorama/tests/__pycache__/initialise_test.cpython-312.pyc,, +colorama/tests/__pycache__/isatty_test.cpython-312.pyc,, +colorama/tests/__pycache__/utils.cpython-312.pyc,, +colorama/tests/__pycache__/winterm_test.cpython-312.pyc,, +colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839 +colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678 
+colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741 +colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866 +colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079 +colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709 +colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181 +colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134 diff --git a/.venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL new file mode 100644 index 00000000..d79189fd --- /dev/null +++ b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.11.1 +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any diff --git a/.venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt new file mode 100644 index 00000000..3105888e --- /dev/null +++ b/.venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2010 Jonathan Hartley +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holders, nor those of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/Lib/site-packages/colorama/__init__.py b/.venv/Lib/site-packages/colorama/__init__.py new file mode 100644 index 00000000..383101cd --- /dev/null +++ b/.venv/Lib/site-packages/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.4.6' + diff --git a/.venv/Lib/site-packages/colorama/ansi.py b/.venv/Lib/site-packages/colorama/ansi.py new file mode 100644 index 00000000..11ec695f --- /dev/null +++ b/.venv/Lib/site-packages/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\a'
+
+
+def code_to_chars(code):
+    return CSI + str(code) + 'm'
+
+def set_title(title):
+    return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+    return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+    return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+    def __init__(self):
+        # the subclasses declare class attributes which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence
+        for name in dir(self):
+            if not name.startswith('_'):
+                value = getattr(self, name)
+                setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+    def UP(self, n=1):
+        return CSI + str(n) + 'A'
+    def DOWN(self, n=1):
+        return CSI + str(n) + 'B'
+    def FORWARD(self, n=1):
+        return CSI + str(n) + 'C'
+    def BACK(self, n=1):
+        return CSI + str(n) + 'D'
+    def POS(self, x=1, y=1):
+        return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+    BLACK = 30
+    RED = 31
+    GREEN = 32
+    YELLOW = 33
+    BLUE = 34
+    MAGENTA = 35
+    CYAN = 36
+    WHITE = 37
+    RESET = 39
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX = 90
+    LIGHTRED_EX = 91
+    LIGHTGREEN_EX = 92
+    LIGHTYELLOW_EX = 93
+    LIGHTBLUE_EX = 94
+    LIGHTMAGENTA_EX = 95
+    LIGHTCYAN_EX = 96
+    LIGHTWHITE_EX = 97
+
+
+class AnsiBack(AnsiCodes):
+    BLACK = 40
+    RED = 41
+    GREEN = 42
+    YELLOW = 43
+    BLUE = 44
+    MAGENTA = 45
+    CYAN = 46
+    WHITE = 47
+    RESET = 49
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX = 100
+    LIGHTRED_EX = 101
+    LIGHTGREEN_EX = 102
+    LIGHTYELLOW_EX = 103
+    LIGHTBLUE_EX = 104
+    LIGHTMAGENTA_EX = 105
+    LIGHTCYAN_EX = 106
+    LIGHTWHITE_EX = 107
+
+
+class AnsiStyle(AnsiCodes):
+    BRIGHT = 1
+    DIM = 2
+    NORMAL = 22
+    RESET_ALL = 0
+
+Fore = AnsiFore()
+Back = AnsiBack()
+Style = AnsiStyle()
+Cursor = AnsiCursor()
diff --git a/.venv/Lib/site-packages/colorama/ansitowin32.py b/.venv/Lib/site-packages/colorama/ansitowin32.py
new file mode 100644
index 00000000..abf209e6
--- /dev/null
+++ b/.venv/Lib/site-packages/colorama/ansitowin32.py
@@ -0,0 +1,277 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import re
+import sys
+import os
+
+from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
+from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle
+from .win32 import windll, winapi_test
+
+
+winterm = None
+if windll is not None:
+    winterm = WinTerm()
+
+
+class StreamWrapper(object):
+    '''
+    Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from method 'write()', which is delegated to our
+    Converter instance.
+    '''
+    def __init__(self, wrapped, converter):
+        # double-underscore everything to prevent clashes with names of
+        # attributes on the wrapped stream object.
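+        # (Python's name mangling stores these as _StreamWrapper__wrapped and
+        # _StreamWrapper__convertor, so even a stream exposing its own
+        # 'wrapped' or 'convertor' attribute is still proxied faithfully.)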
+        self.__wrapped = wrapped
+        self.__convertor = converter
+
+    def __getattr__(self, name):
+        return getattr(self.__wrapped, name)
+
+    def __enter__(self, *args, **kwargs):
+        # special method lookup bypasses __getattr__/__getattribute__, see
+        # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
+        # thus, contextlib magic methods are not proxied via __getattr__
+        return self.__wrapped.__enter__(*args, **kwargs)
+
+    def __exit__(self, *args, **kwargs):
+        return self.__wrapped.__exit__(*args, **kwargs)
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def write(self, text):
+        self.__convertor.write(text)
+
+    def isatty(self):
+        stream = self.__wrapped
+        if 'PYCHARM_HOSTED' in os.environ:
+            if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
+                return True
+        try:
+            stream_isatty = stream.isatty
+        except AttributeError:
+            return False
+        else:
+            return stream_isatty()
+
+    @property
+    def closed(self):
+        stream = self.__wrapped
+        try:
+            return stream.closed
+        # AttributeError in the case that the stream doesn't support being closed
+        # ValueError for the case that the stream has already been detached when atexit runs
+        except (AttributeError, ValueError):
+            return True
+
+
+class AnsiToWin32(object):
+    '''
+    Implements a 'write()' method which, on Windows, will strip ANSI character
+    sequences from the text, and if outputting to a tty, will convert them into
+    win32 function calls.
+    '''
+    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
+    ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?')             # Operating System Command
+
+    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+        # The wrapped stream (normally sys.stdout or sys.stderr)
+        self.wrapped = wrapped
+
+        # should we reset colors to defaults after every .write()
+        self.autoreset = autoreset
+
+        # create the proxy wrapping our output stream
+        self.stream = StreamWrapper(wrapped, self)
+
+        on_windows = os.name == 'nt'
+        # We test if the WinAPI works, because even if we are on Windows
+        # we may be using a terminal that doesn't support the WinAPI
+        # (e.g. Cygwin Terminal). In this case it's up to the terminal
+        # to support the ANSI codes.
+        conversion_supported = on_windows and winapi_test()
+        try:
+            fd = wrapped.fileno()
+        except Exception:
+            fd = -1
+        system_has_native_ansi = not on_windows or enable_vt_processing(fd)
+        have_tty = not self.stream.closed and self.stream.isatty()
+        need_conversion = conversion_supported and not system_has_native_ansi
+
+        # should we strip ANSI sequences from our output?
+        if strip is None:
+            strip = need_conversion or not have_tty
+        self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+        if convert is None:
+            convert = need_conversion and have_tty
+        self.convert = convert
+
+        # dict of ansi codes to win32 functions and parameters
+        self.win32_calls = self.get_win32_calls()
+
+        # are we wrapping stderr?
+        self.on_stderr = self.wrapped is sys.stderr
+
+    def should_wrap(self):
+        '''
+        True if this class is actually needed. If false, then the output
+        stream will not be affected, nor will win32 calls be issued, so
+        wrapping stdout is not actually required.
This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not self.stream.closed: + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. 
+ ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text + + + def flush(self): + self.wrapped.flush() diff --git a/.venv/Lib/site-packages/colorama/initialise.py b/.venv/Lib/site-packages/colorama/initialise.py new file mode 100644 index 00000000..d5fd4b71 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/initialise.py @@ -0,0 +1,121 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None + + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False + + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. 
+ pass + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. + new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/.venv/Lib/site-packages/colorama/tests/__init__.py b/.venv/Lib/site-packages/colorama/tests/__init__.py new file mode 100644 index 00000000..8c5661e9 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/__init__.py @@ -0,0 +1 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. diff --git a/.venv/Lib/site-packages/colorama/tests/ansi_test.py b/.venv/Lib/site-packages/colorama/tests/ansi_test.py new file mode 100644 index 00000000..0a20c80f --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/ansi_test.py @@ -0,0 +1,76 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansi import Back, Fore, Style +from ..ansitowin32 import AnsiToWin32 + +stdout_orig = sys.stdout +stderr_orig = sys.stderr + + +class AnsiTest(TestCase): + + def setUp(self): + # sanity check: stdout should be a file or StringIO object. 
+ # It will only be AnsiToWin32 if init() has previously wrapped it + self.assertNotEqual(type(sys.stdout), AnsiToWin32) + self.assertNotEqual(type(sys.stderr), AnsiToWin32) + + def tearDown(self): + sys.stdout = stdout_orig + sys.stderr = stderr_orig + + + def testForeAttributes(self): + self.assertEqual(Fore.BLACK, '\033[30m') + self.assertEqual(Fore.RED, '\033[31m') + self.assertEqual(Fore.GREEN, '\033[32m') + self.assertEqual(Fore.YELLOW, '\033[33m') + self.assertEqual(Fore.BLUE, '\033[34m') + self.assertEqual(Fore.MAGENTA, '\033[35m') + self.assertEqual(Fore.CYAN, '\033[36m') + self.assertEqual(Fore.WHITE, '\033[37m') + self.assertEqual(Fore.RESET, '\033[39m') + + # Check the light, extended versions. + self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m') + self.assertEqual(Fore.LIGHTRED_EX, '\033[91m') + self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m') + self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m') + self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m') + self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m') + self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m') + self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m') + + + def testBackAttributes(self): + self.assertEqual(Back.BLACK, '\033[40m') + self.assertEqual(Back.RED, '\033[41m') + self.assertEqual(Back.GREEN, '\033[42m') + self.assertEqual(Back.YELLOW, '\033[43m') + self.assertEqual(Back.BLUE, '\033[44m') + self.assertEqual(Back.MAGENTA, '\033[45m') + self.assertEqual(Back.CYAN, '\033[46m') + self.assertEqual(Back.WHITE, '\033[47m') + self.assertEqual(Back.RESET, '\033[49m') + + # Check the light, extended versions. + self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m') + self.assertEqual(Back.LIGHTRED_EX, '\033[101m') + self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m') + self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m') + self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m') + self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m') + self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m') + self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m') + + + def testStyleAttributes(self): + self.assertEqual(Style.DIM, '\033[2m') + self.assertEqual(Style.NORMAL, '\033[22m') + self.assertEqual(Style.BRIGHT, '\033[1m') + + +if __name__ == '__main__': + main() diff --git a/.venv/Lib/site-packages/colorama/tests/ansitowin32_test.py b/.venv/Lib/site-packages/colorama/tests/ansitowin32_test.py new file mode 100644 index 00000000..91ca551f --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/ansitowin32_test.py @@ -0,0 +1,294 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+from io import StringIO, TextIOWrapper
+from unittest import TestCase, main
+try:
+    from contextlib import ExitStack
+except ImportError:
+    # python 2
+    from contextlib2 import ExitStack
+
+try:
+    from unittest.mock import MagicMock, Mock, patch
+except ImportError:
+    from mock import MagicMock, Mock, patch
+
+from ..ansitowin32 import AnsiToWin32, StreamWrapper
+from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING
+from .utils import osname
+
+
+class StreamWrapperTest(TestCase):
+
+    def testIsAProxy(self):
+        mockStream = Mock()
+        wrapper = StreamWrapper(mockStream, None)
+        self.assertTrue( wrapper.random_attr is mockStream.random_attr )
+
+    def testDelegatesWrite(self):
+        mockStream = Mock()
+        mockConverter = Mock()
+        wrapper = StreamWrapper(mockStream, mockConverter)
+        wrapper.write('hello')
+        self.assertEqual(mockConverter.write.call_args, (('hello',), {}))
+
+    def testDelegatesContext(self):
+        mockConverter = Mock()
+        s = StringIO()
+        with StreamWrapper(s, mockConverter) as fp:
+            fp.write(u'hello')
+        self.assertTrue(s.closed)
+
+    def testProxyNoContextManager(self):
+        mockStream = MagicMock()
+        mockStream.__enter__.side_effect = AttributeError()
+        mockConverter = Mock()
+        with self.assertRaises(AttributeError) as excinfo:
+            with StreamWrapper(mockStream, mockConverter) as wrapper:
+                wrapper.write('hello')
+
+    def test_closed_shouldnt_raise_on_closed_stream(self):
+        stream = StringIO()
+        stream.close()
+        wrapper = StreamWrapper(stream, None)
+        self.assertEqual(wrapper.closed, True)
+
+    def test_closed_shouldnt_raise_on_detached_stream(self):
+        stream = TextIOWrapper(StringIO())
+        stream.detach()
+        wrapper = StreamWrapper(stream, None)
+        self.assertEqual(wrapper.closed, True)
+
+class AnsiToWin32Test(TestCase):
+
+    def testInit(self):
+        mockStdout = Mock()
+        auto = Mock()
+        stream = AnsiToWin32(mockStdout, autoreset=auto)
+        self.assertEqual(stream.wrapped, mockStdout)
+        self.assertEqual(stream.autoreset, auto)
+
+    @patch('colorama.ansitowin32.winterm', None)
+    @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
+    def testStripIsTrueOnWindows(self):
+        with osname('nt'):
+            mockStdout = Mock()
+            stream = AnsiToWin32(mockStdout)
+            self.assertTrue(stream.strip)
+
+    def testStripIsFalseOffWindows(self):
+        with osname('posix'):
+            mockStdout = Mock(closed=False)
+            stream = AnsiToWin32(mockStdout)
+            self.assertFalse(stream.strip)
+
+    def testWriteStripsAnsi(self):
+        mockStdout = Mock()
+        stream = AnsiToWin32(mockStdout)
+        stream.wrapped = Mock()
+        stream.write_and_convert = Mock()
+        stream.strip = True
+
+        stream.write('abc')
+
+        self.assertFalse(stream.wrapped.write.called)
+        self.assertEqual(stream.write_and_convert.call_args, (('abc',), {}))
+
+    def testWriteDoesNotStripAnsi(self):
+        mockStdout = Mock()
+        stream = AnsiToWin32(mockStdout)
+        stream.wrapped = Mock()
+        stream.write_and_convert = Mock()
+        stream.strip = False
+        stream.convert = False
+
+        stream.write('abc')
+
+        self.assertFalse(stream.write_and_convert.called)
+        self.assertEqual(stream.wrapped.write.call_args, (('abc',), {}))
+
+    def assert_autoresets(self, convert, autoreset=True):
+        stream = AnsiToWin32(Mock())
+        stream.convert = convert
+        stream.reset_all = Mock()
+        stream.autoreset = autoreset
+        stream.winterm = Mock()
+
+        stream.write('abc')
+
+        self.assertEqual(stream.reset_all.called, autoreset)
+
+    def testWriteAutoresets(self):
+        self.assert_autoresets(convert=True)
+        self.assert_autoresets(convert=False)
+        self.assert_autoresets(convert=True, autoreset=False)
+        self.assert_autoresets(convert=False,
autoreset=False) + + def testWriteAndConvertWritesPlainText(self): + stream = AnsiToWin32(Mock()) + stream.write_and_convert( 'abc' ) + self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) ) + + def testWriteAndConvertStripsAllValidAnsi(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + data = [ + 'abc\033[mdef', + 'abc\033[0mdef', + 'abc\033[2mdef', + 'abc\033[02mdef', + 'abc\033[002mdef', + 'abc\033[40mdef', + 'abc\033[040mdef', + 'abc\033[0;1mdef', + 'abc\033[40;50mdef', + 'abc\033[50;30;40mdef', + 'abc\033[Adef', + 'abc\033[0Gdef', + 'abc\033[1;20;128Hdef', + ] + for datum in data: + stream.wrapped.write.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( + [args[0] for args in stream.wrapped.write.call_args_list], + [ ('abc',), ('def',) ] + ) + + def testWriteAndConvertSkipsEmptySnippets(self): + stream = AnsiToWin32(Mock()) + stream.call_win32 = Mock() + stream.write_and_convert( '\033[40m\033[41m' ) + self.assertFalse( stream.wrapped.write.called ) + + def testWriteAndConvertCallsWin32WithParamsAndCommand(self): + stream = AnsiToWin32(Mock()) + stream.convert = True + stream.call_win32 = Mock() + stream.extract_params = Mock(return_value='params') + data = { + 'abc\033[adef': ('a', 'params'), + 'abc\033[;;bdef': ('b', 'params'), + 'abc\033[0cdef': ('c', 'params'), + 'abc\033[;;0;;Gdef': ('G', 'params'), + 'abc\033[1;20;128Hdef': ('H', 'params'), + } + for datum, expected in data.items(): + stream.call_win32.reset_mock() + stream.write_and_convert( datum ) + self.assertEqual( stream.call_win32.call_args[0], expected ) + + def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + converter = AnsiToWin32(stream) + stream.close() + + converter.reset_all() + + def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): + stream = StringIO() + stream.close() + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(stream) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def test_wrap_shouldnt_raise_on_missing_closed_attr(self): + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(object()) + self.assertTrue(converter.strip) + self.assertFalse(converter.convert) + + def testExtractParams(self): + stream = AnsiToWin32(Mock()) + data = { + '': (0,), + ';;': (0,), + '2': (2,), + ';;002;;': (2,), + '0;1': (0, 1), + ';;003;;456;;': (3, 456), + '11;22;33;44;55': (11, 22, 33, 44, 55), + } + for datum, expected in data.items(): + self.assertEqual(stream.extract_params('m', datum), expected) + + def testCallWin32UsesLookup(self): + listener = Mock() + stream = AnsiToWin32(listener) + stream.win32_calls = { + 1: (lambda *_, **__: listener(11),), + 2: (lambda *_, **__: listener(22),), + 3: (lambda *_, **__: listener(33),), + } + stream.call_win32('m', (3, 1, 99, 2)) + self.assertEqual( + [a[0][0] for a in listener.call_args_list], + [33, 11, 22] ) + + def test_osc_codes(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout, convert=True) + with patch('colorama.ansitowin32.winterm') as winterm: + data = [ + '\033]0\x07', # missing arguments + '\033]0;foo\x08', # wrong OSC command + '\033]0;colorama_test_title\x07', # should work + '\033]1;colorama_test_title\x07', # wrong set command + '\033]2;colorama_test_title\x07', # should work + '\033]' + ';' * 64 + '\x08', # see issue #247 + ] + for code in data: + 
stream.write(code) + self.assertEqual(winterm.set_title.call_count, 2) + + def test_native_windows_ansi(self): + with ExitStack() as stack: + def p(a, b): + stack.enter_context(patch(a, b, create=True)) + # Pretend to be on Windows + p("colorama.ansitowin32.os.name", "nt") + p("colorama.ansitowin32.winapi_test", lambda: True) + p("colorama.win32.winapi_test", lambda: True) + p("colorama.winterm.win32.windll", "non-None") + p("colorama.winterm.get_osfhandle", lambda _: 1234) + + # Pretend that our mock stream has native ANSI support + p( + "colorama.winterm.win32.GetConsoleMode", + lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = True + stdout.fileno.return_value = 1 + + # Our fake console says it has native vt support, so AnsiToWin32 should + # enable that support and do nothing else. + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertFalse(stream.strip) + self.assertFalse(stream.convert) + self.assertFalse(stream.should_wrap()) + + # Now let's pretend we're on an old Windows console, that doesn't have + # native ANSI support. + p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertTrue(stream.strip) + self.assertTrue(stream.convert) + self.assertTrue(stream.should_wrap()) + + +if __name__ == '__main__': + main() diff --git a/.venv/Lib/site-packages/colorama/tests/initialise_test.py b/.venv/Lib/site-packages/colorama/tests/initialise_test.py new file mode 100644 index 00000000..89f9b075 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/initialise_test.py @@ -0,0 +1,189 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import patch, Mock +except ImportError: + from mock import patch, Mock + +from ..ansitowin32 import StreamWrapper +from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests +from .utils import osname, replace_by + +orig_stdout = sys.stdout +orig_stderr = sys.stderr + + +class InitTest(TestCase): + + @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") + def setUp(self): + # sanity check + self.assertNotWrapped() + + def tearDown(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def assertWrapped(self): + self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped') + self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped') + self.assertTrue(isinstance(sys.stdout, StreamWrapper), + 'bad stdout wrapper') + self.assertTrue(isinstance(sys.stderr, StreamWrapper), + 'bad stderr wrapper') + + def assertNotWrapped(self): + self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped') + self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped') + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) + def testInitWrapsOnWindows(self, _): + with osname("nt"): + init() + self.assertWrapped() + + @patch('colorama.initialise.reset_all') + @patch('colorama.ansitowin32.winapi_test', lambda *_: False) + def testInitDoesntWrapOnEmulatedWindows(self, _): + with osname("nt"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapOnNonWindows(self): + with osname("posix"): + init() + self.assertNotWrapped() + + def testInitDoesntWrapIfNone(self): + with replace_by(None): + init() + # We can't use assertNotWrapped here because replace_by(None) + # changes stdout/stderr already. 
+ self.assertIsNone(sys.stdout) + self.assertIsNone(sys.stderr) + + def testInitAutoresetOnWrapsOnAllPlatforms(self): + with osname("posix"): + init(autoreset=True) + self.assertWrapped() + + def testInitWrapOffDoesntWrapOnWindows(self): + with osname("nt"): + init(wrap=False) + self.assertNotWrapped() + + def testInitWrapOffIncompatibleWithAutoresetOn(self): + self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) + + @patch('colorama.win32.SetConsoleTextAttribute') + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetPassedOn(self, mockATW32, _): + with osname("nt"): + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 2) + self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True) + + @patch('colorama.initialise.AnsiToWin32') + def testAutoResetChangeable(self, mockATW32): + with osname("nt"): + init() + + init(autoreset=True) + self.assertEqual(len(mockATW32.call_args_list), 4) + self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True) + self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True) + + init() + self.assertEqual(len(mockATW32.call_args_list), 6) + self.assertEqual( + mockATW32.call_args_list[4][1]['autoreset'], False) + self.assertEqual( + mockATW32.call_args_list[5][1]['autoreset'], False) + + + @patch('colorama.initialise.atexit.register') + def testAtexitRegisteredOnlyOnce(self, mockRegister): + init() + self.assertTrue(mockRegister.called) + mockRegister.reset_mock() + init() + self.assertFalse(mockRegister.called) + + +class JustFixWindowsConsoleTest(TestCase): + def _reset(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def tearDown(self): + self._reset() + + @patch("colorama.ansitowin32.winapi_test", lambda: True) + def testJustFixWindowsConsole(self): + if sys.platform != "win32": + # just_fix_windows_console should be a no-op + just_fix_windows_console() + self.assertIs(sys.stdout, orig_stdout) + self.assertIs(sys.stderr, orig_stderr) + else: + def fake_std(): + # Emulate stdout=not a tty, stderr=tty + # to check that we handle both cases correctly + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = False + stdout.fileno.return_value = 1 + sys.stdout = stdout + + stderr = Mock() + stderr.closed = False + stderr.isatty.return_value = True + stderr.fileno.return_value = 2 + sys.stderr = stderr + + for native_ansi in [False, True]: + with patch( + 'colorama.ansitowin32.enable_vt_processing', + lambda *_: native_ansi + ): + self._reset() + fake_std() + + # Regular single-call test + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + if native_ansi: + self.assertIs(sys.stderr, prev_stderr) + else: + self.assertIsNot(sys.stderr, prev_stderr) + + # second call without resetting is always a no-op + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + self.assertIs(sys.stderr, prev_stderr) + + self._reset() + fake_std() + + # If init() runs first, just_fix_windows_console should be a no-op + init() + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(prev_stdout, sys.stdout) + self.assertIs(prev_stderr, sys.stderr) + + +if __name__ == '__main__': + main() diff --git a/.venv/Lib/site-packages/colorama/tests/isatty_test.py 
b/.venv/Lib/site-packages/colorama/tests/isatty_test.py new file mode 100644 index 00000000..0f84e4be --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/isatty_test.py @@ -0,0 +1,57 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import sys +from unittest import TestCase, main + +from ..ansitowin32 import StreamWrapper, AnsiToWin32 +from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY + + +def is_a_tty(stream): + return StreamWrapper(stream, None).isatty() + +class IsattyTest(TestCase): + + def test_TTY(self): + tty = StreamTTY() + self.assertTrue(is_a_tty(tty)) + with pycharm(): + self.assertTrue(is_a_tty(tty)) + + def test_nonTTY(self): + non_tty = StreamNonTTY() + self.assertFalse(is_a_tty(non_tty)) + with pycharm(): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharm(self): + with pycharm(): + self.assertTrue(is_a_tty(sys.stderr)) + self.assertTrue(is_a_tty(sys.stdout)) + + def test_withPycharmTTYOverride(self): + tty = StreamTTY() + with pycharm(), replace_by(tty): + self.assertTrue(is_a_tty(tty)) + + def test_withPycharmNonTTYOverride(self): + non_tty = StreamNonTTY() + with pycharm(), replace_by(non_tty): + self.assertFalse(is_a_tty(non_tty)) + + def test_withPycharmNoneOverride(self): + with pycharm(): + with replace_by(None), replace_original_by(None): + self.assertFalse(is_a_tty(None)) + self.assertFalse(is_a_tty(StreamNonTTY())) + self.assertTrue(is_a_tty(StreamTTY())) + + def test_withPycharmStreamWrapped(self): + with pycharm(): + self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty()) + self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty()) + self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty()) + + +if __name__ == '__main__': + main() diff --git a/.venv/Lib/site-packages/colorama/tests/utils.py b/.venv/Lib/site-packages/colorama/tests/utils.py new file mode 100644 index 00000000..472fafb4 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/utils.py @@ -0,0 +1,49 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from contextlib import contextmanager +from io import StringIO +import sys +import os + + +class StreamTTY(StringIO): + def isatty(self): + return True + +class StreamNonTTY(StringIO): + def isatty(self): + return False + +@contextmanager +def osname(name): + orig = os.name + os.name = name + yield + os.name = orig + +@contextmanager +def replace_by(stream): + orig_stdout = sys.stdout + orig_stderr = sys.stderr + sys.stdout = stream + sys.stderr = stream + yield + sys.stdout = orig_stdout + sys.stderr = orig_stderr + +@contextmanager +def replace_original_by(stream): + orig_stdout = sys.__stdout__ + orig_stderr = sys.__stderr__ + sys.__stdout__ = stream + sys.__stderr__ = stream + yield + sys.__stdout__ = orig_stdout + sys.__stderr__ = orig_stderr + +@contextmanager +def pycharm(): + os.environ["PYCHARM_HOSTED"] = "1" + non_tty = StreamNonTTY() + with replace_by(non_tty), replace_original_by(non_tty): + yield + del os.environ["PYCHARM_HOSTED"] diff --git a/.venv/Lib/site-packages/colorama/tests/winterm_test.py b/.venv/Lib/site-packages/colorama/tests/winterm_test.py new file mode 100644 index 00000000..d0955f9e --- /dev/null +++ b/.venv/Lib/site-packages/colorama/tests/winterm_test.py @@ -0,0 +1,131 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+import sys +from unittest import TestCase, main, skipUnless + +try: + from unittest.mock import Mock, patch +except ImportError: + from mock import Mock, patch + +from ..winterm import WinColor, WinStyle, WinTerm + + +class WinTermTest(TestCase): + + @patch('colorama.winterm.win32') + def testInit(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 7 + 6 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + self.assertEqual(term._fore, 7) + self.assertEqual(term._back, 6) + self.assertEqual(term._style, 8) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testGetAttrs(self): + term = WinTerm() + + term._fore = 0 + term._back = 0 + term._style = 0 + self.assertEqual(term.get_attrs(), 0) + + term._fore = WinColor.YELLOW + self.assertEqual(term.get_attrs(), WinColor.YELLOW) + + term._back = WinColor.MAGENTA + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16) + + term._style = WinStyle.BRIGHT + self.assertEqual( + term.get_attrs(), + WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT) + + @patch('colorama.winterm.win32') + def testResetAll(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 1 + 2 * 16 + 8 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + + term.set_console = Mock() + term._fore = -1 + term._back = -1 + term._style = -1 + + term.reset_all() + + self.assertEqual(term._fore, 1) + self.assertEqual(term._back, 2) + self.assertEqual(term._style, 8) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testFore(self): + term = WinTerm() + term.set_console = Mock() + term._fore = 0 + + term.fore(5) + + self.assertEqual(term._fore, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testBack(self): + term = WinTerm() + term.set_console = Mock() + term._back = 0 + + term.back(5) + + self.assertEqual(term._back, 5) + self.assertEqual(term.set_console.called, True) + + @skipUnless(sys.platform.startswith("win"), "requires Windows") + def testStyle(self): + term = WinTerm() + term.set_console = Mock() + term._style = 0 + + term.style(22) + + self.assertEqual(term._style, 22) + self.assertEqual(term.set_console.called, True) + + @patch('colorama.winterm.win32') + def testSetConsole(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console() + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDOUT, term.get_attrs()), {}) + ) + + @patch('colorama.winterm.win32') + def testSetConsoleOnStderr(self, mockWin32): + mockAttr = Mock() + mockAttr.wAttributes = 0 + mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr + term = WinTerm() + term.windll = Mock() + + term.set_console(on_stderr=True) + + self.assertEqual( + mockWin32.SetConsoleTextAttribute.call_args, + ((mockWin32.STDERR, term.get_attrs()), {}) + ) + + +if __name__ == '__main__': + main() diff --git a/.venv/Lib/site-packages/colorama/win32.py b/.venv/Lib/site-packages/colorama/win32.py new file mode 100644 index 00000000..841b0e27 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/win32.py @@ -0,0 +1,180 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW + _SetConsoleTitleW.argtypes = [ + wintypes.LPCWSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + + def _winapi_test(handle): + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def winapi_test(): + return any(_winapi_test(h) for h in + (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = _GetStdHandle(stream_id) + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = _GetStdHandle(stream_id) + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the 
position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). + adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = _GetStdHandle(stream_id) + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = _GetStdHandle(stream_id) + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = _GetStdHandle(stream_id) + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/.venv/Lib/site-packages/colorama/winterm.py b/.venv/Lib/site-packages/colorama/winterm.py new file mode 100644 index 00000000..aad867e8 --- /dev/null +++ b/.venv/Lib/site-packages/colorama/winterm.py @@ -0,0 +1,195 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + + +from . import win32 + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
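+        # (Concretely: get_attrs() below recombines these as
+        # fore + back * 16 + (style | light); BRIGHT is 0x08 and
+        # BRIGHT_BACKGROUND is 0x80, so _light can raise either brightness
+        # bit without disturbing the color nibbles.)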
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + self._light = 0 + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + elif mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + elif mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + else: + # invalid mode + return + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/INSTALLER b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/LICENSE.txt b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/LICENSE.txt new file mode 100644 index 00000000..f433b1a5 --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/METADATA b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/METADATA new file mode 100644 index 00000000..f1a44cde --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/METADATA @@ -0,0 +1,199 @@ +Metadata-Version: 2.1 +Name: coverage +Version: 7.6.4 +Summary: Code coverage measurement for Python +Home-page: https://github.com/nedbat/coveragepy +Author: Ned Batchelder and 239 others +Author-email: ned@nedbatchelder.com +License: Apache-2.0 +Project-URL: Documentation, https://coverage.readthedocs.io/en/7.6.4 +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi +Project-URL: Issues, https://github.com/nedbat/coveragepy/issues +Project-URL: Mastodon, https://hachyderm.io/@coveragepy +Project-URL: Mastodon (nedbat), https://hachyderm.io/@nedbat +Keywords: code coverage testing +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing +Classifier: Development Status :: 5 - Production/Stable +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: toml +Requires-Dist: tomli ; (python_full_version <= "3.11.0a6") and extra == 'toml' + +.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +=========== +Coverage.py +=========== + +Code coverage measurement for Python. + +.. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg + :target: https://vshymanskyy.github.io/StandWithUkraine + :alt: Stand with Ukraine + +------------- + +| |kit| |license| |versions| +| |test-status| |quality-status| |docs| |metacov| +| |tidelift| |sponsor| |stars| |mastodon-coveragepy| |mastodon-nedbat| + +Coverage.py measures code coverage, typically during test execution. It uses +the code analysis tools and tracing hooks provided in the Python standard +library to determine which lines are executable, and which have been executed. + +Coverage.py runs on these versions of Python: + +.. PYVERSIONS + +* Python 3.9 through 3.14 alpha 1, including free-threading. +* PyPy3 versions 3.9 and 3.10. + +Documentation is on `Read the Docs`_. Code repository and issue tracker are on +`GitHub`_. + +.. _Read the Docs: https://coverage.readthedocs.io/en/7.6.4/ +.. 
_GitHub: https://github.com/nedbat/coveragepy + +**New in 7.x:** +multi-line exclusion patterns; +function/class reporting; +experimental support for sys.monitoring; +dropped support for Python 3.7 and 3.8; +added ``Coverage.collect()`` context manager; +improved data combining; +``[run] exclude_also`` setting; +``report --format=``; +type annotations. + +**New in 6.x:** +dropped support for Python 2.7, 3.5, and 3.6; +write data on SIGTERM; +added support for 3.10 match/case statements. + + +For Enterprise +-------------- + +.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png + :alt: Tidelift + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + +.. list-table:: + :widths: 10 100 + + * - |tideliftlogo| + - `Available as part of the Tidelift Subscription. `_ + Coverage and thousands of other packages are working with + Tidelift to deliver one enterprise subscription that covers all of the open + source you use. If you want the flexibility of open source and the confidence + of commercial-grade software, this is for you. + `Learn more. `_ + + +Getting Started +--------------- + +Looking to run ``coverage`` on your test suite? See the `Quick Start section`_ +of the docs. + +.. _Quick Start section: https://coverage.readthedocs.io/en/7.6.4/#quick-start + + +Change history +-------------- + +The complete history of changes is on the `change history page`_. + +.. _change history page: https://coverage.readthedocs.io/en/7.6.4/changes.html + + +Code of Conduct +--------------- + +Everyone participating in the coverage.py project is expected to treat other +people with respect and to follow the guidelines articulated in the `Python +Community Code of Conduct`_. + +.. _Python Community Code of Conduct: https://www.python.org/psf/codeofconduct/ + + +Contributing +------------ + +Found a bug? Want to help improve the code or documentation? See the +`Contributing section`_ of the docs. + +.. _Contributing section: https://coverage.readthedocs.io/en/7.6.4/contributing.html + + +Security +-------- + +To report a security vulnerability, please use the `Tidelift security +contact`_. Tidelift will coordinate the fix and disclosure. + +.. _Tidelift security contact: https://tidelift.com/security + + +License +------- + +Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_. + +.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 +.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + + +.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push + :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml + :alt: Test suite status +.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push + :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml + :alt: Quality check status +.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat + :target: https://coverage.readthedocs.io/en/7.6.4/ + :alt: Documentation +.. |kit| image:: https://img.shields.io/pypi/v/coverage + :target: https://pypi.org/project/coverage/ + :alt: PyPI status +.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072 + :target: https://pypi.org/project/coverage/ + :alt: Python versions supported +.. 
|license| image:: https://img.shields.io/pypi/l/coverage.svg + :target: https://pypi.org/project/coverage/ + :alt: License +.. |metacov| image:: https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/nedbat/8c6980f77988a327348f9b02bbaf67f5/raw/metacov.json + :target: https://nedbat.github.io/coverage-reports/latest.html + :alt: Coverage reports +.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage + :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme + :alt: Tidelift +.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github + :target: https://github.com/nedbat/coveragepy/stargazers + :alt: GitHub stars +.. |mastodon-nedbat| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&label=@nedbat&query=followers_count&url=https%3A%2F%2Fhachyderm.io%2Fapi%2Fv1%2Faccounts%2Flookup%3Facct=nedbat + :target: https://hachyderm.io/@nedbat + :alt: nedbat on Mastodon +.. |mastodon-coveragepy| image:: https://img.shields.io/badge/dynamic/json?style=flat&labelColor=450657&logo=mastodon&logoColor=ffffff&label=@coveragepy&query=followers_count&url=https%3A%2F%2Fhachyderm.io%2Fapi%2Fv1%2Faccounts%2Flookup%3Facct=coveragepy + :target: https://hachyderm.io/@coveragepy + :alt: coveragepy on Mastodon +.. |sponsor| image:: https://img.shields.io/badge/%E2%9D%A4-Sponsor%20me-brightgreen?style=flat&logo=GitHub + :target: https://github.com/sponsors/nedbat + :alt: Sponsor me on GitHub diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/RECORD b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/RECORD new file mode 100644 index 00000000..0d759579 --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/RECORD @@ -0,0 +1,104 @@ +../../Scripts/coverage-3.12.exe,sha256=CxGbiS_SJniVJdBsS-u-xNIL8AbrCT9sAliJL3ttbRg,108427 +../../Scripts/coverage.exe,sha256=CxGbiS_SJniVJdBsS-u-xNIL8AbrCT9sAliJL3ttbRg,108427 +../../Scripts/coverage3.exe,sha256=CxGbiS_SJniVJdBsS-u-xNIL8AbrCT9sAliJL3ttbRg,108427 +coverage-7.6.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +coverage-7.6.4.dist-info/LICENSE.txt,sha256=6z17VIVGasvYHytJb1latjfSeS4mggayfZnnk722dUk,10351 +coverage-7.6.4.dist-info/METADATA,sha256=PHuR-BG7Gh5L-TkHQTvA6y-1qQHEfIFZpCyByVP52P0,8404 +coverage-7.6.4.dist-info/RECORD,, +coverage-7.6.4.dist-info/WHEEL,sha256=62QJgqtUFevqILau0n0UncooEMoOyVCKVQitJpcuCig,101 +coverage-7.6.4.dist-info/entry_points.txt,sha256=s7x_4Bg6sI_AjEov0yLrWDOVR__vCWpFoIGw-MZk2qA,123 +coverage-7.6.4.dist-info/top_level.txt,sha256=BjhyiIvusb5OJkqCXjRncTF3soKF-mDOby-hxkWwwv0,9 +coverage/__init__.py,sha256=f3KZIgjkIaxJ4WZJAtfWwAHO6G1czoeyvuCOb09vRIs,1081 +coverage/__main__.py,sha256=LzQl-dAzS04IRHO8f2hyW79ck5g68kO13-9Ez-nHKGQ,303 +coverage/__pycache__/__init__.cpython-312.pyc,, +coverage/__pycache__/__main__.cpython-312.pyc,, +coverage/__pycache__/annotate.cpython-312.pyc,, +coverage/__pycache__/bytecode.cpython-312.pyc,, +coverage/__pycache__/cmdline.cpython-312.pyc,, +coverage/__pycache__/collector.cpython-312.pyc,, +coverage/__pycache__/config.cpython-312.pyc,, +coverage/__pycache__/context.cpython-312.pyc,, +coverage/__pycache__/control.cpython-312.pyc,, +coverage/__pycache__/core.cpython-312.pyc,, +coverage/__pycache__/data.cpython-312.pyc,, +coverage/__pycache__/debug.cpython-312.pyc,, +coverage/__pycache__/disposition.cpython-312.pyc,, +coverage/__pycache__/env.cpython-312.pyc,, 
+coverage/__pycache__/exceptions.cpython-312.pyc,, +coverage/__pycache__/execfile.cpython-312.pyc,, +coverage/__pycache__/files.cpython-312.pyc,, +coverage/__pycache__/html.cpython-312.pyc,, +coverage/__pycache__/inorout.cpython-312.pyc,, +coverage/__pycache__/jsonreport.cpython-312.pyc,, +coverage/__pycache__/lcovreport.cpython-312.pyc,, +coverage/__pycache__/misc.cpython-312.pyc,, +coverage/__pycache__/multiproc.cpython-312.pyc,, +coverage/__pycache__/numbits.cpython-312.pyc,, +coverage/__pycache__/parser.cpython-312.pyc,, +coverage/__pycache__/phystokens.cpython-312.pyc,, +coverage/__pycache__/plugin.cpython-312.pyc,, +coverage/__pycache__/plugin_support.cpython-312.pyc,, +coverage/__pycache__/python.cpython-312.pyc,, +coverage/__pycache__/pytracer.cpython-312.pyc,, +coverage/__pycache__/regions.cpython-312.pyc,, +coverage/__pycache__/report.cpython-312.pyc,, +coverage/__pycache__/report_core.cpython-312.pyc,, +coverage/__pycache__/results.cpython-312.pyc,, +coverage/__pycache__/sqldata.cpython-312.pyc,, +coverage/__pycache__/sqlitedb.cpython-312.pyc,, +coverage/__pycache__/sysmon.cpython-312.pyc,, +coverage/__pycache__/templite.cpython-312.pyc,, +coverage/__pycache__/tomlconfig.cpython-312.pyc,, +coverage/__pycache__/types.cpython-312.pyc,, +coverage/__pycache__/version.cpython-312.pyc,, +coverage/__pycache__/xmlreport.cpython-312.pyc,, +coverage/annotate.py,sha256=9vMpWp83DsQScDTWDT6pL8vIC7iUSSTRs4zCOac0TNk,3881 +coverage/bytecode.py,sha256=DInUGd7brsv5mdmGdQmAR79yHAN3H14ccitcPIzV7Qc,744 +coverage/cmdline.py,sha256=8OoJ_sEjXF4zdz8VgIlXA-o6atsdXQI8Cbdum_7An_A,35254 +coverage/collector.py,sha256=UfezPlmAPjOpNhgE6PbdYUr4gK9Zi2mPoEJ38XMPZEA,19972 +coverage/config.py,sha256=wazyeHPbB14ahCNXHIzhtxaVfJc-gw4RDhH2B5rQSLQ,22926 +coverage/context.py,sha256=lb8B-58yW8du56inutIeiZuTxdNr7TfJi1JddvRwQhg,2573 +coverage/control.py,sha256=K8-_8eBY0QfKqd_Hq3T-5IPeCHu0XlTdLWX1_ub10XM,53355 +coverage/core.py,sha256=rpwLpoGnOUeAWfgjRETjmApuB2SeMCj7C5xFDLdnZtQ,3554 +coverage/data.py,sha256=TNWtGx3noa_FBYFB0QUt3a8YDTkJyij3ux9Rt_nWgGA,8297 +coverage/debug.py,sha256=5n1QX0ytnfQFk76j3q6o4lV_E7Ys7iwi_7OiVNGDHzU,21310 +coverage/disposition.py,sha256=xb-zvwp_42zbePVis4Y_m_xjOyHcM6zmTGM1dn7TMv0,1952 +coverage/env.py,sha256=Gx_jWawfU_aZTWgnzGpGhxZEWW-lv8zcgCH7vuxMQc4,4590 +coverage/exceptions.py,sha256=QeimYAr2NgdcvWceOX8ull-66maTv2zz7UZ7ZFQUh9A,1460 +coverage/execfile.py,sha256=zkZJId_n4PEGuuWqQK8PXoBvYlzqS57titVcbbVJMKI,12214 +coverage/files.py,sha256=0qz-Jfiw6pYNFeKG9sIu42D3nq9HRLzzzutFSPSH5Qc,19942 +coverage/html.py,sha256=bKIe8ojFHvZDyYT6EFrSU3zcA3KfDgmytqj3J4on7dE,30147 +coverage/htmlfiles/coverage_html.js,sha256=PqDTAlVdIaB9gIjEf6JHHysMm_D7HyRe4BzQFfpf3OM,26207 +coverage/htmlfiles/favicon_32.png,sha256=vIEA-odDwRvSQ-syWfSwEnWGUWEv2b-Tv4tzTRfwJWE,1732 +coverage/htmlfiles/index.html,sha256=eciDXoye0zDRlWUY5q4HHlE1FPVG4_y0NZ9_OIwaQ0E,7005 +coverage/htmlfiles/keybd_closed.png,sha256=fZv4rmY3DkNJtPQjrFJ5UBOE5DdNof3mdeCZWC7TOoo,9004 +coverage/htmlfiles/pyfile.html,sha256=dJV8bc3mMQz6J_N2KxVMXNKVge6vnMiQiqqe1QYuZmw,6643 +coverage/htmlfiles/style.css,sha256=hEsJogNnNUUQOvNJRwR2OqJhmcsn03tRhuNWoc2g3VQ,14414 +coverage/htmlfiles/style.scss,sha256=VtMzE1jNJhiN0Ih6mhJ0niyxr9UIYyYtirIe-QVq_vs,19217 +coverage/inorout.py,sha256=0JkiC3qqeQX-h38W4EdYu49C2RN9IuJaiEfVns8vSyc,24382 +coverage/jsonreport.py,sha256=bkl2DggQRaxL5p2geBt0Vd1ffgiI-vjj1HUTY5QMklw,6919 +coverage/lcovreport.py,sha256=5vgS_HkT_Rq5gCxLKUyv79h-hTdk7qoTPUS0dE621rg,8127 +coverage/misc.py,sha256=zQUXYuWhfi5W5PIx2OU9Lxj2NgPEyy5j7IZUY6lBspQ,11591 
+coverage/multiproc.py,sha256=qAxRi2Jc19a7DVp1hgUZP6JyNF5l3nLmjGcBD-aiZ08,4310 +coverage/numbits.py,sha256=YWRSkT-8872C-XtEzxTl1Uk9aoFKUjFbdKkNtYPKGEc,4819 +coverage/parser.py,sha256=6cf8nVciZcAyVmDgIN6a31ZsnAauFC46THCSttILBog,51915 +coverage/phystokens.py,sha256=R989TrsFcuZx7x2kBOZmOyQc7z_7QrA3d1cWcRBEGas,7735 +coverage/plugin.py,sha256=V1h7GiWKYGpZYISOHfr69lPK4K2f5XYa0o60XtrvVSk,22214 +coverage/plugin_support.py,sha256=T9B3CF6OdI07ICQoq18AaCRj-h3xBkOxz8FEuSlsbtc,10609 +coverage/py.typed,sha256=QhKiyYY18iX2PTjtgwSYebGpYvH_m0bYEyU_kMST7EU,73 +coverage/python.py,sha256=02kieGCrvS_DQhE3xIHZkJX9__ItYR5K9mm55eQ7-xU,8745 +coverage/pytracer.py,sha256=I-11n12PEO4OHv4vVUn7CmZixmU_HtN-P87edbL2b3I,15448 +coverage/regions.py,sha256=5ls28y7vlhby3m-Vs6vuvT3u61b23ivL1x6zrf_LYAY,4623 +coverage/report.py,sha256=C-Gp3GBBUQ-NUEwCTzEjj_j-JwdHceNkjdUJMnSU5QE,10876 +coverage/report_core.py,sha256=Ar6U6I3hf8Pu8jIfYmW-MXmlrlcfTzt2r8FeVW9oBvA,4196 +coverage/results.py,sha256=-0bkb0tc-CSCvLvuMMOwOa_hewtSn8N7iVCdZoFCEeI,14120 +coverage/sqldata.py,sha256=XvLEju-FpfFoy-EZffWQLaCgLlw23w5beM3J4xWU1Vk,44615 +coverage/sqlitedb.py,sha256=SVQ0qLHKWTZgxgw59LvZOpxAiNN7MZXn4Vy3RBki3n4,9931 +coverage/sysmon.py,sha256=YtfeO_RdLL14YElF53DP8onD_xNgh03xhFjj8idfUfA,16236 +coverage/templite.py,sha256=CrVt52hhVQxkEY7R9-LV5MBrHnDAoe8xBG1XYIwUPlc,11114 +coverage/tomlconfig.py,sha256=eqhqyA2qgYBsq5Xr7R4EPcXNnc477KnTY6hIRta-9Sk,7760 +coverage/tracer.cp312-win_amd64.pyd,sha256=5hn0M4ud0JJ5vLpWD9vsF4i8U_Dfr_iaQw7nbguV5nY,22528 +coverage/tracer.pyi,sha256=A_x3UrAuonDZaQlhcaTJCOA4YgsgLXnG1ZffeNGZ6OM,1244 +coverage/types.py,sha256=HTk2YiapshzP2zc5Xu4wJirIOc3hDtiGKHGmXEwZcQM,5851 +coverage/version.py,sha256=7NNQ2rxA8QYAwSN9atjuoQI5ddRTgO4moQU6Uq7HpIg,1481 +coverage/xmlreport.py,sha256=R2-2XLNcJuOUydaB5KnVvlCJu3H3vGhxJI0erdm11Qo,10063 diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/WHEEL b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/WHEEL new file mode 100644 index 00000000..bd4c259e --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.2.0) +Root-Is-Purelib: false +Tag: cp312-cp312-win_amd64 + diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/entry_points.txt b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/entry_points.txt new file mode 100644 index 00000000..242b4774 --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +coverage = coverage.cmdline:main +coverage-3.12 = coverage.cmdline:main +coverage3 = coverage.cmdline:main diff --git a/.venv/Lib/site-packages/coverage-7.6.4.dist-info/top_level.txt b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/top_level.txt new file mode 100644 index 00000000..4ebc8aea --- /dev/null +++ b/.venv/Lib/site-packages/coverage-7.6.4.dist-info/top_level.txt @@ -0,0 +1 @@ +coverage diff --git a/.venv/Lib/site-packages/coverage/__init__.py b/.venv/Lib/site-packages/coverage/__init__.py new file mode 100644 index 00000000..1bda8921 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/__init__.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Code coverage measurement for Python. + +Ned Batchelder +https://coverage.readthedocs.io + +""" + +from __future__ import annotations + +# mypy's convention is that "import as" names are public from the module. +# We import names as themselves to indicate that. 
Pylint sees it as pointless, +# so disable its warning. +# pylint: disable=useless-import-alias + +from coverage.version import ( + __version__ as __version__, + version_info as version_info, +) + +from coverage.control import ( + Coverage as Coverage, + process_startup as process_startup, +) +from coverage.data import CoverageData as CoverageData +from coverage.exceptions import CoverageException as CoverageException +from coverage.plugin import ( + CodeRegion as CodeRegion, + CoveragePlugin as CoveragePlugin, + FileReporter as FileReporter, + FileTracer as FileTracer, +) + +# Backward compatibility. +coverage = Coverage diff --git a/.venv/Lib/site-packages/coverage/__main__.py b/.venv/Lib/site-packages/coverage/__main__.py new file mode 100644 index 00000000..ce2d8dbd --- /dev/null +++ b/.venv/Lib/site-packages/coverage/__main__.py @@ -0,0 +1,10 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Coverage.py's main entry point.""" + +from __future__ import annotations + +import sys +from coverage.cmdline import main +sys.exit(main()) diff --git a/.venv/Lib/site-packages/coverage/annotate.py b/.venv/Lib/site-packages/coverage/annotate.py new file mode 100644 index 00000000..13d44b21 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/annotate.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Source file annotation for coverage.py.""" + +from __future__ import annotations + +import os +import re + +from typing import TYPE_CHECKING +from collections.abc import Iterable + +from coverage.files import flat_rootname +from coverage.misc import ensure_dir, isolate_module +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +class AnnotateReporter: + """Generate annotated source files showing line coverage. + + This reporter creates annotated copies of the measured source files. Each + .py file is copied as a .py,cover file, with a left-hand margin annotating + each line:: + + > def h(x): + - if 0: #pragma: no cover + - pass + > if x == 1: + ! a = 1 + > else: + > a = 2 + + > h(2) + + Executed lines use ">", lines not executed use "!", lines excluded from + consideration use "-". + + """ + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.directory: str | None = None + + blank_re = re.compile(r"\s*(#|$)") + else_re = re.compile(r"\s*else\s*:\s*(#|$)") + + def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None: + """Run the report. + + See `coverage.report()` for arguments. + + """ + self.directory = directory + self.coverage.get_data() + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.annotate_file(fr, analysis) + + def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Annotate a single file. + + `fr` is the FileReporter for the file to annotate. 
+ + """ + statements = sorted(analysis.statements) + missing = sorted(analysis.missing) + excluded = sorted(analysis.excluded) + + if self.directory: + ensure_dir(self.directory) + dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) + if dest_file.endswith("_py"): + dest_file = dest_file[:-3] + ".py" + dest_file += ",cover" + else: + dest_file = fr.filename + ",cover" + + with open(dest_file, "w", encoding="utf-8") as dest: + i = j = 0 + covered = True + source = fr.source() + for lineno, line in enumerate(source.splitlines(True), start=1): + while i < len(statements) and statements[i] < lineno: + i += 1 + while j < len(missing) and missing[j] < lineno: + j += 1 + if i < len(statements) and statements[i] == lineno: + covered = j >= len(missing) or missing[j] > lineno + if self.blank_re.match(line): + dest.write(" ") + elif self.else_re.match(line): + # Special logic for lines containing only "else:". + if j >= len(missing): + dest.write("> ") + elif statements[i] == missing[j]: + dest.write("! ") + else: + dest.write("> ") + elif lineno in excluded: + dest.write("- ") + elif covered: + dest.write("> ") + else: + dest.write("! ") + + dest.write(line) diff --git a/.venv/Lib/site-packages/coverage/bytecode.py b/.venv/Lib/site-packages/coverage/bytecode.py new file mode 100644 index 00000000..764b29b8 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/bytecode.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Bytecode manipulation for coverage.py""" + +from __future__ import annotations + +from types import CodeType +from collections.abc import Iterator + + +def code_objects(code: CodeType) -> Iterator[CodeType]: + """Iterate over all the code objects in `code`.""" + stack = [code] + while stack: + # We're going to return the code object on the stack, but first + # push its children for later returning. + code = stack.pop() + for c in code.co_consts: + if isinstance(c, CodeType): + stack.append(c) + yield code diff --git a/.venv/Lib/site-packages/coverage/cmdline.py b/.venv/Lib/site-packages/coverage/cmdline.py new file mode 100644 index 00000000..7c01ccbf --- /dev/null +++ b/.venv/Lib/site-packages/coverage/cmdline.py @@ -0,0 +1,1009 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Command-line support for coverage.py.""" + +from __future__ import annotations + +import glob +import optparse # pylint: disable=deprecated-module +import os +import os.path +import shlex +import sys +import textwrap +import traceback + +from typing import cast, Any, NoReturn + +import coverage +from coverage import Coverage +from coverage import env +from coverage.config import CoverageConfig +from coverage.control import DEFAULT_DATAFILE +from coverage.core import HAS_CTRACER +from coverage.data import combinable_files, debug_data_file +from coverage.debug import info_header, short_stack, write_formatted_info +from coverage.exceptions import _BaseCoverageException, _ExceptionDuringRun, NoSource +from coverage.execfile import PyRunner +from coverage.results import display_covered, should_fail_under +from coverage.version import __url__ + +# When adding to this file, alphabetization is important. Look for +# "alphabetize" comments throughout. 
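
The option machinery below is plain stdlib optparse. A minimal sketch of the pattern, reusing the same -a/--append flag that Opts defines just below; the sketch is illustrative and not part of the patch:

import optparse

append = optparse.make_option(
    "-a", "--append", action="store_true",
    help="Append coverage data to .coverage.",
)
parser = optparse.OptionParser(add_help_option=False)
parser.add_option(append)
options, args = parser.parse_args(["-a", "prog.py"])
assert options.append is True and args == ["prog.py"]
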
+ +class Opts: + """A namespace class for individual options we'll build parsers from.""" + + # Keep these entries alphabetized (roughly) by the option name as it + # appears on the command line. + + append = optparse.make_option( + "-a", "--append", action="store_true", + help="Append coverage data to .coverage, otherwise it starts clean each time.", + ) + branch = optparse.make_option( + "", "--branch", action="store_true", + help="Measure branch coverage in addition to statement coverage.", + ) + concurrency = optparse.make_option( + "", "--concurrency", action="store", metavar="LIBS", + help=( + "Properly measure code using a concurrency library. " + + "Valid values are: {}, or a comma-list of them." + ).format(", ".join(sorted(CoverageConfig.CONCURRENCY_CHOICES))), + ) + context = optparse.make_option( + "", "--context", action="store", metavar="LABEL", + help="The context label to record for this coverage run.", + ) + contexts = optparse.make_option( + "", "--contexts", action="store", metavar="REGEX1,REGEX2,...", + help=( + "Only display data from lines covered in the given contexts. " + + "Accepts Python regexes, which must be quoted." + ), + ) + datafile = optparse.make_option( + "", "--data-file", action="store", metavar="DATAFILE", + help=( + "Base name of the data files to operate on. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + datafle_input = optparse.make_option( + "", "--data-file", action="store", metavar="INFILE", + help=( + "Read coverage data for report generation from this file. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + datafile_output = optparse.make_option( + "", "--data-file", action="store", metavar="OUTFILE", + help=( + "Write the recorded coverage data to this file. " + + "Defaults to '.coverage'. [env: COVERAGE_FILE]" + ), + ) + debug = optparse.make_option( + "", "--debug", action="store", metavar="OPTS", + help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", + ) + directory = optparse.make_option( + "-d", "--directory", action="store", metavar="DIR", + help="Write the output files to DIR.", + ) + fail_under = optparse.make_option( + "", "--fail-under", action="store", metavar="MIN", type="float", + help="Exit with a status of 2 if the total coverage is less than MIN.", + ) + format = optparse.make_option( + "", "--format", action="store", metavar="FORMAT", + help="Output format, either text (default), markdown, or total.", + ) + help = optparse.make_option( + "-h", "--help", action="store_true", + help="Get help on this command.", + ) + ignore_errors = optparse.make_option( + "-i", "--ignore-errors", action="store_true", + help="Ignore errors while reading source files.", + ) + include = optparse.make_option( + "", "--include", action="store", metavar="PAT1,PAT2,...", + help=( + "Include only files whose paths match one of these patterns. " + + "Accepts shell-style wildcards, which must be quoted." + ), + ) + keep = optparse.make_option( + "", "--keep", action="store_true", + help="Keep original coverage files, otherwise they are deleted.", + ) + pylib = optparse.make_option( + "-L", "--pylib", action="store_true", + help=( + "Measure coverage even inside the Python installed library, " + + "which isn't done by default." 
+ ), + ) + show_missing = optparse.make_option( + "-m", "--show-missing", action="store_true", + help="Show line numbers of statements in each module that weren't executed.", + ) + module = optparse.make_option( + "-m", "--module", action="store_true", + help=( + " is an importable Python module, not a script path, " + + "to be run as 'python -m' would run it." + ), + ) + omit = optparse.make_option( + "", "--omit", action="store", metavar="PAT1,PAT2,...", + help=( + "Omit files whose paths match one of these patterns. " + + "Accepts shell-style wildcards, which must be quoted." + ), + ) + output_xml = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the XML report to this file. Defaults to 'coverage.xml'", + ) + output_json = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the JSON report to this file. Defaults to 'coverage.json'", + ) + output_lcov = optparse.make_option( + "-o", "", action="store", dest="outfile", metavar="OUTFILE", + help="Write the LCOV report to this file. Defaults to 'coverage.lcov'", + ) + json_pretty_print = optparse.make_option( + "", "--pretty-print", action="store_true", + help="Format the JSON for human readers.", + ) + parallel_mode = optparse.make_option( + "-p", "--parallel-mode", action="store_true", + help=( + "Append the machine name, process id and random number to the " + + "data file name to simplify collecting data from " + + "many processes." + ), + ) + precision = optparse.make_option( + "", "--precision", action="store", metavar="N", type=int, + help=( + "Number of digits after the decimal point to display for " + + "reported coverage percentages." + ), + ) + quiet = optparse.make_option( + "-q", "--quiet", action="store_true", + help="Don't print messages about what is happening.", + ) + rcfile = optparse.make_option( + "", "--rcfile", action="store", + help=( + "Specify configuration file. " + + "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " + + "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" + ), + ) + show_contexts = optparse.make_option( + "--show-contexts", action="store_true", + help="Show contexts for covered lines.", + ) + skip_covered = optparse.make_option( + "--skip-covered", action="store_true", + help="Skip files with 100% coverage.", + ) + no_skip_covered = optparse.make_option( + "--no-skip-covered", action="store_false", dest="skip_covered", + help="Disable --skip-covered.", + ) + skip_empty = optparse.make_option( + "--skip-empty", action="store_true", + help="Skip files with no code.", + ) + sort = optparse.make_option( + "--sort", action="store", metavar="COLUMN", + help=( + "Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " + + "Default is name." + ), + ) + source = optparse.make_option( + "", "--source", action="store", metavar="SRC1,SRC2,...", + help="A list of directories or importable names of code to measure.", + ) + timid = optparse.make_option( + "", "--timid", action="store_true", + help="Use the slower Python trace function core.", + ) + title = optparse.make_option( + "", "--title", action="store", metavar="TITLE", + help="A text string to use as the title on the HTML.", + ) + version = optparse.make_option( + "", "--version", action="store_true", + help="Display version information and exit.", + ) + + +class CoverageOptionParser(optparse.OptionParser): + """Base OptionParser for coverage.py. + + Problems don't exit the program. 
+ Defaults are initialized for all options. + + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + kwargs["add_help_option"] = False + super().__init__(*args, **kwargs) + self.set_defaults( + # Keep these arguments alphabetized by their names. + action=None, + append=None, + branch=None, + concurrency=None, + context=None, + contexts=None, + data_file=None, + debug=None, + directory=None, + fail_under=None, + format=None, + help=None, + ignore_errors=None, + include=None, + keep=None, + module=None, + omit=None, + parallel_mode=None, + precision=None, + pylib=None, + quiet=None, + rcfile=True, + show_contexts=None, + show_missing=None, + skip_covered=None, + skip_empty=None, + sort=None, + source=None, + timid=None, + title=None, + version=None, + ) + + self.disable_interspersed_args() + + class OptionParserError(Exception): + """Used to stop the optparse error handler ending the process.""" + pass + + def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]: + """Call optparse.parse_args, but return a triple: + + (ok, options, args) + + """ + try: + options, args = super().parse_args(args) + except self.OptionParserError: + return False, None, [] + return True, options, args + + def error(self, msg: str) -> NoReturn: + """Override optparse.error so sys.exit doesn't get called.""" + show_help(msg) + raise self.OptionParserError + + +class GlobalOptionParser(CoverageOptionParser): + """Command-line parser for coverage.py global option arguments.""" + + def __init__(self) -> None: + super().__init__() + + self.add_options([ + Opts.help, + Opts.version, + ]) + + +class CmdOptionParser(CoverageOptionParser): + """Parse one of the new-style commands for coverage.py.""" + + def __init__( + self, + action: str, + options: list[optparse.Option], + description: str, + usage: str | None = None, + ): + """Create an OptionParser for a coverage.py command. + + `action` is the slug to put into `options.action`. + `options` is a list of Option's for the command. + `description` is the description of the command, for the help text. + `usage` is the usage string to display in help. + + """ + if usage: + usage = "%prog " + usage + super().__init__( + usage=usage, + description=description, + ) + self.set_defaults(action=action) + self.add_options(options) + self.cmd = action + + def __eq__(self, other: str) -> bool: # type: ignore[override] + # A convenience equality, so that I can put strings in unit test + # results, and they will compare equal to objects. + return (other == f"") + + __hash__ = None # type: ignore[assignment] + + def get_prog_name(self) -> str: + """Override of an undocumented function in optparse.OptionParser.""" + program_name = super().get_prog_name() + + # Include the sub-command for this parser as part of the command. + return f"{program_name} {self.cmd}" + +# In lists of Opts, keep them alphabetized by the option names as they appear +# on the command line, since these lists determine the order of the options in +# the help output. +# +# In COMMANDS, keep the keys (command names) alphabetized. + +GLOBAL_ARGS = [ + Opts.debug, + Opts.help, + Opts.rcfile, +] + +COMMANDS = { + "annotate": CmdOptionParser( + "annotate", + [ + Opts.directory, + Opts.datafle_input, + Opts.ignore_errors, + Opts.include, + Opts.omit, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Make annotated copies of the given files, marking statements that are executed " + + "with > and statements that are missed with !." 
+ ), + ), + + "combine": CmdOptionParser( + "combine", + [ + Opts.append, + Opts.datafile, + Opts.keep, + Opts.quiet, + ] + GLOBAL_ARGS, + usage="[options] ... ", + description=( + "Combine data from multiple coverage files. " + + "The combined results are written to a single " + + "file representing the union of the data. The positional " + + "arguments are data files or directories containing data files. " + + "If no paths are provided, data files in the default data file's " + + "directory are combined." + ), + ), + + "debug": CmdOptionParser( + "debug", GLOBAL_ARGS, + usage="", + description=( + "Display information about the internals of coverage.py, " + + "for diagnosing problems. " + + "Topics are: " + + "'data' to show a summary of the collected data; " + + "'sys' to show installation information; " + + "'config' to show the configuration; " + + "'premain' to show what is calling coverage; " + + "'pybehave' to show internal flags describing Python behavior." + ), + ), + + "erase": CmdOptionParser( + "erase", + [ + Opts.datafile, + ] + GLOBAL_ARGS, + description="Erase previously collected coverage data.", + ), + + "help": CmdOptionParser( + "help", GLOBAL_ARGS, + usage="[command]", + description="Describe how to use coverage.py", + ), + + "html": CmdOptionParser( + "html", + [ + Opts.contexts, + Opts.directory, + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.quiet, + Opts.show_contexts, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + Opts.title, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description=( + "Create an HTML report of the coverage of the files. " + + "Each file gets its own page, with the source decorated to show " + + "executed, excluded, and missed lines." 
+ ), + ), + + "json": CmdOptionParser( + "json", + [ + Opts.contexts, + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_json, + Opts.json_pretty_print, + Opts.quiet, + Opts.show_contexts, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate a JSON report of coverage results.", + ), + + "lcov": CmdOptionParser( + "lcov", + [ + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.output_lcov, + Opts.omit, + Opts.quiet, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an LCOV report of coverage results.", + ), + + "report": CmdOptionParser( + "report", + [ + Opts.contexts, + Opts.datafle_input, + Opts.fail_under, + Opts.format, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.precision, + Opts.sort, + Opts.show_missing, + Opts.skip_covered, + Opts.no_skip_covered, + Opts.skip_empty, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Report coverage statistics on modules.", + ), + + "run": CmdOptionParser( + "run", + [ + Opts.append, + Opts.branch, + Opts.concurrency, + Opts.context, + Opts.datafile_output, + Opts.include, + Opts.module, + Opts.omit, + Opts.pylib, + Opts.parallel_mode, + Opts.source, + Opts.timid, + ] + GLOBAL_ARGS, + usage="[options] [program options]", + description="Run a Python program, measuring code execution.", + ), + + "xml": CmdOptionParser( + "xml", + [ + Opts.datafle_input, + Opts.fail_under, + Opts.ignore_errors, + Opts.include, + Opts.omit, + Opts.output_xml, + Opts.quiet, + Opts.skip_empty, + ] + GLOBAL_ARGS, + usage="[options] [modules]", + description="Generate an XML report of coverage results.", + ), +} + + +def show_help( + error: str | None = None, + topic: str | None = None, + parser: optparse.OptionParser | None = None, +) -> None: + """Display an error message, or the named topic.""" + assert error or topic or parser + + program_path = sys.argv[0] + if program_path.endswith(os.path.sep + "__main__.py"): + # The path is the main module of a package; get that path instead. + program_path = os.path.dirname(program_path) + program_name = os.path.basename(program_path) + if env.WINDOWS: + # entry_points={"console_scripts":...} on Windows makes files + # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These + # invoke coverage-script.py, coverage3-script.py, and + # coverage-3.5-script.py. argv[0] is the .py file, but we want to + # get back to the original form. 
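+        # e.g. an argv[0] of "coverage-script.py" should report as "coverage".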
+        auto_suffix = "-script.py"
+        if program_name.endswith(auto_suffix):
+            program_name = program_name[:-len(auto_suffix)]
+
+    help_params = dict(coverage.__dict__)
+    help_params["__url__"] = __url__
+    help_params["program_name"] = program_name
+    if HAS_CTRACER:
+        help_params["extension_modifier"] = "with C extension"
+    else:
+        help_params["extension_modifier"] = "without C extension"
+
+    if error:
+        print(error, file=sys.stderr)
+        print(f"Use '{program_name} help' for help.", file=sys.stderr)
+    elif parser:
+        print(parser.format_help().strip())
+        print()
+    else:
+        assert topic is not None
+        help_msg = textwrap.dedent(HELP_TOPICS.get(topic, "")).strip()
+        if help_msg:
+            print(help_msg.format(**help_params))
+        else:
+            print(f"Don't know topic {topic!r}")
+    print("Full documentation is at {__url__}".format(**help_params))
+
+
+OK, ERR, FAIL_UNDER = 0, 1, 2
+
+
+class CoverageScript:
+    """The command-line interface to coverage.py."""
+
+    def __init__(self) -> None:
+        self.global_option = False
+        self.coverage: Coverage
+
+    def command_line(self, argv: list[str]) -> int:
+        """The bulk of the command line interface to coverage.py.
+
+        `argv` is the argument list to process.
+
+        Returns 0 if all is well, 1 if something went wrong.
+
+        """
+        # Collect the command-line options.
+        if not argv:
+            show_help(topic="minimum_help")
+            return OK
+
+        # The command syntax we parse depends on the first argument. Global
+        # switch syntax always starts with an option.
+        parser: optparse.OptionParser | None
+        self.global_option = argv[0].startswith("-")
+        if self.global_option:
+            parser = GlobalOptionParser()
+        else:
+            parser = COMMANDS.get(argv[0])
+            if not parser:
+                show_help(f"Unknown command: {argv[0]!r}")
+                return ERR
+            argv = argv[1:]
+
+        ok, options, args = parser.parse_args_ok(argv)
+        if not ok:
+            return ERR
+        assert options is not None
+
+        # Handle help and version.
+        if self.do_help(options, args, parser):
+            return OK
+
+        # Listify the list options.
+        source = unshell_list(options.source)
+        omit = unshell_list(options.omit)
+        include = unshell_list(options.include)
+        debug = unshell_list(options.debug)
+        contexts = unshell_list(options.contexts)
+
+        if options.concurrency is not None:
+            concurrency = options.concurrency.split(",")
+        else:
+            concurrency = None
+
+        # Do something.
+        self.coverage = Coverage(
+            data_file=options.data_file or DEFAULT_DATAFILE,
+            data_suffix=options.parallel_mode,
+            cover_pylib=options.pylib,
+            timid=options.timid,
+            branch=options.branch,
+            config_file=options.rcfile,
+            source=source,
+            omit=omit,
+            include=include,
+            debug=debug,
+            concurrency=concurrency,
+            check_preimported=True,
+            context=options.context,
+            messages=not options.quiet,
+        )
+
+        if options.action == "debug":
+            return self.do_debug(args)
+
+        elif options.action == "erase":
+            self.coverage.erase()
+            return OK
+
+        elif options.action == "run":
+            return self.do_run(options, args)
+
+        elif options.action == "combine":
+            if options.append:
+                self.coverage.load()
+            data_paths = args or None
+            self.coverage.combine(data_paths, strict=True, keep=bool(options.keep))
+            self.coverage.save()
+            return OK
+
+        # Remaining actions are reporting, with some common options.
+        report_args = dict(
+            morfs=unglob_args(args),
+            ignore_errors=options.ignore_errors,
+            omit=omit,
+            include=include,
+            contexts=contexts,
+        )
+
+        # We need to be able to import from the current directory, because
+        # plugins may try, for example, to read Django settings.
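+        # (In sys.path, the empty string entry means the current directory.)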
+ sys.path.insert(0, "") + + self.coverage.load() + + total = None + if options.action == "report": + total = self.coverage.report( + precision=options.precision, + show_missing=options.show_missing, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + sort=options.sort, + output_format=options.format, + **report_args, + ) + elif options.action == "annotate": + self.coverage.annotate(directory=options.directory, **report_args) + elif options.action == "html": + total = self.coverage.html_report( + directory=options.directory, + precision=options.precision, + skip_covered=options.skip_covered, + skip_empty=options.skip_empty, + show_contexts=options.show_contexts, + title=options.title, + **report_args, + ) + elif options.action == "xml": + total = self.coverage.xml_report( + outfile=options.outfile, + skip_empty=options.skip_empty, + **report_args, + ) + elif options.action == "json": + total = self.coverage.json_report( + outfile=options.outfile, + pretty_print=options.pretty_print, + show_contexts=options.show_contexts, + **report_args, + ) + elif options.action == "lcov": + total = self.coverage.lcov_report( + outfile=options.outfile, + **report_args, + ) + else: + # There are no other possible actions. + raise AssertionError + + if total is not None: + # Apply the command line fail-under options, and then use the config + # value, so we can get fail_under from the config file. + if options.fail_under is not None: + self.coverage.set_option("report:fail_under", options.fail_under) + if options.precision is not None: + self.coverage.set_option("report:precision", options.precision) + + fail_under = cast(float, self.coverage.get_option("report:fail_under")) + precision = cast(int, self.coverage.get_option("report:precision")) + if should_fail_under(total, fail_under, precision): + msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format( + total=display_covered(total, precision), + fail_under=fail_under, + p=precision, + ) + print("Coverage failure:", msg) + return FAIL_UNDER + + return OK + + def do_help( + self, + options: optparse.Values, + args: list[str], + parser: optparse.OptionParser, + ) -> bool: + """Deal with help requests. + + Return True if it handled the request, False if not. + + """ + # Handle help. + if options.help: + if self.global_option: + show_help(topic="help") + else: + show_help(parser=parser) + return True + + if options.action == "help": + if args: + for a in args: + parser_maybe = COMMANDS.get(a) + if parser_maybe is not None: + show_help(parser=parser_maybe) + else: + show_help(topic=a) + else: + show_help(topic="help") + return True + + # Handle version. + if options.version: + show_help(topic="version") + return True + + return False + + def do_run(self, options: optparse.Values, args: list[str]) -> int: + """Implementation of 'coverage run'.""" + + if not args: + if options.module: + # Specified -m with nothing else. + show_help("No module specified for -m") + return ERR + command_line = cast(str, self.coverage.get_option("run:command_line")) + if command_line is not None: + args = shlex.split(command_line) + if args and args[0] in {"-m", "--module"}: + options.module = True + args = args[1:] + if not args: + show_help("Nothing to do.") + return ERR + + if options.append and self.coverage.get_option("run:parallel"): + show_help("Can't append to data files in parallel mode.") + return ERR + + if options.concurrency == "multiprocessing": + # Can't set other run-affecting command line options with + # multiprocessing. 
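+            # Sub-processes started by multiprocessing read their settings
+            # from the config file, not from this command line, so these
+            # options have to live in the file to take effect everywhere.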
+            for opt_name in ["branch", "include", "omit", "pylib", "source", "timid"]:
+                # As it happens, all of these options have no default, meaning
+                # they will be None if they have not been specified.
+                if getattr(options, opt_name) is not None:
+                    show_help(
+                        "Options affecting multiprocessing must only be specified "
+                        + "in a configuration file.\n"
+                        + f"Remove --{opt_name} from the command line.",
+                    )
+                    return ERR
+
+        os.environ["COVERAGE_RUN"] = "true"
+
+        runner = PyRunner(args, as_module=bool(options.module))
+        runner.prepare()
+
+        if options.append:
+            self.coverage.load()
+
+        # Run the script.
+        self.coverage.start()
+        code_ran = True
+        try:
+            runner.run()
+        except NoSource:
+            code_ran = False
+            raise
+        finally:
+            self.coverage.stop()
+            if code_ran:
+                self.coverage.save()
+
+        return OK
+
+    def do_debug(self, args: list[str]) -> int:
+        """Implementation of 'coverage debug'."""
+
+        if not args:
+            show_help("What information would you like: config, data, sys, premain, pybehave?")
+            return ERR
+        if args[1:]:
+            show_help("Only one topic at a time, please")
+            return ERR
+
+        if args[0] == "sys":
+            write_formatted_info(print, "sys", self.coverage.sys_info())
+        elif args[0] == "data":
+            print(info_header("data"))
+            data_file = self.coverage.config.data_file
+            debug_data_file(data_file)
+            for filename in combinable_files(data_file):
+                print("-----")
+                debug_data_file(filename)
+        elif args[0] == "config":
+            write_formatted_info(print, "config", self.coverage.config.debug_info())
+        elif args[0] == "premain":
+            print(info_header("premain"))
+            print(short_stack(full=True))
+        elif args[0] == "pybehave":
+            write_formatted_info(print, "pybehave", env.debug_info())
+        else:
+            show_help(f"Don't know what you mean by {args[0]!r}")
+            return ERR
+
+        return OK
+
+
+def unshell_list(s: str) -> list[str] | None:
+    """Turn a command-line argument into a list."""
+    if not s:
+        return None
+    if env.WINDOWS:
+        # When running coverage.py as coverage.exe, some of the behavior
+        # of the shell is emulated: wildcards are expanded into a list of
+        # file names. So you have to single-quote patterns on the command
+        # line, but (not) helpfully, the single quotes are included in the
+        # argument, so we have to strip them off here.
+        s = s.strip("'")
+    return s.split(",")
+
+
+def unglob_args(args: list[str]) -> list[str]:
+    """Interpret shell wildcards for platforms that need it."""
+    if env.WINDOWS:
+        globbed = []
+        for arg in args:
+            if "?" in arg or "*" in arg:
+                globbed.extend(glob.glob(arg))
+            else:
+                globbed.append(arg)
+        args = globbed
+    return args
+
+
+HELP_TOPICS = {
+    "help": """\
+        Coverage.py, version {__version__} {extension_modifier}
+        Measure, collect, and report on code coverage in Python programs.
+
+        usage: {program_name} <command> [options] [args]
+
+        Commands:
+            annotate    Annotate source files with execution information.
+            combine     Combine a number of data files.
+            debug       Display information about the internals of coverage.py
+            erase       Erase previously collected coverage data.
+            help        Get help on using coverage.py.
+            html        Create an HTML report.
+            json        Create a JSON report of coverage results.
+            lcov        Create an LCOV report of coverage results.
+            report      Report coverage stats on modules.
+            run         Run a Python program and measure code execution.
+            xml         Create an XML report of coverage results.
+
+        Use "{program_name} help <command>" for detailed help on any command.
+    """,
+
+    "minimum_help": (
+        "Code coverage for Python, version {__version__} {extension_modifier}. "
+        + "Use '{program_name} help' for help."
+ ), + + "version": "Coverage.py, version {__version__} {extension_modifier}", +} + + +def main(argv: list[str] | None = None) -> int | None: + """The main entry point to coverage.py. + + This is installed as the script entry point. + + """ + if argv is None: + argv = sys.argv[1:] + try: + status = CoverageScript().command_line(argv) + except _ExceptionDuringRun as err: + # An exception was caught while running the product code. The + # sys.exc_info() return tuple is packed into an _ExceptionDuringRun + # exception. + traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter + status = ERR + except _BaseCoverageException as err: + # A controlled error inside coverage.py: print the message to the user. + msg = err.args[0] + print(msg) + status = ERR + except SystemExit as err: + # The user called `sys.exit()`. Exit with their argument, if any. + if err.args: + status = err.args[0] + else: + status = None + return status + +# Profiling using ox_profile. Install it from GitHub: +# pip install git+https://github.com/emin63/ox_profile.git +# +# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. +_profile = os.getenv("COVERAGE_PROFILE") +if _profile: # pragma: debugging + from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error + original_main = main + + def main( # pylint: disable=function-redefined + argv: list[str] | None = None, + ) -> int | None: + """A wrapper around main that profiles.""" + profiler = SimpleLauncher.launch() + try: + return original_main(argv) + finally: + data, _ = profiler.query(re_filter="coverage", max_records=100) + print(profiler.show(query=data, limit=100, sep="", col="")) + profiler.cancel() diff --git a/.venv/Lib/site-packages/coverage/collector.py b/.venv/Lib/site-packages/coverage/collector.py new file mode 100644 index 00000000..53fa6871 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/collector.py @@ -0,0 +1,497 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import contextlib +import functools +import os +import sys + +from collections.abc import Mapping +from types import FrameType +from typing import cast, Any, Callable, TypeVar + +from coverage import env +from coverage.config import CoverageConfig +from coverage.core import Core +from coverage.data import CoverageData +from coverage.debug import short_stack +from coverage.exceptions import ConfigError +from coverage.misc import human_sorted_items, isolate_module +from coverage.plugin import CoveragePlugin +from coverage.types import ( + TArc, + TCheckIncludeFn, + TFileDisposition, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFn, + Tracer, + TWarnFn, +) + +os = isolate_module(os) + + +T = TypeVar("T") + + +class Collector: + """Collects trace data. + + Creates a Tracer object for each thread, since they track stack + information. Each Tracer points to the same shared data, contributing + traced data points. + + When the Collector is started, it creates a Tracer for the current thread, + and installs a function to create Tracers for each new thread started. + When the Collector is stopped, all active Tracers are stopped. + + Threads started while the Collector is stopped will never have Tracers + associated with them. + + """ + + # The stack of active Collectors. Collectors are added here when started, + # and popped when stopped. 
Collectors on the stack are paused when not + # the top, and resumed when they become the top again. + _collectors: list[Collector] = [] + + # The concurrency settings we support here. + LIGHT_THREADS = {"greenlet", "eventlet", "gevent"} + + def __init__( + self, + core: Core, + should_trace: TShouldTraceFn, + check_include: TCheckIncludeFn, + should_start_context: TShouldStartContextFn | None, + file_mapper: Callable[[str], str], + branch: bool, + warn: TWarnFn, + concurrency: list[str], + ) -> None: + """Create a collector. + + `should_trace` is a function, taking a file name and a frame, and + returning a `coverage.FileDisposition object`. + + `check_include` is a function taking a file name and a frame. It returns + a boolean: True if the file should be traced, False if not. + + `should_start_context` is a function taking a frame, and returning a + string. If the frame should be the start of a new context, the string + is the new context. If the frame should not be the start of a new + context, return None. + + `file_mapper` is a function taking a filename, and returning a Unicode + filename. The result is the name that will be recorded in the data + file. + + If `branch` is true, then branches will be measured. This involves + collecting data on which statements followed each other (arcs). Use + `get_arc_data` to get the arc data. + + `warn` is a warning function, taking a single string message argument + and an optional slug argument which will be a string or None, to be + used if a warning needs to be issued. + + `concurrency` is a list of strings indicating the concurrency libraries + in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" + (the default). "thread" can be combined with one of the other three. + Other values are ignored. + + """ + self.core = core + self.should_trace = should_trace + self.check_include = check_include + self.should_start_context = should_start_context + self.file_mapper = file_mapper + self.branch = branch + self.warn = warn + self.concurrency = concurrency + assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}" + + self.pid = os.getpid() + + self.covdata: CoverageData + self.threading = None + self.static_context: str | None = None + + self.origin = short_stack() + + self.concur_id_func = None + + # We can handle a few concurrency options here, but only one at a time. + concurrencies = set(self.concurrency) + unknown = concurrencies - CoverageConfig.CONCURRENCY_CHOICES + if unknown: + show = ", ".join(sorted(unknown)) + raise ConfigError(f"Unknown concurrency choices: {show}") + light_threads = concurrencies & self.LIGHT_THREADS + if len(light_threads) > 1: + show = ", ".join(sorted(light_threads)) + raise ConfigError(f"Conflicting concurrency settings: {show}") + do_threading = False + + tried = "nothing" # to satisfy pylint + try: + if "greenlet" in concurrencies: + tried = "greenlet" + import greenlet + self.concur_id_func = greenlet.getcurrent + elif "eventlet" in concurrencies: + tried = "eventlet" + import eventlet.greenthread # pylint: disable=import-error,useless-suppression + self.concur_id_func = eventlet.greenthread.getcurrent + elif "gevent" in concurrencies: + tried = "gevent" + import gevent # pylint: disable=import-error,useless-suppression + self.concur_id_func = gevent.getcurrent + + if "thread" in concurrencies: + do_threading = True + except ImportError as ex: + msg = f"Couldn't trace with concurrency={tried}, the module isn't installed." 
+            raise ConfigError(msg) from ex
+
+        if self.concur_id_func and not hasattr(core.tracer_class, "concur_id_func"):
+            raise ConfigError(
+                "Can't support concurrency={} with {}, only threads are supported.".format(
+                    tried, self.tracer_name(),
+                ),
+            )
+
+        if do_threading or not concurrencies:
+            # It's important to import threading only if we need it. If
+            # it's imported early, and the program being measured uses
+            # gevent, then gevent's monkey-patching won't work properly.
+            import threading
+            self.threading = threading
+
+        self.reset()
+
+    def __repr__(self) -> str:
+        return f"<Collector at {id(self):#x}: {self.tracer_name()}>"
+
+    def use_data(self, covdata: CoverageData, context: str | None) -> None:
+        """Use `covdata` for recording data."""
+        self.covdata = covdata
+        self.static_context = context
+        self.covdata.set_context(self.static_context)
+
+    def tracer_name(self) -> str:
+        """Return the class name of the tracer we're using."""
+        return self.core.tracer_class.__name__
+
+    def _clear_data(self) -> None:
+        """Clear out existing data, but stay ready for more collection."""
+        # We used to use self.data.clear(), but that would remove filename
+        # keys and data values that were still in use higher up the stack
+        # when we are called as part of switch_context.
+        with self.data_lock or contextlib.nullcontext():
+            for d in self.data.values():
+                d.clear()
+
+        for tracer in self.tracers:
+            tracer.reset_activity()
+
+    def reset(self) -> None:
+        """Clear collected data, and prepare to collect more."""
+        self.data_lock = self.threading.Lock() if self.threading else None
+
+        # The trace data we are collecting.
+        self.data: TTraceData = {}
+
+        # A dictionary mapping file names to file tracer plugin names that will
+        # handle them.
+        self.file_tracers: dict[str, str] = {}
+
+        self.disabled_plugins: set[str] = set()
+
+        # The .should_trace_cache attribute is a cache from file names to
+        # coverage.FileDisposition objects, or None. When a file is first
+        # considered for tracing, a FileDisposition is obtained from
+        # Coverage.should_trace. Its .trace attribute indicates whether the
+        # file should be traced or not. If it should be, a plugin with dynamic
+        # file names can decide not to trace it based on the dynamic file name
+        # being excluded by the inclusion rules, in which case the
+        # FileDisposition will be replaced by None in the cache.
+        if env.PYPY:
+            import __pypy__  # pylint: disable=import-error
+            # Alex Gaynor said:
+            # should_trace_cache is a strictly growing key: once a key is in
+            # it, it never changes. Further, the keys used to access it are
+            # generally constant, given sufficient context. That is to say, at
+            # any given point _trace() is called, pypy is able to know the key.
+            # This is because the key is determined by the physical source code
+            # line, and that's invariant with the call site.
+            #
+            # This property of a dict with immutable keys, combined with
+            # call-site-constant keys is a match for PyPy's module dict,
+            # which is optimized for such workloads.
+            #
+            # This gives a 20% benefit on the workload described at
+            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
+            self.should_trace_cache = __pypy__.newdict("module")
+        else:
+            self.should_trace_cache = {}
+
+        # Our active Tracers.
+ self.tracers: list[Tracer] = [] + + self._clear_data() + + def lock_data(self) -> None: + """Lock self.data_lock, for use by the C tracer.""" + if self.data_lock is not None: + self.data_lock.acquire() + + def unlock_data(self) -> None: + """Unlock self.data_lock, for use by the C tracer.""" + if self.data_lock is not None: + self.data_lock.release() + + def _start_tracer(self) -> TTraceFn | None: + """Start a new Tracer object, and store it in self.tracers.""" + tracer = self.core.tracer_class(**self.core.tracer_kwargs) + tracer.data = self.data + tracer.lock_data = self.lock_data + tracer.unlock_data = self.unlock_data + tracer.trace_arcs = self.branch + tracer.should_trace = self.should_trace + tracer.should_trace_cache = self.should_trace_cache + tracer.warn = self.warn + + if hasattr(tracer, 'concur_id_func'): + tracer.concur_id_func = self.concur_id_func + if hasattr(tracer, 'file_tracers'): + tracer.file_tracers = self.file_tracers + if hasattr(tracer, 'threading'): + tracer.threading = self.threading + if hasattr(tracer, 'check_include'): + tracer.check_include = self.check_include + if hasattr(tracer, 'should_start_context'): + tracer.should_start_context = self.should_start_context + if hasattr(tracer, 'switch_context'): + tracer.switch_context = self.switch_context + if hasattr(tracer, 'disable_plugin'): + tracer.disable_plugin = self.disable_plugin + + fn = tracer.start() + self.tracers.append(tracer) + + return fn + + # The trace function has to be set individually on each thread before + # execution begins. Ironically, the only support the threading module has + # for running code before the thread main is the tracing function. So we + # install this as a trace function, and the first time it's called, it does + # the real trace installation. + # + # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681 + + def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None: + """Called on new threads, installs the real tracer.""" + # Remove ourselves as the trace function. + sys.settrace(None) + # Install the real tracer. + fn: TTraceFn | None = self._start_tracer() + # Invoke the real trace function with the current event, to be sure + # not to lose an event. + if fn: + fn = fn(frame, event, arg) + # Return the new trace function to continue tracing in this scope. + return fn + + def start(self) -> None: + """Start collecting trace information.""" + # We may be a new collector in a forked process. The old process' + # collectors will be in self._collectors, but they won't be usable. + # Find them and discard them. + keep_collectors = [] + for c in self._collectors: + if c.pid == self.pid: + keep_collectors.append(c) + else: + c.post_fork() + self._collectors[:] = keep_collectors + + if self._collectors: + self._collectors[-1].pause() + + self.tracers = [] + + try: + # Install the tracer on this thread. + self._start_tracer() + except: + if self._collectors: + self._collectors[-1].resume() + raise + + # If _start_tracer succeeded, then we add ourselves to the global + # stack of collectors. + self._collectors.append(self) + + # Install our installation tracer in threading, to jump-start other + # threads. 
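+        # threading.settrace() arranges for each newly started thread to pass
+        # this function to sys.settrace() before running its target, at which
+        # point _installation_trace() swaps in the real tracer.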
+ if self.core.systrace and self.threading: + self.threading.settrace(self._installation_trace) + + def stop(self) -> None: + """Stop collecting trace information.""" + assert self._collectors + if self._collectors[-1] is not self: + print("self._collectors:") + for c in self._collectors: + print(f" {c!r}\n{c.origin}") + assert self._collectors[-1] is self, ( + f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}" + ) + + self.pause() + + # Remove this Collector from the stack, and resume the one underneath (if any). + self._collectors.pop() + if self._collectors: + self._collectors[-1].resume() + + def pause(self) -> None: + """Pause tracing, but be prepared to `resume`.""" + for tracer in self.tracers: + tracer.stop() + stats = tracer.get_stats() + if stats: + print("\nCoverage.py tracer stats:") + for k, v in human_sorted_items(stats.items()): + print(f"{k:>20}: {v}") + if self.threading: + self.threading.settrace(None) + + def resume(self) -> None: + """Resume tracing after a `pause`.""" + for tracer in self.tracers: + tracer.start() + if self.core.systrace: + if self.threading: + self.threading.settrace(self._installation_trace) + else: + self._start_tracer() + + def post_fork(self) -> None: + """After a fork, tracers might need to adjust.""" + for tracer in self.tracers: + if hasattr(tracer, "post_fork"): + tracer.post_fork() + + def _activity(self) -> bool: + """Has any activity been traced? + + Returns a boolean, True if any trace function was invoked. + + """ + return any(tracer.activity() for tracer in self.tracers) + + def switch_context(self, new_context: str | None) -> None: + """Switch to a new dynamic context.""" + context: str | None + self.flush_data() + if self.static_context: + context = self.static_context + if new_context: + context += "|" + new_context + else: + context = new_context + self.covdata.set_context(context) + + def disable_plugin(self, disposition: TFileDisposition) -> None: + """Disable the plugin mentioned in `disposition`.""" + file_tracer = disposition.file_tracer + assert file_tracer is not None + plugin = file_tracer._coverage_plugin + plugin_name = plugin._coverage_plugin_name + self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception") + plugin._coverage_enabled = False + disposition.trace = False + + @functools.cache # pylint: disable=method-cache-max-size-none + def cached_mapped_file(self, filename: str) -> str: + """A locally cached version of file names mapped through file_mapper.""" + return self.file_mapper(filename) + + def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]: + """Return a dict like d, but with keys modified by file_mapper.""" + # The call to list(items()) ensures that the GIL protects the dictionary + # iterator against concurrent modifications by tracers running + # in other threads. We try three times in case of concurrent + # access, hoping to get a clean copy. + runtime_err = None + for _ in range(3): # pragma: part covered + try: + items = list(d.items()) + except RuntimeError as ex: # pragma: cant happen + runtime_err = ex + else: + break + else: # pragma: cant happen + assert isinstance(runtime_err, Exception) + raise runtime_err + + return {self.cached_mapped_file(k): v for k, v in items if v} + + def plugin_was_disabled(self, plugin: CoveragePlugin) -> None: + """Record that `plugin` was disabled during the run.""" + self.disabled_plugins.add(plugin._coverage_plugin_name) + + def flush_data(self) -> bool: + """Save the collected data to our associated `CoverageData`. 
+ + Data may have also been saved along the way. This forces the + last of the data to be saved. + + Returns True if there was data to save, False if not. + """ + if not self._activity(): + return False + + if self.branch: + if self.core.packed_arcs: + # Unpack the line number pairs packed into integers. See + # tracer.c:CTracer_record_pair for the C code that creates + # these packed ints. + arc_data: dict[str, list[TArc]] = {} + packed_data = cast(dict[str, set[int]], self.data) + + # The list() here and in the inner loop are to get a clean copy + # even as tracers are continuing to add data. + for fname, packeds in list(packed_data.items()): + tuples = [] + for packed in list(packeds): + l1 = packed & 0xFFFFF + l2 = (packed & (0xFFFFF << 20)) >> 20 + if packed & (1 << 40): + l1 *= -1 + if packed & (1 << 41): + l2 *= -1 + tuples.append((l1, l2)) + arc_data[fname] = tuples + else: + arc_data = cast(dict[str, list[TArc]], self.data) + self.covdata.add_arcs(self.mapped_file_dict(arc_data)) + else: + line_data = cast(dict[str, set[int]], self.data) + self.covdata.add_lines(self.mapped_file_dict(line_data)) + + file_tracers = { + k: v for k, v in self.file_tracers.items() + if v not in self.disabled_plugins + } + self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) + + self._clear_data() + return True diff --git a/.venv/Lib/site-packages/coverage/config.py b/.venv/Lib/site-packages/coverage/config.py new file mode 100644 index 00000000..357fc5af --- /dev/null +++ b/.venv/Lib/site-packages/coverage/config.py @@ -0,0 +1,627 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Config file for coverage.py""" + +from __future__ import annotations + +import collections +import configparser +import copy +import os +import os.path +import re + +from typing import ( + Any, Callable, Union, +) +from collections.abc import Iterable + +from coverage.exceptions import ConfigError +from coverage.misc import isolate_module, human_sorted_items, substitute_variables +from coverage.tomlconfig import TomlConfigParser, TomlDecodeError +from coverage.types import ( + TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigSectionOut, + TConfigValueOut, TPluginConfig, +) + +os = isolate_module(os) + + +class HandyConfigParser(configparser.ConfigParser): + """Our specialization of ConfigParser.""" + + def __init__(self, our_file: bool) -> None: + """Create the HandyConfigParser. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. 
+ """ + + super().__init__(interpolation=None) + self.section_prefixes = ["coverage:"] + if our_file: + self.section_prefixes.append("") + + def read( # type: ignore[override] + self, + filenames: Iterable[str], + encoding_unused: str | None = None, + ) -> list[str]: + """Read a file name as UTF-8 configuration data.""" + return super().read(filenames, encoding="utf-8") + + def real_section(self, section: str) -> str | None: + """Get the actual name of a section.""" + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + has = super().has_section(real_section) + if has: + return real_section + return None + + def has_option(self, section: str, option: str) -> bool: + real_section = self.real_section(section) + if real_section is not None: + return super().has_option(real_section, option) + return False + + def has_section(self, section: str) -> bool: + return bool(self.real_section(section)) + + def options(self, section: str) -> list[str]: + real_section = self.real_section(section) + if real_section is not None: + return super().options(real_section) + raise ConfigError(f"No section: {section!r}") + + def get_section(self, section: str) -> TConfigSectionOut: + """Get the contents of a section, as a dictionary.""" + d: dict[str, TConfigValueOut] = {} + for opt in self.options(section): + d[opt] = self.get(section, opt) + return d + + def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore + """Get a value, replacing environment variables also. + + The arguments are the same as `ConfigParser.get`, but in the found + value, ``$WORD`` or ``${WORD}`` are replaced by the value of the + environment variable ``WORD``. + + Returns the finished value. + + """ + for section_prefix in self.section_prefixes: + real_section = section_prefix + section + if super().has_option(real_section, option): + break + else: + raise ConfigError(f"No option {option!r} in section: {section!r}") + + v: str = super().get(real_section, option, *args, **kwargs) + v = substitute_variables(v, os.environ) + return v + + def getlist(self, section: str, option: str) -> list[str]: + """Read a list of strings. + + The value of `section` and `option` is treated as a comma- and newline- + separated list of strings. Each value is stripped of white space. + + Returns the list of strings. + + """ + value_list = self.get(section, option) + values = [] + for value_line in value_list.split("\n"): + for value in value_line.split(","): + value = value.strip() + if value: + values.append(value) + return values + + def getregexlist(self, section: str, option: str) -> list[str]: + """Read a list of full-line regexes. + + The value of `section` and `option` is treated as a newline-separated + list of regexes. Each value is stripped of white space. + + Returns the list of strings. + + """ + line_list = self.get(section, option) + value_list = [] + for value in line_list.splitlines(): + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise ConfigError( + f"Invalid [{section}].{option} value {value!r}: {e}", + ) from e + if value: + value_list.append(value) + return value_list + + +TConfigParser = Union[HandyConfigParser, TomlConfigParser] + + +# The default line exclusion regexes. +DEFAULT_EXCLUDE = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)", +] + +# The default partial branch regexes, to be modified by the user. 
+DEFAULT_PARTIAL = [ + r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)", +] + +# The default partial branch regexes, based on Python semantics. +# These are any Python branching constructs that can't actually execute all +# their branches. +DEFAULT_PARTIAL_ALWAYS = [ + "while (True|1|False|0):", + "if (True|1|False|0):", +] + + +class CoverageConfig(TConfigurable, TPluginConfig): + """Coverage.py configuration. + + The attributes of this class are the various settings that control the + operation of coverage.py. + + """ + # pylint: disable=too-many-instance-attributes + + def __init__(self) -> None: + """Initialize the configuration attributes to their defaults.""" + # Metadata about the config. + # We tried to read these config files. + self.config_files_attempted: list[str] = [] + # We did read these config files, but maybe didn't find any content for us. + self.config_files_read: list[str] = [] + # The file that gave us our configuration. + self.config_file: str | None = None + self._config_contents: bytes | None = None + + # Defaults for [run] and [report] + self._include = None + self._omit = None + + # Defaults for [run] + self.branch = False + self.command_line: str | None = None + self.concurrency: list[str] = [] + self.context: str | None = None + self.cover_pylib = False + self.data_file = ".coverage" + self.debug: list[str] = [] + self.debug_file: str | None = None + self.disable_warnings: list[str] = [] + self.dynamic_context: str | None = None + self.parallel = False + self.plugins: list[str] = [] + self.relative_files = False + self.run_include: list[str] = [] + self.run_omit: list[str] = [] + self.sigterm = False + self.source: list[str] | None = None + self.source_pkgs: list[str] = [] + self.timid = False + self._crash: str | None = None + + # Defaults for [report] + self.exclude_list = DEFAULT_EXCLUDE[:] + self.exclude_also: list[str] = [] + self.fail_under = 0.0 + self.format: str | None = None + self.ignore_errors = False + self.include_namespace_packages = False + self.report_include: list[str] | None = None + self.report_omit: list[str] | None = None + self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] + self.partial_list = DEFAULT_PARTIAL[:] + self.precision = 0 + self.report_contexts: list[str] | None = None + self.show_missing = False + self.skip_covered = False + self.skip_empty = False + self.sort: str | None = None + + # Defaults for [html] + self.extra_css: str | None = None + self.html_dir = "htmlcov" + self.html_skip_covered: bool | None = None + self.html_skip_empty: bool | None = None + self.html_title = "Coverage report" + self.show_contexts = False + + # Defaults for [xml] + self.xml_output = "coverage.xml" + self.xml_package_depth = 99 + + # Defaults for [json] + self.json_output = "coverage.json" + self.json_pretty_print = False + self.json_show_contexts = False + + # Defaults for [lcov] + self.lcov_output = "coverage.lcov" + self.lcov_line_checksums = False + + # Defaults for [paths] + self.paths: dict[str, list[str]] = {} + + # Options for plugins + self.plugin_options: dict[str, TConfigSectionOut] = {} + + MUST_BE_LIST = { + "debug", "concurrency", "plugins", + "report_omit", "report_include", + "run_omit", "run_include", + } + + def from_args(self, **kwargs: TConfigValueIn) -> None: + """Read config values from `kwargs`.""" + for k, v in kwargs.items(): + if v is not None: + if k in self.MUST_BE_LIST and isinstance(v, str): + v = [v] + setattr(self, k, v) + + def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> 
bool: + """Read configuration from a .rc file. + + `filename` is a file name to read. + + `our_file` is True if this config file is specifically for coverage, + False if we are examining another config file (tox.ini, setup.cfg) + for possible settings. + + Returns True or False, whether the file could be read, and it had some + coverage.py settings in it. + + """ + _, ext = os.path.splitext(filename) + cp: TConfigParser + if ext == ".toml": + cp = TomlConfigParser(our_file) + else: + cp = HandyConfigParser(our_file) + + self.config_files_attempted.append(os.path.abspath(filename)) + + try: + files_read = cp.read(filename) + except (configparser.Error, TomlDecodeError) as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + if not files_read: + return False + + self.config_files_read.extend(map(os.path.abspath, files_read)) + + any_set = False + try: + for option_spec in self.CONFIG_FILE_OPTIONS: + was_set = self._set_attr_from_config_option(cp, *option_spec) + if was_set: + any_set = True + except ValueError as err: + raise ConfigError(f"Couldn't read config file {filename}: {err}") from err + + # Check that there are no unrecognized options. + all_options = collections.defaultdict(set) + for option_spec in self.CONFIG_FILE_OPTIONS: + section, option = option_spec[1].split(":") + all_options[section].add(option) + + for section, options in all_options.items(): + real_section = cp.real_section(section) + if real_section: + for unknown in set(cp.options(section)) - options: + warn( + "Unrecognized option '[{}] {}=' in config file {}".format( + real_section, unknown, filename, + ), + ) + + # [paths] is special + if cp.has_section("paths"): + for option in cp.options("paths"): + self.paths[option] = cp.getlist("paths", option) + any_set = True + + # plugins can have options + for plugin in self.plugins: + if cp.has_section(plugin): + self.plugin_options[plugin] = cp.get_section(plugin) + any_set = True + + # Was this file used as a config file? If it's specifically our file, + # then it was used. If we're piggybacking on someone else's file, + # then it was only used if we found some settings in it. + if our_file: + used = True + else: + used = any_set + + if used: + self.config_file = os.path.abspath(filename) + with open(filename, "rb") as f: + self._config_contents = f.read() + + return used + + def copy(self) -> CoverageConfig: + """Return a copy of the configuration.""" + return copy.deepcopy(self) + + CONCURRENCY_CHOICES = {"thread", "gevent", "greenlet", "eventlet", "multiprocessing"} + + CONFIG_FILE_OPTIONS = [ + # These are *args for _set_attr_from_config_option: + # (attr, where, type_="") + # + # attr is the attribute to set on the CoverageConfig object. + # where is the section:name to read from the configuration file. + # type_ is the optional type to apply, by using .getTYPE to read the + # configuration value from the file. 
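+    #
+    # For example, ("branch", "run:branch", "boolean") reads the [run] branch
+    # setting with cp.getboolean() and stores it on self.branch.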
+ + # [run] + ("branch", "run:branch", "boolean"), + ("command_line", "run:command_line"), + ("concurrency", "run:concurrency", "list"), + ("context", "run:context"), + ("cover_pylib", "run:cover_pylib", "boolean"), + ("data_file", "run:data_file"), + ("debug", "run:debug", "list"), + ("debug_file", "run:debug_file"), + ("disable_warnings", "run:disable_warnings", "list"), + ("dynamic_context", "run:dynamic_context"), + ("parallel", "run:parallel", "boolean"), + ("plugins", "run:plugins", "list"), + ("relative_files", "run:relative_files", "boolean"), + ("run_include", "run:include", "list"), + ("run_omit", "run:omit", "list"), + ("sigterm", "run:sigterm", "boolean"), + ("source", "run:source", "list"), + ("source_pkgs", "run:source_pkgs", "list"), + ("timid", "run:timid", "boolean"), + ("_crash", "run:_crash"), + + # [report] + ("exclude_list", "report:exclude_lines", "regexlist"), + ("exclude_also", "report:exclude_also", "regexlist"), + ("fail_under", "report:fail_under", "float"), + ("format", "report:format"), + ("ignore_errors", "report:ignore_errors", "boolean"), + ("include_namespace_packages", "report:include_namespace_packages", "boolean"), + ("partial_always_list", "report:partial_branches_always", "regexlist"), + ("partial_list", "report:partial_branches", "regexlist"), + ("precision", "report:precision", "int"), + ("report_contexts", "report:contexts", "list"), + ("report_include", "report:include", "list"), + ("report_omit", "report:omit", "list"), + ("show_missing", "report:show_missing", "boolean"), + ("skip_covered", "report:skip_covered", "boolean"), + ("skip_empty", "report:skip_empty", "boolean"), + ("sort", "report:sort"), + + # [html] + ("extra_css", "html:extra_css"), + ("html_dir", "html:directory"), + ("html_skip_covered", "html:skip_covered", "boolean"), + ("html_skip_empty", "html:skip_empty", "boolean"), + ("html_title", "html:title"), + ("show_contexts", "html:show_contexts", "boolean"), + + # [xml] + ("xml_output", "xml:output"), + ("xml_package_depth", "xml:package_depth", "int"), + + # [json] + ("json_output", "json:output"), + ("json_pretty_print", "json:pretty_print", "boolean"), + ("json_show_contexts", "json:show_contexts", "boolean"), + + # [lcov] + ("lcov_output", "lcov:output"), + ("lcov_line_checksums", "lcov:line_checksums", "boolean") + ] + + def _set_attr_from_config_option( + self, + cp: TConfigParser, + attr: str, + where: str, + type_: str = "", + ) -> bool: + """Set an attribute on self if it exists in the ConfigParser. + + Returns True if the attribute was set. + + """ + section, option = where.split(":") + if cp.has_option(section, option): + method = getattr(cp, "get" + type_) + setattr(self, attr, method(section, option)) + return True + return False + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get a dictionary of options for the plugin named `plugin`.""" + return self.plugin_options.get(plugin, {}) + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + # Special-cased options. + if option_name == "paths": + self.paths = value # type: ignore[assignment] + return + + # Check all the hard-coded options. 
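+        # For example, set_option("run:branch", True) matches the
+        # ("branch", "run:branch", "boolean") spec and sets self.branch.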
+ for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + setattr(self, attr, value) + return + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index] + return + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + # Special-cased options. + if option_name == "paths": + return self.paths # type: ignore[return-value] + + # Check all the hard-coded options. + for option_spec in self.CONFIG_FILE_OPTIONS: + attr, where = option_spec[:2] + if where == option_name: + return getattr(self, attr) # type: ignore[no-any-return] + + # See if it's a plugin option. + plugin_name, _, key = option_name.partition(":") + if key and plugin_name in self.plugins: + return self.plugin_options.get(plugin_name, {}).get(key) + + # If we get here, we didn't find the option. + raise ConfigError(f"No such option: {option_name!r}") + + def post_process_file(self, path: str) -> str: + """Make final adjustments to a file path to make it usable.""" + return os.path.expanduser(path) + + def post_process(self) -> None: + """Make final adjustments to settings to make them usable.""" + self.data_file = self.post_process_file(self.data_file) + self.html_dir = self.post_process_file(self.html_dir) + self.xml_output = self.post_process_file(self.xml_output) + self.paths = { + k: [self.post_process_file(f) for f in v] + for k, v in self.paths.items() + } + self.exclude_list += self.exclude_also + + def debug_info(self) -> list[tuple[str, Any]]: + """Make a list of (name, value) pairs for writing debug info.""" + return human_sorted_items( + (k, v) for k, v in self.__dict__.items() if not k.startswith("_") + ) + + +def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]: + """What config files should we try to read? + + Returns a list of tuples: + (filename, is_our_file, was_file_specified) + """ + + # Some API users were specifying ".coveragerc" to mean the same as + # True, so make it so. + if config_file == ".coveragerc": + config_file = True + specified_file = (config_file is not True) + if not specified_file: + # No file was specified. Check COVERAGE_RCFILE. + rcfile = os.getenv("COVERAGE_RCFILE") + if rcfile: + config_file = rcfile + specified_file = True + if not specified_file: + # Still no file specified. Default to .coveragerc + config_file = ".coveragerc" + assert isinstance(config_file, str) + files_to_try = [ + (config_file, True, specified_file), + ("setup.cfg", False, False), + ("tox.ini", False, False), + ("pyproject.toml", False, False), + ] + return files_to_try + + +def read_coverage_config( + config_file: bool | str, + warn: Callable[[str], None], + **kwargs: TConfigValueIn, +) -> CoverageConfig: + """Read the coverage.py configuration. + + Arguments: + config_file: a boolean or string, see the `Coverage` class for the + tricky details. + warn: a function to issue warnings. 
+ all others: keyword arguments from the `Coverage` class, used for + setting values in the configuration. + + Returns: + config: + config is a CoverageConfig object read from the appropriate + configuration file. + + """ + # Build the configuration from a number of sources: + # 1) defaults: + config = CoverageConfig() + + # 2) from a file: + if config_file: + files_to_try = config_files_to_try(config_file) + + for fname, our_file, specified_file in files_to_try: + config_read = config.from_file(fname, warn, our_file=our_file) + if config_read: + break + if specified_file: + raise ConfigError(f"Couldn't read {fname!r} as a config file") + + # 3) from environment variables: + env_data_file = os.getenv("COVERAGE_FILE") + if env_data_file: + config.data_file = env_data_file + # $set_env.py: COVERAGE_DEBUG - Debug options: https://coverage.rtfd.io/cmd.html#debug + debugs = os.getenv("COVERAGE_DEBUG") + if debugs: + config.debug.extend(d.strip() for d in debugs.split(",")) + + # 4) from constructor arguments: + config.from_args(**kwargs) + + # 5) for our benchmark, force settings using a secret environment variable: + force_file = os.getenv("COVERAGE_FORCE_CONFIG") + if force_file: + config.from_file(force_file, warn, our_file=True) + + # Once all the config has been collected, there's a little post-processing + # to do. + config.post_process() + + return config diff --git a/.venv/Lib/site-packages/coverage/context.py b/.venv/Lib/site-packages/coverage/context.py new file mode 100644 index 00000000..977e9b4e --- /dev/null +++ b/.venv/Lib/site-packages/coverage/context.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Determine contexts for coverage.py""" + +from __future__ import annotations + +from types import FrameType +from typing import cast +from collections.abc import Sequence + +from coverage.types import TShouldStartContextFn + + +def combine_context_switchers( + context_switchers: Sequence[TShouldStartContextFn], +) -> TShouldStartContextFn | None: + """Create a single context switcher from multiple switchers. + + `context_switchers` is a list of functions that take a frame as an + argument and return a string to use as the new context label. + + Returns a function that composites `context_switchers` functions, or None + if `context_switchers` is an empty list. + + When invoked, the combined switcher calls `context_switchers` one-by-one + until a string is returned. The combined switcher returns None if all + `context_switchers` return None. 
+ """ + if not context_switchers: + return None + + if len(context_switchers) == 1: + return context_switchers[0] + + def should_start_context(frame: FrameType) -> str | None: + """The combiner for multiple context switchers.""" + for switcher in context_switchers: + new_context = switcher(frame) + if new_context is not None: + return new_context + return None + + return should_start_context + + +def should_start_context_test_function(frame: FrameType) -> str | None: + """Is this frame calling a test_* function?""" + co_name = frame.f_code.co_name + if co_name.startswith("test") or co_name == "runTest": + return qualname_from_frame(frame) + return None + + +def qualname_from_frame(frame: FrameType) -> str | None: + """Get a qualified name for the code running in `frame`.""" + co = frame.f_code + fname = co.co_name + method = None + if co.co_argcount and co.co_varnames[0] == "self": + self = frame.f_locals.get("self", None) + method = getattr(self, fname, None) + + if method is None: + func = frame.f_globals.get(fname) + if func is None: + return None + return cast(str, func.__module__ + "." + fname) + + func = getattr(method, "__func__", None) + if func is None: + cls = self.__class__ + return cast(str, cls.__module__ + "." + cls.__name__ + "." + fname) + + return cast(str, func.__module__ + "." + func.__qualname__) diff --git a/.venv/Lib/site-packages/coverage/control.py b/.venv/Lib/site-packages/coverage/control.py new file mode 100644 index 00000000..b052d4b4 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/control.py @@ -0,0 +1,1402 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Central control stuff for coverage.py.""" + +from __future__ import annotations + +import atexit +import collections +import contextlib +import functools +import os +import os.path +import platform +import signal +import sys +import threading +import time +import warnings + +from types import FrameType +from typing import cast, Any, Callable, IO +from collections.abc import Iterable, Iterator + +from coverage import env +from coverage.annotate import AnnotateReporter +from coverage.collector import Collector +from coverage.config import CoverageConfig, read_coverage_config +from coverage.context import should_start_context_test_function, combine_context_switchers +from coverage.core import Core, HAS_CTRACER +from coverage.data import CoverageData, combine_parallel_data +from coverage.debug import ( + DebugControl, NoDebugging, short_stack, write_formatted_info, relevant_environment_display, +) +from coverage.disposition import disposition_debug_msg +from coverage.exceptions import ConfigError, CoverageException, CoverageWarning, PluginError +from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory +from coverage.html import HtmlReporter +from coverage.inorout import InOrOut +from coverage.jsonreport import JsonReporter +from coverage.lcovreport import LcovReporter +from coverage.misc import bool_or_none, join_regex +from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module +from coverage.multiproc import patch_multiprocessing +from coverage.plugin import FileReporter +from coverage.plugin_support import Plugins +from coverage.python import PythonFileReporter +from coverage.report import SummaryReporter +from coverage.report_core import render_report +from coverage.results import Analysis, analysis_from_file_reporter +from coverage.types 
import ( + FilePath, TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigValueOut, + TFileDisposition, TLineNo, TMorf, +) +from coverage.xmlreport import XmlReporter + +os = isolate_module(os) + +@contextlib.contextmanager +def override_config(cov: Coverage, **kwargs: TConfigValueIn) -> Iterator[None]: + """Temporarily tweak the configuration of `cov`. + + The arguments are applied to `cov.config` with the `from_args` method. + At the end of the with-statement, the old configuration is restored. + """ + original_config = cov.config + cov.config = cov.config.copy() + try: + cov.config.from_args(**kwargs) + yield + finally: + cov.config = original_config + + +DEFAULT_DATAFILE = DefaultValue("MISSING") +_DEFAULT_DATAFILE = DEFAULT_DATAFILE # Just in case, for backwards compatibility + +class Coverage(TConfigurable): + """Programmatic access to coverage.py. + + To use:: + + from coverage import Coverage + + cov = Coverage() + cov.start() + #.. call your code .. + cov.stop() + cov.html_report(directory="covhtml") + + A context manager is available to do the same thing:: + + cov = Coverage() + with cov.collect(): + #.. call your code .. + cov.html_report(directory="covhtml") + + Note: in keeping with Python custom, names starting with underscore are + not part of the public API. They might stop working at any point. Please + limit yourself to documented methods to avoid problems. + + Methods can raise any of the exceptions described in :ref:`api_exceptions`. + + """ + + # The stack of started Coverage instances. + _instances: list[Coverage] = [] + + @classmethod + def current(cls) -> Coverage | None: + """Get the latest started `Coverage` instance, if any. + + Returns: a `Coverage` instance, or None. + + .. versionadded:: 5.0 + + """ + if cls._instances: + return cls._instances[-1] + else: + return None + + def __init__( # pylint: disable=too-many-arguments + self, + data_file: FilePath | DefaultValue | None = DEFAULT_DATAFILE, + data_suffix: str | bool | None = None, + cover_pylib: bool | None = None, + auto_data: bool = False, + timid: bool | None = None, + branch: bool | None = None, + config_file: FilePath | bool = True, + source: Iterable[str] | None = None, + source_pkgs: Iterable[str] | None = None, + omit: str | Iterable[str] | None = None, + include: str | Iterable[str] | None = None, + debug: Iterable[str] | None = None, + concurrency: str | Iterable[str] | None = None, + check_preimported: bool = False, + context: str | None = None, + messages: bool = False, + ) -> None: + """ + Many of these arguments duplicate and override values that can be + provided in a configuration file. Parameters that are missing here + will use values from the config file. + + `data_file` is the base name of the data file to use. The config value + defaults to ".coverage". None can be provided to prevent writing a data + file. `data_suffix` is appended (with a dot) to `data_file` to create + the final file name. If `data_suffix` is simply True, then a suffix is + created with the machine and process identity included. + + `cover_pylib` is a boolean determining whether Python code installed + with the Python interpreter is measured. This includes the Python + standard library and any packages installed with the interpreter. + + If `auto_data` is true, then any existing data file will be read when + coverage measurement starts, and data will be saved automatically when + measurement stops. + + If `timid` is true, then a slower and simpler trace function will be + used. 
This is important for some environments where manipulation of
+        tracing functions breaks the faster trace function.
+
+        If `branch` is true, then branch coverage will be measured in addition
+        to the usual statement coverage.
+
+        `config_file` determines what configuration file to read:
+
+            * If it is ".coveragerc", it is interpreted as if it were True,
+              for backward compatibility.
+
+            * If it is a string, it is the name of the file to read. If the
+              file can't be read, it is an error.
+
+            * If it is True, then a few standard file names are tried
+              (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for
+              these files to not be found.
+
+            * If it is False, then no configuration file is read.
+
+        `source` is a list of file paths or package names. Only code located
+        in the trees indicated by the file paths or package names will be
+        measured.
+
+        `source_pkgs` is a list of package names. It works the same as
+        `source`, but can be used to name packages where the name can also be
+        interpreted as a file path.
+
+        `include` and `omit` are lists of file name patterns. Files that match
+        `include` will be measured, files that match `omit` will not. Each
+        will also accept a single string argument.
+
+        `debug` is a list of strings indicating what debugging information is
+        desired.
+
+        `concurrency` is a string indicating the concurrency library being used
+        in the measured code. Without this, coverage.py will get incorrect
+        results if these libraries are in use. Valid strings are "greenlet",
+        "eventlet", "gevent", "multiprocessing", or "thread" (the default).
+        This can also be a list of these strings.
+
+        If `check_preimported` is true, then when coverage is started, the
+        already-imported files will be checked to see if they should be
+        measured by coverage. Importing measured files before coverage is
+        started can mean that code is missed.
+
+        `context` is a string to use as the :ref:`static context
+        <static_contexts>` label for collected data.
+
+        If `messages` is true, some messages will be printed to stdout
+        indicating what is happening.
+
+        .. versionadded:: 4.0
+            The `concurrency` parameter.
+
+        .. versionadded:: 4.2
+            The `concurrency` parameter can now be a list of strings.
+
+        .. versionadded:: 5.0
+            The `check_preimported` and `context` parameters.
+
+        .. versionadded:: 5.3
+            The `source_pkgs` parameter.
+
+        .. versionadded:: 6.0
+            The `messages` parameter.
+
+        """
+        # Start self.config as a usable default configuration. It will soon be
+        # replaced with the real configuration.
+        self.config = CoverageConfig()
+
+        # data_file=None means no disk file at all. data_file missing means
+        # use the value from the config file.
+        self._no_disk = data_file is None
+        if isinstance(data_file, DefaultValue):
+            data_file = None
+        if data_file is not None:
+            data_file = os.fspath(data_file)
+
+        # This is injectable by tests.
+        self._debug_file: IO[str] | None = None
+
+        self._auto_load = self._auto_save = auto_data
+        self._data_suffix_specified = data_suffix
+
+        # Is it ok for no data to be collected?
+        self._warn_no_data = True
+        self._warn_unimported_source = True
+        self._warn_preimported_source = check_preimported
+        self._no_warn_slugs: list[str] = []
+        self._messages = messages
+
+        # A record of all the warnings that have been issued.
+        self._warnings: list[str] = []
+
+        # Other instance attributes, set with placebos or placeholders.
+        # More useful objects will be created later.
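+        # (NoDebugging() and the empty Plugins() are inert placeholders;
+        # _init() replaces them with the real objects.)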
+ self._debug: DebugControl = NoDebugging() + self._inorout: InOrOut | None = None + self._plugins: Plugins = Plugins() + self._data: CoverageData | None = None + self._core: Core | None = None + self._collector: Collector | None = None + self._metacov = False + + self._file_mapper: Callable[[str], str] = abs_file + self._data_suffix = self._run_suffix = None + self._exclude_re: dict[str, str] = {} + self._old_sigterm: Callable[[int, FrameType | None], Any] | None = None + + # State machine variables: + # Have we initialized everything? + self._inited = False + self._inited_for_start = False + # Have we started collecting and not stopped it? + self._started = False + # Should we write the debug output? + self._should_write_debug = True + + # Build our configuration from a number of sources. + if not isinstance(config_file, bool): + config_file = os.fspath(config_file) + self.config = read_coverage_config( + config_file=config_file, + warn=self._warn, + data_file=data_file, + cover_pylib=cover_pylib, + timid=timid, + branch=branch, + parallel=bool_or_none(data_suffix), + source=source, + source_pkgs=source_pkgs, + run_omit=omit, + run_include=include, + debug=debug, + report_omit=omit, + report_include=include, + concurrency=concurrency, + context=context, + ) + + # If we have sub-process measurement happening automatically, then we + # want any explicit creation of a Coverage object to mean, this process + # is already coverage-aware, so don't auto-measure it. By now, the + # auto-creation of a Coverage object has already happened. But we can + # find it and tell it not to save its data. + if not env.METACOV: + _prevent_sub_process_measurement() + + def _init(self) -> None: + """Set all the initial state. + + This is called by the public methods to initialize state. This lets us + construct a :class:`Coverage` object, then tweak its state before this + function is called. + + """ + if self._inited: + return + + self._inited = True + + # Create and configure the debugging controller. + self._debug = DebugControl(self.config.debug, self._debug_file, self.config.debug_file) + if self._debug.should("process"): + self._debug.write("Coverage._init") + + if "multiprocessing" in (self.config.concurrency or ()): + # Multi-processing uses parallel for the subprocesses, so also use + # it for the main process. + self.config.parallel = True + + # _exclude_re is a dict that maps exclusion list names to compiled regexes. + self._exclude_re = {} + + set_relative_directory() + if self.config.relative_files: + self._file_mapper = relative_filename + + # Load plugins + self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) + + # Run configuring plugins. + for plugin in self._plugins.configurers: + # We need an object with set_option and get_option. Either self or + # self.config will do. Choosing randomly stops people from doing + # other things with those objects, against the public API. Yes, + # this is a bit childish. :) + plugin.configure([self, self.config][int(time.time()) % 2]) + + def _post_init(self) -> None: + """Stuff to do after everything is initialized.""" + if self._should_write_debug: + self._should_write_debug = False + self._write_startup_debug() + + # "[run] _crash" will raise an exception if the value is close by in + # the call stack, for testing error handling. 
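+        # For example, a test config could set "_crash = _post_init" (value
+        # illustrative) so that any call passing through _post_init() raises.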
+ if self.config._crash and self.config._crash in short_stack(): + raise RuntimeError(f"Crashing because called by {self.config._crash}") + + def _write_startup_debug(self) -> None: + """Write out debug info at startup if needed.""" + wrote_any = False + with self._debug.without_callers(): + if self._debug.should("config"): + config_info = self.config.debug_info() + write_formatted_info(self._debug.write, "config", config_info) + wrote_any = True + + if self._debug.should("sys"): + write_formatted_info(self._debug.write, "sys", self.sys_info()) + for plugin in self._plugins: + header = "sys: " + plugin._coverage_plugin_name + info = plugin.sys_info() + write_formatted_info(self._debug.write, header, info) + wrote_any = True + + if self._debug.should("pybehave"): + write_formatted_info(self._debug.write, "pybehave", env.debug_info()) + wrote_any = True + + if wrote_any: + write_formatted_info(self._debug.write, "end", ()) + + def _should_trace(self, filename: str, frame: FrameType) -> TFileDisposition: + """Decide whether to trace execution in `filename`. + + Calls `_should_trace_internal`, and returns the FileDisposition. + + """ + assert self._inorout is not None + disp = self._inorout.should_trace(filename, frame) + if self._debug.should("trace"): + self._debug.write(disposition_debug_msg(disp)) + return disp + + def _check_include_omit_etc(self, filename: str, frame: FrameType) -> bool: + """Check a file name against the include/omit/etc, rules, verbosely. + + Returns a boolean: True if the file should be traced, False if not. + + """ + assert self._inorout is not None + reason = self._inorout.check_include_omit_etc(filename, frame) + if self._debug.should("trace"): + if not reason: + msg = f"Including {filename!r}" + else: + msg = f"Not including {filename!r}: {reason}" + self._debug.write(msg) + + return not reason + + def _warn(self, msg: str, slug: str | None = None, once: bool = False) -> None: + """Use `msg` as a warning. + + For warning suppression, use `slug` as the shorthand. + + If `once` is true, only show this warning once (determined by the + slug.) + + """ + if not self._no_warn_slugs: + self._no_warn_slugs = list(self.config.disable_warnings) + + if slug in self._no_warn_slugs: + # Don't issue the warning + return + + self._warnings.append(msg) + if slug: + msg = f"{msg} ({slug})" + if self._debug.should("pid"): + msg = f"[{os.getpid()}] {msg}" + warnings.warn(msg, category=CoverageWarning, stacklevel=2) + + if once: + assert slug is not None + self._no_warn_slugs.append(slug) + + def _message(self, msg: str) -> None: + """Write a message to the user, if configured to do so.""" + if self._messages: + print(msg) + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. The type depends on the option + selected. + + As a special case, an `option_name` of ``"paths"`` will return an + dictionary with the entire ``[paths]`` section value. + + .. versionadded:: 4.0 + + """ + return self.config.get_option(option_name) + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. 
For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with ``"run:branch"``. + + `value` is the new value for the option. This should be an + appropriate Python value. For example, use True for booleans, not the + string ``"True"``. + + As an example, calling: + + .. code-block:: python + + cov.set_option("run:branch", True) + + has the same effect as this configuration file: + + .. code-block:: ini + + [run] + branch = True + + As a special case, an `option_name` of ``"paths"`` will replace the + entire ``[paths]`` section. The value should be a dictionary. + + .. versionadded:: 4.0 + + """ + self.config.set_option(option_name, value) + + def load(self) -> None: + """Load previously-collected coverage data from the data file.""" + self._init() + if self._collector is not None: + self._collector.reset() + should_skip = self.config.parallel and not os.path.exists(self.config.data_file) + if not should_skip: + self._init_data(suffix=None) + self._post_init() + if not should_skip: + assert self._data is not None + self._data.read() + + def _init_for_start(self) -> None: + """Initialization for start()""" + # Construct the collector. + concurrency: list[str] = self.config.concurrency or [] + if "multiprocessing" in concurrency: + if self.config.config_file is None: + raise ConfigError("multiprocessing requires a configuration file") + patch_multiprocessing(rcfile=self.config.config_file) + + dycon = self.config.dynamic_context + if not dycon or dycon == "none": + context_switchers = [] + elif dycon == "test_function": + context_switchers = [should_start_context_test_function] + else: + raise ConfigError(f"Don't understand dynamic_context setting: {dycon!r}") + + context_switchers.extend( + plugin.dynamic_context for plugin in self._plugins.context_switchers + ) + + should_start_context = combine_context_switchers(context_switchers) + + self._core = Core( + warn=self._warn, + timid=self.config.timid, + metacov=self._metacov, + ) + self._collector = Collector( + core=self._core, + should_trace=self._should_trace, + check_include=self._check_include_omit_etc, + should_start_context=should_start_context, + file_mapper=self._file_mapper, + branch=self.config.branch, + warn=self._warn, + concurrency=concurrency, + ) + + suffix = self._data_suffix_specified + if suffix: + if not isinstance(suffix, str): + # if data_suffix=True, use .machinename.pid.random + suffix = True + elif self.config.parallel: + if suffix is None: + suffix = True + elif not isinstance(suffix, str): + suffix = bool(suffix) + else: + suffix = None + + self._init_data(suffix) + + assert self._data is not None + self._collector.use_data(self._data, self.config.context) + + # Early warning if we aren't going to be able to support plugins. + if self._plugins.file_tracers and not self._core.supports_plugins: + self._warn( + "Plugin file tracers ({}) aren't supported with {}".format( + ", ".join( + plugin._coverage_plugin_name + for plugin in self._plugins.file_tracers + ), + self._collector.tracer_name(), + ), + ) + for plugin in self._plugins.file_tracers: + plugin._coverage_enabled = False + + # Create the file classifying substructure. 
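+        # InOrOut applies the source/include/omit rules; _should_trace()
+        # and _check_include_omit_etc() above delegate to it.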
+ self._inorout = InOrOut( + config=self.config, + warn=self._warn, + debug=(self._debug if self._debug.should("trace") else None), + include_namespace_packages=self.config.include_namespace_packages, + ) + self._inorout.plugins = self._plugins + self._inorout.disp_class = self._core.file_disposition_class + + # It's useful to write debug info after initing for start. + self._should_write_debug = True + + # Register our clean-up handlers. + atexit.register(self._atexit) + if self.config.sigterm: + is_main = (threading.current_thread() == threading.main_thread()) + if is_main and not env.WINDOWS: + # The Python docs seem to imply that SIGTERM works uniformly even + # on Windows, but that's not my experience, and this agrees: + # https://stackoverflow.com/questions/35772001/x/35792192#35792192 + self._old_sigterm = signal.signal( # type: ignore[assignment] + signal.SIGTERM, self._on_sigterm, + ) + + def _init_data(self, suffix: str | bool | None) -> None: + """Create a data file if we don't have one yet.""" + if self._data is None: + # Create the data file. We do this at construction time so that the + # data file will be written into the directory where the process + # started rather than wherever the process eventually chdir'd to. + ensure_dir_for_file(self.config.data_file) + self._data = CoverageData( + basename=self.config.data_file, + suffix=suffix, + warn=self._warn, + debug=self._debug, + no_disk=self._no_disk, + ) + + def start(self) -> None: + """Start measuring code coverage. + + Coverage measurement is only collected in functions called after + :meth:`start` is invoked. Statements in the same scope as + :meth:`start` won't be measured. + + Once you invoke :meth:`start`, you must also call :meth:`stop` + eventually, or your process might not shut down cleanly. + + The :meth:`collect` method is a context manager to handle both + starting and stopping collection. + + """ + self._init() + if not self._inited_for_start: + self._inited_for_start = True + self._init_for_start() + self._post_init() + + assert self._collector is not None + assert self._inorout is not None + + # Issue warnings for possible problems. + self._inorout.warn_conflicting_settings() + + # See if we think some code that would eventually be measured has + # already been imported. + if self._warn_preimported_source: + self._inorout.warn_already_imported_files() + + if self._auto_load: + self.load() + + self._collector.start() + self._started = True + self._instances.append(self) + + def stop(self) -> None: + """Stop measuring code coverage.""" + if self._instances: + if self._instances[-1] is self: + self._instances.pop() + if self._started: + assert self._collector is not None + self._collector.stop() + self._started = False + + @contextlib.contextmanager + def collect(self) -> Iterator[None]: + """A context manager to start/stop coverage measurement collection. + + .. 
versionadded:: 7.3 + + """ + self.start() + try: + yield + finally: + self.stop() # pragma: nested + + def _atexit(self, event: str = "atexit") -> None: + """Clean up on process shutdown.""" + if self._debug.should("process"): + self._debug.write(f"{event}: pid: {os.getpid()}, instance: {self!r}") + if self._started: + self.stop() + if self._auto_save or event == "sigterm": + self.save() + + def _on_sigterm(self, signum_unused: int, frame_unused: FrameType | None) -> None: + """A handler for signal.SIGTERM.""" + self._atexit("sigterm") + # Statements after here won't be seen by metacov because we just wrote + # the data, and are about to kill the process. + signal.signal(signal.SIGTERM, self._old_sigterm) # pragma: not covered + os.kill(os.getpid(), signal.SIGTERM) # pragma: not covered + + def erase(self) -> None: + """Erase previously collected coverage data. + + This removes the in-memory data collected in this session as well as + discarding the data file. + + """ + self._init() + self._post_init() + if self._collector is not None: + self._collector.reset() + self._init_data(suffix=None) + assert self._data is not None + self._data.erase(parallel=self.config.parallel) + self._data = None + self._inited_for_start = False + + def switch_context(self, new_context: str) -> None: + """Switch to a new dynamic context. + + `new_context` is a string to use as the :ref:`dynamic context + ` label for collected data. If a :ref:`static + context ` is in use, the static and dynamic context + labels will be joined together with a pipe character. + + Coverage collection must be started already. + + .. versionadded:: 5.0 + + """ + if not self._started: # pragma: part started + raise CoverageException("Cannot switch context, coverage is not started") + + assert self._collector is not None + if self._collector.should_start_context: + self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True) + + self._collector.switch_context(new_context) + + def clear_exclude(self, which: str = "exclude") -> None: + """Clear the exclude list.""" + self._init() + setattr(self.config, which + "_list", []) + self._exclude_regex_stale() + + def exclude(self, regex: str, which: str = "exclude") -> None: + """Exclude source lines from execution consideration. + + A number of lists of regular expressions are maintained. Each list + selects lines that are treated differently during reporting. + + `which` determines which list is modified. The "exclude" list selects + lines that are not considered executable at all. The "partial" list + indicates lines with branches that are not taken. + + `regex` is a regular expression. The regex is added to the specified + list. If any of the regexes in the list is found in a line, the line + is marked for special treatment during reporting. + + """ + self._init() + excl_list = getattr(self.config, which + "_list") + excl_list.append(regex) + self._exclude_regex_stale() + + def _exclude_regex_stale(self) -> None: + """Drop all the compiled exclusion regexes, a list was modified.""" + self._exclude_re.clear() + + def _exclude_regex(self, which: str) -> str: + """Return a regex string for the given exclusion list.""" + if which not in self._exclude_re: + excl_list = getattr(self.config, which + "_list") + self._exclude_re[which] = join_regex(excl_list) + return self._exclude_re[which] + + def get_exclude_list(self, which: str = "exclude") -> list[str]: + """Return a list of excluded regex strings. + + `which` indicates which list is desired. 
See :meth:`exclude` for the + lists that are available, and their meaning. + + """ + self._init() + return cast(list[str], getattr(self.config, which + "_list")) + + def save(self) -> None: + """Save the collected coverage data to the data file.""" + data = self.get_data() + data.write() + + def _make_aliases(self) -> PathAliases: + """Create a PathAliases from our configuration.""" + aliases = PathAliases( + debugfn=(self._debug.write if self._debug.should("pathmap") else None), + relative=self.config.relative_files, + ) + for paths in self.config.paths.values(): + result = paths[0] + for pattern in paths[1:]: + aliases.add(pattern, result) + return aliases + + def combine( + self, + data_paths: Iterable[str] | None = None, + strict: bool = False, + keep: bool = False, + ) -> None: + """Combine together a number of similarly-named coverage data files. + + All coverage data files whose name starts with `data_file` (from the + coverage() constructor) will be read, and combined together into the + current measurements. + + `data_paths` is a list of files or directories from which data should + be combined. If no list is passed, then the data files from the + directory indicated by the current data file (probably the current + directory) will be combined. + + If `strict` is true, then it is an error to attempt to combine when + there are no data files to combine. + + If `keep` is true, then original input data files won't be deleted. + + .. versionadded:: 4.0 + The `data_paths` parameter. + + .. versionadded:: 4.3 + The `strict` parameter. + + .. versionadded: 5.5 + The `keep` parameter. + """ + self._init() + self._init_data(suffix=None) + self._post_init() + self.get_data() + + assert self._data is not None + combine_parallel_data( + self._data, + aliases=self._make_aliases(), + data_paths=data_paths, + strict=strict, + keep=keep, + message=self._message, + ) + + def get_data(self) -> CoverageData: + """Get the collected data. + + Also warn about various problems collecting data. + + Returns a :class:`coverage.CoverageData`, the collected coverage data. + + .. versionadded:: 4.0 + + """ + self._init() + self._init_data(suffix=None) + self._post_init() + + if self._collector is not None: + for plugin in self._plugins: + if not plugin._coverage_enabled: + self._collector.plugin_was_disabled(plugin) + + if self._collector.flush_data(): + self._post_save_work() + + assert self._data is not None + return self._data + + def _post_save_work(self) -> None: + """After saving data, look for warnings, post-work, etc. + + Warn about things that should have happened but didn't. + Look for un-executed files. + + """ + assert self._data is not None + assert self._inorout is not None + + # If there are still entries in the source_pkgs_unmatched list, + # then we never encountered those packages. + if self._warn_unimported_source: + self._inorout.warn_unimported_source() + + # Find out if we got any data. + if not self._data and self._warn_no_data: + self._warn("No data was collected.", slug="no-data-collected") + + # Touch all the files that could have executed, so that we can + # mark completely un-executed files as 0% covered. + file_paths = collections.defaultdict(list) + for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): + file_path = self._file_mapper(file_path) + file_paths[plugin_name].append(file_path) + for plugin_name, paths in file_paths.items(): + self._data.touch_files(paths, plugin_name) + + # Backward compatibility with version 1. 
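+    # A sketch of the old-style call shapes (module name illustrative):
+    #   fname, stmts, missing, fmt = cov.analysis("mymod.py")
+    #   fname, stmts, excl, missing, fmt = cov.analysis2("mymod.py")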
+ def analysis(self, morf: TMorf) -> tuple[str, list[TLineNo], list[TLineNo], str]: + """Like `analysis2` but doesn't return excluded line numbers.""" + f, s, _, m, mf = self.analysis2(morf) + return f, s, m, mf + + def analysis2( + self, + morf: TMorf, + ) -> tuple[str, list[TLineNo], list[TLineNo], list[TLineNo], str]: + """Analyze a module. + + `morf` is a module or a file name. It will be analyzed to determine + its coverage statistics. The return value is a 5-tuple: + + * The file name for the module. + * A list of line numbers of executable statements. + * A list of line numbers of excluded statements. + * A list of line numbers of statements not run (missing from + execution). + * A readable formatted string of the missing line numbers. + + The analysis uses the source file itself and the current measured + coverage data. + + """ + analysis = self._analyze(morf) + return ( + analysis.filename, + sorted(analysis.statements), + sorted(analysis.excluded), + sorted(analysis.missing), + analysis.missing_formatted(), + ) + + def _analyze(self, morf: TMorf) -> Analysis: + """Analyze a module or file. Private for now.""" + self._init() + self._post_init() + + data = self.get_data() + file_reporter = self._get_file_reporter(morf) + filename = self._file_mapper(file_reporter.filename) + return analysis_from_file_reporter(data, self.config.precision, file_reporter, filename) + + @functools.lru_cache(maxsize=1) + def _get_file_reporter(self, morf: TMorf) -> FileReporter: + """Get a FileReporter for a module or file name.""" + assert self._data is not None + plugin = None + file_reporter: str | FileReporter = "python" + + if isinstance(morf, str): + mapped_morf = self._file_mapper(morf) + plugin_name = self._data.file_tracer(mapped_morf) + if plugin_name: + plugin = self._plugins.get(plugin_name) + + if plugin: + file_reporter = plugin.file_reporter(mapped_morf) + if file_reporter is None: + raise PluginError( + "Plugin {!r} did not provide a file reporter for {!r}.".format( + plugin._coverage_plugin_name, morf, + ), + ) + + if file_reporter == "python": + file_reporter = PythonFileReporter(morf, self) + + assert isinstance(file_reporter, FileReporter) + return file_reporter + + def _get_file_reporters( + self, + morfs: Iterable[TMorf] | None = None, + ) -> list[tuple[FileReporter, TMorf]]: + """Get FileReporters for a list of modules or file names. + + For each module or file name in `morfs`, find a FileReporter. Return + a list pairing FileReporters with the morfs. + + If `morfs` is a single module or file name, this returns a list of one + FileReporter. If `morfs` is empty or None, then the list of all files + measured is used to find the FileReporters. + + """ + assert self._data is not None + if not morfs: + morfs = self._data.measured_files() + + # Be sure we have a collection. 
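+        # (A single module or file name is wrapped into a one-element list.)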
+ if not isinstance(morfs, (list, tuple, set)): + morfs = [morfs] # type: ignore[list-item] + + return [(self._get_file_reporter(morf), morf) for morf in morfs] + + def _prepare_data_for_reporting(self) -> None: + """Re-map data before reporting, to get implicit "combine" behavior.""" + if self.config.paths: + mapped_data = CoverageData(warn=self._warn, debug=self._debug, no_disk=True) + if self._data is not None: + mapped_data.update(self._data, map_path=self._make_aliases().map) + self._data = mapped_data + + def report( + self, + morfs: Iterable[TMorf] | None = None, + show_missing: bool | None = None, + ignore_errors: bool | None = None, + file: IO[str] | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + skip_covered: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, + sort: str | None = None, + output_format: str | None = None, + ) -> float: + """Write a textual summary report to `file`. + + Each module in `morfs` is listed, with counts of statements, executed + statements, missing statements, and a list of lines missed. + + If `show_missing` is true, then details of which lines or branches are + missing will be included in the report. If `ignore_errors` is true, + then a failure while reporting a single file will not stop the entire + report. + + `file` is a file-like object, suitable for writing. + + `output_format` determines the format, either "text" (the default), + "markdown", or "total". + + `include` is a list of file name patterns. Files that match will be + included in the report. Files matching `omit` will not be included in + the report. + + If `skip_covered` is true, don't report on files with 100% coverage. + + If `skip_empty` is true, don't report on empty files (those that have + no statements). + + `contexts` is a list of regular expression strings. Only data from + :ref:`dynamic contexts ` that match one of those + expressions (using :func:`re.search `) will be + included in the report. + + `precision` is the number of digits to display after the decimal + point for percentages. + + All of the arguments default to the settings read from the + :ref:`configuration file `. + + Returns a float, the total percentage covered. + + .. versionadded:: 4.0 + The `skip_covered` parameter. + + .. versionadded:: 5.0 + The `contexts` and `skip_empty` parameters. + + .. versionadded:: 5.2 + The `precision` parameter. + + .. versionadded:: 7.0 + The `format` parameter. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + show_missing=show_missing, + skip_covered=skip_covered, + report_contexts=contexts, + skip_empty=skip_empty, + precision=precision, + sort=sort, + format=output_format, + ): + reporter = SummaryReporter(self) + return reporter.report(morfs, outfile=file) + + def annotate( + self, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + ) -> None: + """Annotate a list of modules. + + Each module in `morfs` is annotated. The source is written to a new + file, named with a ",cover" suffix, with each line prefixed with a + marker to indicate the coverage of the line. Covered lines have ">", + excluded lines have "-", and missing lines have "!". 
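+
+        For example, to annotate everything measured into one directory
+        (directory name illustrative)::
+
+            cov.annotate(directory="annotated")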
+ + See :meth:`report` for other arguments. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + report_contexts=contexts, + ): + reporter = AnnotateReporter(self) + reporter.report(morfs, directory=directory) + + def html_report( + self, + morfs: Iterable[TMorf] | None = None, + directory: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + extra_css: str | None = None, + title: str | None = None, + skip_covered: bool | None = None, + show_contexts: bool | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + precision: int | None = None, + ) -> float: + """Generate an HTML report. + + The HTML is written to `directory`. The file "index.html" is the + overview starting point, with links to more detailed pages for + individual modules. + + `extra_css` is a path to a file of other CSS to apply on the page. + It will be copied into the HTML directory. + + `title` is a text string (not HTML) to use as the title of the HTML + report. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + .. note:: + + The HTML report files are generated incrementally based on the + source files and coverage results. If you modify the report files, + the changes will not be considered. You should be careful about + changing the files in the report folder. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + html_dir=directory, + extra_css=extra_css, + html_title=title, + html_skip_covered=skip_covered, + show_contexts=show_contexts, + report_contexts=contexts, + html_skip_empty=skip_empty, + precision=precision, + ): + reporter = HtmlReporter(self) + ret = reporter.report(morfs) + return ret + + def xml_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + skip_empty: bool | None = None, + ) -> float: + """Generate an XML report of coverage results. + + The report is compatible with Cobertura reports. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + Returns a float, the total percentage covered. + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + xml_output=outfile, + report_contexts=contexts, + skip_empty=skip_empty, + ): + return render_report(self.config.xml_output, XmlReporter(self), morfs, self._message) + + def json_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + pretty_print: bool | None = None, + show_contexts: bool | None = None, + ) -> float: + """Generate a JSON report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + `pretty_print` is a boolean, whether to pretty-print the JSON output or not. + + See :meth:`report` for other arguments. 
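+
+        For example (output path illustrative)::
+
+            total = cov.json_report(outfile="coverage.json", pretty_print=True)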
+ + Returns a float, the total percentage covered. + + .. versionadded:: 5.0 + + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + json_output=outfile, + report_contexts=contexts, + json_pretty_print=pretty_print, + json_show_contexts=show_contexts, + ): + return render_report(self.config.json_output, JsonReporter(self), morfs, self._message) + + def lcov_report( + self, + morfs: Iterable[TMorf] | None = None, + outfile: str | None = None, + ignore_errors: bool | None = None, + omit: str | list[str] | None = None, + include: str | list[str] | None = None, + contexts: list[str] | None = None, + ) -> float: + """Generate an LCOV report of coverage results. + + Each module in `morfs` is included in the report. `outfile` is the + path to write the file to, "-" will write to stdout. + + See :meth:`report` for other arguments. + + .. versionadded:: 6.3 + """ + self._prepare_data_for_reporting() + with override_config( + self, + ignore_errors=ignore_errors, + report_omit=omit, + report_include=include, + lcov_output=outfile, + report_contexts=contexts, + ): + return render_report(self.config.lcov_output, LcovReporter(self), morfs, self._message) + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Return a list of (key, value) pairs showing internal information.""" + + import coverage as covmod + + self._init() + self._post_init() + + def plugin_info(plugins: list[Any]) -> list[str]: + """Make an entry for the sys_info from a list of plug-ins.""" + entries = [] + for plugin in plugins: + entry = plugin._coverage_plugin_name + if not plugin._coverage_enabled: + entry += " (disabled)" + entries.append(entry) + return entries + + info = [ + ("coverage_version", covmod.__version__), + ("coverage_module", covmod.__file__), + ("core", self._collector.tracer_name() if self._collector is not None else "-none-"), + ("CTracer", "available" if HAS_CTRACER else "unavailable"), + ("plugins.file_tracers", plugin_info(self._plugins.file_tracers)), + ("plugins.configurers", plugin_info(self._plugins.configurers)), + ("plugins.context_switchers", plugin_info(self._plugins.context_switchers)), + ("configs_attempted", self.config.config_files_attempted), + ("configs_read", self.config.config_files_read), + ("config_file", self.config.config_file), + ("config_contents", + repr(self.config._config_contents) if self.config._config_contents else "-none-", + ), + ("data_file", self._data.data_filename() if self._data is not None else "-none-"), + ("python", sys.version.replace("\n", "")), + ("platform", platform.platform()), + ("implementation", platform.python_implementation()), + ("gil_enabled", getattr(sys, '_is_gil_enabled', lambda: True)()), + ("executable", sys.executable), + ("def_encoding", sys.getdefaultencoding()), + ("fs_encoding", sys.getfilesystemencoding()), + ("pid", os.getpid()), + ("cwd", os.getcwd()), + ("path", sys.path), + ("environment", [f"{k} = {v}" for k, v in relevant_environment_display(os.environ)]), + ("command_line", " ".join(getattr(sys, "argv", ["-none-"]))), + ] + + if self._inorout is not None: + info.extend(self._inorout.sys_info()) + + info.extend(CoverageData.sys_info()) + + return info + + +# Mega debugging... +# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. 
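+# For example, running with COVERAGE_DEBUG_CALLS=1 wraps most Coverage
+# methods with show_calls(show_args=True) from coverage.debug; get_data
+# is excluded via butnot.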
+if int(os.getenv("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging + from coverage.debug import decorate_methods, show_calls + + Coverage = decorate_methods( # type: ignore[misc] + show_calls(show_args=True), + butnot=["get_data"], + )(Coverage) + + +def process_startup() -> Coverage | None: + """Call this at Python start-up to perhaps measure coverage. + + If the environment variable COVERAGE_PROCESS_START is defined, coverage + measurement is started. The value of the variable is the config file + to use. + + There are two ways to configure your Python installation to invoke this + function when Python starts: + + #. Create or append to sitecustomize.py to add these lines:: + + import coverage + coverage.process_startup() + + #. Create a .pth file in your Python installation containing:: + + import coverage; coverage.process_startup() + + Returns the :class:`Coverage` instance that was started, or None if it was + not started by this call. + + """ + cps = os.getenv("COVERAGE_PROCESS_START") + if not cps: + # No request for coverage, nothing to do. + return None + + # This function can be called more than once in a process. This happens + # because some virtualenv configurations make the same directory visible + # twice in sys.path. This means that the .pth file will be found twice, + # and executed twice, executing this function twice. We set a global + # flag (an attribute on this function) to indicate that coverage.py has + # already been started, so we can avoid doing it twice. + # + # https://github.com/nedbat/coveragepy/issues/340 has more details. + + if hasattr(process_startup, "coverage"): + # We've annotated this function before, so we must have already + # started coverage.py in this process. Nothing to do. + return None + + cov = Coverage(config_file=cps) + process_startup.coverage = cov # type: ignore[attr-defined] + cov._warn_no_data = False + cov._warn_unimported_source = False + cov._warn_preimported_source = False + cov._auto_save = True + cov.start() + + return cov + + +def _prevent_sub_process_measurement() -> None: + """Stop any subprocess auto-measurement from writing data.""" + auto_created_coverage = getattr(process_startup, "coverage", None) + if auto_created_coverage is not None: + auto_created_coverage._auto_save = False diff --git a/.venv/Lib/site-packages/coverage/core.py b/.venv/Lib/site-packages/coverage/core.py new file mode 100644 index 00000000..b8916b68 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/core.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Management of core choices.""" + +from __future__ import annotations + +import os +import sys +from typing import Any + +from coverage import env +from coverage.disposition import FileDisposition +from coverage.exceptions import ConfigError +from coverage.pytracer import PyTracer +from coverage.sysmon import SysMonitor +from coverage.types import ( + TFileDisposition, + Tracer, + TWarnFn, +) + + +try: + # Use the C extension code when we can, for speed. + from coverage.tracer import CTracer, CFileDisposition + HAS_CTRACER = True +except ImportError: + # Couldn't import the C extension, maybe it isn't built. + if os.getenv("COVERAGE_CORE") == "ctrace": # pragma: part covered + # During testing, we use the COVERAGE_CORE environment variable + # to indicate that we've fiddled with the environment to test this + # fallback code. 
If we thought we had a C tracer, but couldn't import + # it, then exit quickly and clearly instead of dribbling confusing + # errors. I'm using sys.exit here instead of an exception because an + # exception here causes all sorts of other noise in unittest. + sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n") + sys.exit(1) + HAS_CTRACER = False + + +class Core: + """Information about the central technology enabling execution measurement.""" + + tracer_class: type[Tracer] + tracer_kwargs: dict[str, Any] + file_disposition_class: type[TFileDisposition] + supports_plugins: bool + packed_arcs: bool + systrace: bool + + def __init__(self, + warn: TWarnFn, + timid: bool, + metacov: bool, + ) -> None: + # Defaults + self.tracer_kwargs = {} + + core_name: str | None + if timid: + core_name = "pytrace" + else: + core_name = os.getenv("COVERAGE_CORE") + + if core_name == "sysmon" and not env.PYBEHAVIOR.pep669: + warn("sys.monitoring isn't available, using default core", slug="no-sysmon") + core_name = None + + if not core_name: + # Once we're comfortable with sysmon as a default: + # if env.PYBEHAVIOR.pep669 and self.should_start_context is None: + # core_name = "sysmon" + if HAS_CTRACER: + core_name = "ctrace" + else: + core_name = "pytrace" + + if core_name == "sysmon": + self.tracer_class = SysMonitor + self.tracer_kwargs = {"tool_id": 3 if metacov else 1} + self.file_disposition_class = FileDisposition + self.supports_plugins = False + self.packed_arcs = False + self.systrace = False + elif core_name == "ctrace": + self.tracer_class = CTracer + self.file_disposition_class = CFileDisposition + self.supports_plugins = True + self.packed_arcs = True + self.systrace = True + elif core_name == "pytrace": + self.tracer_class = PyTracer + self.file_disposition_class = FileDisposition + self.supports_plugins = False + self.packed_arcs = False + self.systrace = True + else: + raise ConfigError(f"Unknown core value: {core_name!r}") diff --git a/.venv/Lib/site-packages/coverage/data.py b/.venv/Lib/site-packages/coverage/data.py new file mode 100644 index 00000000..9baab8ed --- /dev/null +++ b/.venv/Lib/site-packages/coverage/data.py @@ -0,0 +1,228 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Coverage data for coverage.py. + +This file had the 4.x JSON data support, which is now gone. This file still +has storage-agnostic helpers, and is kept to avoid changing too many imports. +CoverageData is now defined in sqldata.py, and imported here to keep the +imports working. + +""" + +from __future__ import annotations + +import functools +import glob +import hashlib +import os.path + +from typing import Callable +from collections.abc import Iterable + +from coverage.exceptions import CoverageException, NoDataError +from coverage.files import PathAliases +from coverage.misc import Hasher, file_be_gone, human_sorted, plural +from coverage.sqldata import CoverageData + + +def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]: + """Return a dict summarizing the line coverage data. + + Keys are based on the file names, and values are the number of executed + lines. If `fullpath` is true, then the keys are the full pathnames of + the files, otherwise they are the basenames of the files. + + Returns a dict mapping file names to counts of lines. 
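+
+    For example (file names and counts illustrative)::
+
+        {"mod_a.py": 17, "mod_b.py": 42}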
+ + """ + summ = {} + filename_fn: Callable[[str], str] + if fullpath: + # pylint: disable=unnecessary-lambda-assignment + filename_fn = lambda f: f + else: + filename_fn = os.path.basename + for filename in data.measured_files(): + lines = data.lines(filename) + assert lines is not None + summ[filename_fn(filename)] = len(lines) + return summ + + +def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None: + """Contribute `filename`'s data to the `hasher`. + + `hasher` is a `coverage.misc.Hasher` instance to be updated with + the file's data. It should only get the results data, not the run + data. + + """ + if data.has_arcs(): + hasher.update(sorted(data.arcs(filename) or [])) + else: + hasher.update(sorted_lines(data, filename)) + hasher.update(data.file_tracer(filename)) + + +def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]: + """Make a list of data files to be combined. + + `data_file` is a path to a data file. `data_paths` is a list of files or + directories of files. + + Returns a list of absolute file paths. + """ + data_dir, local = os.path.split(os.path.abspath(data_file)) + + data_paths = data_paths or [data_dir] + files_to_combine = [] + for p in data_paths: + if os.path.isfile(p): + files_to_combine.append(os.path.abspath(p)) + elif os.path.isdir(p): + pattern = glob.escape(os.path.join(os.path.abspath(p), local)) +".*" + files_to_combine.extend(glob.glob(pattern)) + else: + raise NoDataError(f"Couldn't combine from non-existent path '{p}'") + + # SQLite might have made journal files alongside our database files. + # We never want to combine those. + files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")] + + # Sorting isn't usually needed, since it shouldn't matter what order files + # are combined, but sorting makes tests more predictable, and makes + # debugging more understandable when things go wrong. + return sorted(files_to_combine) + + +def combine_parallel_data( + data: CoverageData, + aliases: PathAliases | None = None, + data_paths: Iterable[str] | None = None, + strict: bool = False, + keep: bool = False, + message: Callable[[str], None] | None = None, +) -> None: + """Combine a number of data files together. + + `data` is a CoverageData. + + Treat `data.filename` as a file prefix, and combine the data from all + of the data files starting with that prefix plus a dot. + + If `aliases` is provided, it's a `PathAliases` object that is used to + re-map paths to match the local machine's. + + If `data_paths` is provided, it is a list of directories or files to + combine. Directories are searched for files that start with + `data.filename` plus dot as a prefix, and those files are combined. + + If `data_paths` is not provided, then the directory portion of + `data.filename` is used as the directory to search for data files. + + Unless `keep` is True every data file found and combined is then deleted + from disk. If a file cannot be read, a warning will be issued, and the + file will not be deleted. + + If `strict` is true, and no files are found to combine, an error is + raised. + + `message` is a function to use for printing messages to the user. 
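+
+    For example, after parallel runs leave files like
+    ".coverage.hostname.1234.567890" (name illustrative), calling this
+    merges them into `data` and deletes the inputs unless `keep` is true.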
+ + """ + files_to_combine = combinable_files(data.base_filename(), data_paths) + + if strict and not files_to_combine: + raise NoDataError("No data to combine") + + if aliases is None: + map_path = None + else: + map_path = functools.cache(aliases.map) + + file_hashes = set() + combined_any = False + + for f in files_to_combine: + if f == data.data_filename(): + # Sometimes we are combining into a file which is one of the + # parallel files. Skip that file. + if data._debug.should("dataio"): + data._debug.write(f"Skipping combining ourself: {f!r}") + continue + + try: + rel_file_name = os.path.relpath(f) + except ValueError: + # ValueError can be raised under Windows when os.getcwd() returns a + # folder from a different drive than the drive of f, in which case + # we print the original value of f instead of its relative path + rel_file_name = f + + with open(f, "rb") as fobj: + hasher = hashlib.new("sha3_256", usedforsecurity=False) + hasher.update(fobj.read()) + sha = hasher.digest() + combine_this_one = sha not in file_hashes + + delete_this_one = not keep + if combine_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Combining data file {f!r}") + file_hashes.add(sha) + try: + new_data = CoverageData(f, debug=data._debug) + new_data.read() + except CoverageException as exc: + if data._warn: + # The CoverageException has the file name in it, so just + # use the message as the warning. + data._warn(str(exc)) + if message: + message(f"Couldn't combine data file {rel_file_name}: {exc}") + delete_this_one = False + else: + data.update(new_data, map_path=map_path) + combined_any = True + if message: + message(f"Combined data file {rel_file_name}") + else: + if message: + message(f"Skipping duplicate data {rel_file_name}") + + if delete_this_one: + if data._debug.should("dataio"): + data._debug.write(f"Deleting data file {f!r}") + file_be_gone(f) + + if strict and not combined_any: + raise NoDataError("No usable data files") + + +def debug_data_file(filename: str) -> None: + """Implementation of 'coverage debug data'.""" + data = CoverageData(filename) + filename = data.data_filename() + print(f"path: {filename}") + if not os.path.exists(filename): + print("No data collected: file doesn't exist") + return + data.read() + print(f"has_arcs: {data.has_arcs()!r}") + summary = line_counts(data, fullpath=True) + filenames = human_sorted(summary.keys()) + nfiles = len(filenames) + print(f"{nfiles} file{plural(nfiles)}:") + for f in filenames: + line = f"{f}: {summary[f]} line{plural(summary[f])}" + plugin = data.file_tracer(f) + if plugin: + line += f" [{plugin}]" + print(line) + + +def sorted_lines(data: CoverageData, filename: str) -> list[int]: + """Get the sorted lines for a file, for tests.""" + lines = data.lines(filename) + return sorted(lines or []) diff --git a/.venv/Lib/site-packages/coverage/debug.py b/.venv/Lib/site-packages/coverage/debug.py new file mode 100644 index 00000000..cf9310dc --- /dev/null +++ b/.venv/Lib/site-packages/coverage/debug.py @@ -0,0 +1,613 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Control of and utilities for debugging.""" + +from __future__ import annotations + +import atexit +import contextlib +import functools +import inspect +import itertools +import os +import pprint +import re +import reprlib +import sys +import traceback +import types +import _thread + +from typing import ( + overload, + Any, Callable, IO, +) +from 
collections.abc import Iterable, Iterator, Mapping
+
+from coverage.misc import human_sorted_items, isolate_module
+from coverage.types import AnyCallable, TWritable
+
+os = isolate_module(os)
+
+
+# When debugging, it can be helpful to force some options, especially when
+# debugging the configuration mechanisms you usually use to control debugging!
+# This is a list of forced debugging options.
+FORCED_DEBUG: list[str] = []
+FORCED_DEBUG_FILE = None
+
+
+class DebugControl:
+    """Control and output for debugging."""
+
+    show_repr_attr = False  # For auto_repr
+
+    def __init__(
+        self,
+        options: Iterable[str],
+        output: IO[str] | None,
+        file_name: str | None = None,
+    ) -> None:
+        """Configure the options and output file for debugging."""
+        self.options = list(options) + FORCED_DEBUG
+        self.suppress_callers = False
+
+        filters = []
+        if self.should("process"):
+            filters.append(CwdTracker().filter)
+            filters.append(ProcessTracker().filter)
+        if self.should("pytest"):
+            filters.append(PytestTracker().filter)
+        if self.should("pid"):
+            filters.append(add_pid_and_tid)
+
+        self.output = DebugOutputFile.get_one(
+            output,
+            file_name=file_name,
+            filters=filters,
+        )
+        self.raw_output = self.output.outfile
+
+    def __repr__(self) -> str:
+        return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>"
+
+    def should(self, option: str) -> bool:
+        """Decide whether to output debug information in category `option`."""
+        if option == "callers" and self.suppress_callers:
+            return False
+        return (option in self.options)
+
+    @contextlib.contextmanager
+    def without_callers(self) -> Iterator[None]:
+        """A context manager to prevent call stacks from being logged."""
+        old = self.suppress_callers
+        self.suppress_callers = True
+        try:
+            yield
+        finally:
+            self.suppress_callers = old
+
+    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
+        """Write a line of debug output.
+
+        `msg` is the line to write. A newline will be appended.
+
+        If `exc` is provided, a stack trace of the exception will be written
+        after the message.
+
+        """
+        self.output.write(msg + "\n")
+        if exc is not None:
+            self.output.write("".join(traceback.format_exception(None, exc, exc.__traceback__)))
+        if self.should("self"):
+            caller_self = inspect.stack()[1][0].f_locals.get("self")
+            if caller_self is not None:
+                self.output.write(f"self: {caller_self!r}\n")
+        if self.should("callers"):
+            dump_stack_frames(out=self.output, skip=1)
+        self.output.flush()
+
+
+class NoDebugging(DebugControl):
+    """A replacement for DebugControl that will never try to do anything."""
+    def __init__(self) -> None:
+        # pylint: disable=super-init-not-called
+        ...
+
+    def should(self, option: str) -> bool:
+        """Should we write debug messages? Never."""
+        return False
+
+    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
+        """This will never be called."""
+        raise AssertionError("NoDebugging.write should never be called.")
+
+
+def info_header(label: str) -> str:
+    """Make a nice header string."""
+    return "--{:-<60s}".format(" "+label+" ")
+
+
+def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]:
+    """Produce a sequence of formatted lines from info.
+
+    `info` is a sequence of pairs (label, data). The produced lines are
+    nicely formatted, ready to print.
+
+    """
+    info = list(info)
+    if not info:
+        return
+    label_len = 30
+    assert all(len(l) < label_len for l, _ in info)
+    for label, data in info:
+        if data == []:
+            data = "-none-"
+        if isinstance(data, tuple) and len(repr(tuple(data))) < 30:
+            # Convert to tuple to scrub namedtuples.
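+            # (Plain tuples repr without field names, unlike namedtuples.)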
+            yield "%*s: %r" % (label_len, label, tuple(data))
+        elif isinstance(data, (list, set, tuple)):
+            prefix = "%*s:" % (label_len, label)
+            for e in data:
+                yield "%*s %s" % (label_len+1, prefix, e)
+                prefix = ""
+        else:
+            yield "%*s: %s" % (label_len, label, data)
+
+
+def write_formatted_info(
+    write: Callable[[str], None],
+    header: str,
+    info: Iterable[tuple[str, Any]],
+) -> None:
+    """Write a sequence of (label,data) pairs nicely.
+
+    `write` is a function write(str) that accepts each line of output.
+    `header` is a string to start the section. `info` is a sequence of
+    (label, data) pairs, where label is a str, and data can be a single
+    value, or a list/set/tuple.
+
+    """
+    write(info_header(header))
+    for line in info_formatter(info):
+        write(f" {line}")
+
+
+def exc_one_line(exc: Exception) -> str:
+    """Get a one-line summary of an exception, including class name and message."""
+    lines = traceback.format_exception_only(type(exc), exc)
+    return "|".join(l.rstrip() for l in lines)
+
+
+_FILENAME_REGEXES: list[tuple[str, str]] = [
+    (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"),
+]
+_FILENAME_SUBS: list[tuple[str, str]] = []
+
+@overload
+def short_filename(filename: str) -> str:
+    pass
+
+@overload
+def short_filename(filename: None) -> None:
+    pass
+
+def short_filename(filename: str | None) -> str | None:
+    """Shorten a file name. Directories are replaced by prefixes like 'syspath:'"""
+    if not _FILENAME_SUBS:
+        for pathdir in sys.path:
+            _FILENAME_SUBS.append((pathdir, "syspath:"))
+        import coverage
+        _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:"))
+        _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True)
+    if filename is not None:
+        for pat, sub in _FILENAME_REGEXES:
+            filename = re.sub(pat, sub, filename)
+        for before, after in _FILENAME_SUBS:
+            filename = filename.replace(before, after)
+    return filename
+
+
+def short_stack(
+    skip: int = 0,
+    full: bool = False,
+    frame_ids: bool = False,
+    short_filenames: bool = False,
+) -> str:
+    """Return a string summarizing the call stack.
+
+    The string is multi-line, with one line per stack frame. Each line shows
+    the function name, the file name, and the line number:
+
+        ...
+        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py:95
+        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py:81
+        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py:159
+        ...
+
+    `skip` is the number of closest immediate frames to skip, so that debugging
+    functions can call this and not be included in the result.
+
+    If `full` is true, then include all frames. Otherwise, initial "boring"
+    frames (ones in site-packages and earlier) are omitted.
+
+    `short_filenames` will shorten filenames using `short_filename`, to reduce
+    the amount of repetitive noise in stack traces.
+
+    """
+    # Regexes in initial frames that we don't care about.
+    BORING_PRELUDE = [
+        "<string>",             # pytest-xdist has string execution.
+        r"\bigor.py$",          # Our test runner.
+        r"\bsite-packages\b",   # pytest etc getting to our tests.
+ ] + + stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1] + if not full: + for pat in BORING_PRELUDE: + stack = itertools.dropwhile( + (lambda fi, pat=pat: re.search(pat, fi.filename)), # type: ignore[misc] + stack, + ) + lines = [] + for frame_info in stack: + line = f"{frame_info.function:>30s} : " + if frame_ids: + line += f"{id(frame_info.frame):#x} " + filename = frame_info.filename + if short_filenames: + filename = short_filename(filename) + line += f"{filename}:{frame_info.lineno}" + lines.append(line) + return "\n".join(lines) + + +def dump_stack_frames(out: TWritable, skip: int = 0) -> None: + """Print a summary of the stack to `out`.""" + out.write(short_stack(skip=skip+1) + "\n") + + +def clipped_repr(text: str, numchars: int = 50) -> str: + """`repr(text)`, but limited to `numchars`.""" + r = reprlib.Repr() + r.maxstring = numchars + return r.repr(text) + + +def short_id(id64: int) -> int: + """Given a 64-bit id, make a shorter 16-bit one.""" + id16 = 0 + for offset in range(0, 64, 16): + id16 ^= id64 >> offset + return id16 & 0xFFFF + + +def add_pid_and_tid(text: str) -> str: + """A filter to add pid and tid to debug messages.""" + # Thread ids are useful, but too long. Make a shorter one. + tid = f"{short_id(_thread.get_ident()):04x}" + text = f"{os.getpid():5d}.{tid}: {text}" + return text + + +AUTO_REPR_IGNORE = {"$coverage.object_id"} + +def auto_repr(self: Any) -> str: + """A function implementing an automatic __repr__ for debugging.""" + show_attrs = ( + (k, v) for k, v in self.__dict__.items() + if getattr(v, "show_repr_attr", True) + and not inspect.ismethod(v) + and k not in AUTO_REPR_IGNORE + ) + return "<{klass} @{id:#x}{attrs}>".format( + klass=self.__class__.__name__, + id=id(self), + attrs="".join(f" {k}={v!r}" for k, v in show_attrs), + ) + + +def simplify(v: Any) -> Any: # pragma: debugging + """Turn things which are nearly dict/list/etc into dict/list/etc.""" + if isinstance(v, dict): + return {k:simplify(vv) for k, vv in v.items()} + elif isinstance(v, (list, tuple)): + return type(v)(simplify(vv) for vv in v) + elif hasattr(v, "__dict__"): + return simplify({"."+k: v for k, v in v.__dict__.items()}) + else: + return v + + +def pp(v: Any) -> None: # pragma: debugging + """Debug helper to pretty-print data, including SimpleNamespace objects.""" + # Might not be needed in 3.9+ + pprint.pprint(simplify(v)) + + +def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str: + """Run `text` through a series of filters. + + `filters` is a list of functions. Each takes a string and returns a + string. Each is run in turn. After each filter, the text is split into + lines, and each line is passed through the next filter. + + Returns: the final string that results after all of the filters have + run. 
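+
+    For example, with filters=[str.upper], every line of `text` is upcased
+    and any trailing newline is preserved.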
+ + """ + clean_text = text.rstrip() + ending = text[len(clean_text):] + text = clean_text + for filter_fn in filters: + lines = [] + for line in text.splitlines(): + lines.extend(filter_fn(line).splitlines()) + text = "\n".join(lines) + return text + ending + + +class CwdTracker: + """A class to add cwd info to debug messages.""" + def __init__(self) -> None: + self.cwd: str | None = None + + def filter(self, text: str) -> str: + """Add a cwd message for each new cwd.""" + cwd = os.getcwd() + if cwd != self.cwd: + text = f"cwd is now {cwd!r}\n" + text + self.cwd = cwd + return text + + +class ProcessTracker: + """Track process creation for debug logging.""" + def __init__(self) -> None: + self.pid: int = os.getpid() + self.did_welcome = False + + def filter(self, text: str) -> str: + """Add a message about how new processes came to be.""" + welcome = "" + pid = os.getpid() + if self.pid != pid: + welcome = f"New process: forked {self.pid} -> {pid}\n" + self.pid = pid + elif not self.did_welcome: + argv = getattr(sys, "argv", None) + welcome = ( + f"New process: {pid=}, executable: {sys.executable!r}\n" + + f"New process: cmd: {argv!r}\n" + + f"New process parent pid: {os.getppid()!r}\n" + ) + + if welcome: + self.did_welcome = True + return welcome + text + else: + return text + + +class PytestTracker: + """Track the current pytest test name to add to debug messages.""" + def __init__(self) -> None: + self.test_name: str | None = None + + def filter(self, text: str) -> str: + """Add a message when the pytest test changes.""" + test_name = os.getenv("PYTEST_CURRENT_TEST") + if test_name != self.test_name: + text = f"Pytest context: {test_name}\n" + text + self.test_name = test_name + return text + + +class DebugOutputFile: + """A file-like object that includes pid and cwd information.""" + def __init__( + self, + outfile: IO[str] | None, + filters: Iterable[Callable[[str], str]], + ): + self.outfile = outfile + self.filters = list(filters) + self.pid = os.getpid() + + @classmethod + def get_one( + cls, + fileobj: IO[str] | None = None, + file_name: str | None = None, + filters: Iterable[Callable[[str], str]] = (), + interim: bool = False, + ) -> DebugOutputFile: + """Get a DebugOutputFile. + + If `fileobj` is provided, then a new DebugOutputFile is made with it. + + If `fileobj` isn't provided, then a file is chosen (`file_name` if + provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide + singleton DebugOutputFile is made. + + `filters` are the text filters to apply to the stream to annotate with + pids, etc. + + If `interim` is true, then a future `get_one` can replace this one. + + """ + if fileobj is not None: + # Make DebugOutputFile around the fileobj passed. + return cls(fileobj, filters) + + the_one, is_interim = cls._get_singleton_data() + if the_one is None or is_interim: + if file_name is not None: + fileobj = open(file_name, "a", encoding="utf-8") + else: + # $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output + file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) + if file_name in ("stdout", "stderr"): + fileobj = getattr(sys, file_name) + elif file_name: + fileobj = open(file_name, "a", encoding="utf-8") + atexit.register(fileobj.close) + else: + fileobj = sys.stderr + the_one = cls(fileobj, filters) + cls._set_singleton_data(the_one, interim) + + if not(the_one.filters): + the_one.filters = list(filters) + return the_one + + # Because of the way igor.py deletes and re-imports modules, + # this class can be defined more than once. 
But we really want + # a process-wide singleton. So stash it in sys.modules instead of + # on a class attribute. Yes, this is aggressively gross. + + SYS_MOD_NAME = "$coverage.debug.DebugOutputFile.the_one" + SINGLETON_ATTR = "the_one_and_is_interim" + + @classmethod + def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None: + """Set the one DebugOutputFile to rule them all.""" + singleton_module = types.ModuleType(cls.SYS_MOD_NAME) + setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim)) + sys.modules[cls.SYS_MOD_NAME] = singleton_module + + @classmethod + def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]: + """Get the one DebugOutputFile.""" + singleton_module = sys.modules.get(cls.SYS_MOD_NAME) + return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True)) + + @classmethod + def _del_singleton_data(cls) -> None: + """Delete the one DebugOutputFile, just for tests to use.""" + if cls.SYS_MOD_NAME in sys.modules: + del sys.modules[cls.SYS_MOD_NAME] + + def write(self, text: str) -> None: + """Just like file.write, but filter through all our filters.""" + assert self.outfile is not None + self.outfile.write(filter_text(text, self.filters)) + self.outfile.flush() + + def flush(self) -> None: + """Flush our file.""" + assert self.outfile is not None + self.outfile.flush() + + +def log(msg: str, stack: bool = False) -> None: # pragma: debugging + """Write a log message as forcefully as possible.""" + out = DebugOutputFile.get_one(interim=True) + out.write(msg+"\n") + if stack: + dump_stack_frames(out=out, skip=1) + + +def decorate_methods( + decorator: Callable[..., Any], + butnot: Iterable[str] = (), + private: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A class decorator to apply a decorator to methods.""" + def _decorator(cls): # type: ignore[no-untyped-def] + for name, meth in inspect.getmembers(cls, inspect.isroutine): + if name not in cls.__dict__: + continue + if name != "__init__": + if not private and name.startswith("_"): + continue + if name in butnot: + continue + setattr(cls, name, decorator(meth)) + return cls + return _decorator + + +def break_in_pudb(func: AnyCallable) -> AnyCallable: # pragma: debugging + """A function decorator to stop in the debugger for each call.""" + @functools.wraps(func) + def _wrapper(*args: Any, **kwargs: Any) -> Any: + import pudb + sys.stdout = sys.__stdout__ + pudb.set_trace() + return func(*args, **kwargs) + return _wrapper + + +OBJ_IDS = itertools.count() +CALLS = itertools.count() +OBJ_ID_ATTR = "$coverage.object_id" + +def show_calls( + show_args: bool = True, + show_stack: bool = False, + show_return: bool = False, +) -> Callable[..., Any]: # pragma: debugging + """A method decorator to debug-log each call to the function.""" + def _decorator(func: AnyCallable) -> AnyCallable: + @functools.wraps(func) + def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + oid = getattr(self, OBJ_ID_ATTR, None) + if oid is None: + oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}" + setattr(self, OBJ_ID_ATTR, oid) + extra = "" + if show_args: + eargs = ", ".join(map(repr, args)) + ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) + extra += "(" + extra += eargs + if eargs and ekwargs: + extra += ", " + extra += ekwargs + extra += ")" + if show_stack: + extra += " @ " + extra += "; ".join(short_stack(short_filenames=True).splitlines()) + callid = next(CALLS) + msg = f"{oid} {callid:04d} {func.__name__}{extra}\n" + 
DebugOutputFile.get_one(interim=True).write(msg)
+            ret = func(self, *args, **kwargs)
+            if show_return:
+                msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n"
+                DebugOutputFile.get_one(interim=True).write(msg)
+            return ret
+        return _wrapper
+    return _decorator
+
+
+def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]:
+    """Filter environment variables for a debug display.
+
+    Select variables to display (with COV or PY in the name, or HOME, TEMP, or
+    TMP), and also cloak sensitive values with asterisks.
+
+    Arguments:
+        env: a dict of environment variable names and values.
+
+    Returns:
+        A list of pairs (name, value) to show.
+
+    """
+    slugs = {"COV", "PY"}
+    include = {"HOME", "TEMP", "TMP"}
+    cloak = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"}
+
+    to_show = []
+    for name, val in env.items():
+        keep = False
+        if name in include:
+            keep = True
+        elif any(slug in name for slug in slugs):
+            keep = True
+        if keep:
+            if any(slug in name for slug in cloak):
+                val = re.sub(r"\w", "*", val)
+            to_show.append((name, val))
+    return human_sorted_items(to_show)
diff --git a/.venv/Lib/site-packages/coverage/disposition.py b/.venv/Lib/site-packages/coverage/disposition.py
new file mode 100644
index 00000000..7aa15e97
--- /dev/null
+++ b/.venv/Lib/site-packages/coverage/disposition.py
@@ -0,0 +1,58 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Simple value objects for tracking what to do with files."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from coverage.types import TFileDisposition
+
+if TYPE_CHECKING:
+    from coverage.plugin import FileTracer
+
+
+class FileDisposition:
+    """A simple value type for recording what to do with a file."""
+
+    original_filename: str
+    canonical_filename: str
+    source_filename: str | None
+    trace: bool
+    reason: str
+    file_tracer: FileTracer | None
+    has_dynamic_filename: bool
+
+    def __repr__(self) -> str:
+        return f"<FileDisposition {self.original_filename!r}: trace={self.trace}>"
+
+
+# FileDisposition "methods": FileDisposition is a pure value object, so it can
+# be implemented in either C or Python.  Acting on them is done with these
+# functions.
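+#
+# A minimal usage sketch (illustrative only, not part of the upstream file;
+# the file name is hypothetical):
+#
+#     disp = disposition_init(FileDisposition, "src/app.py")
+#     disp.trace = True
+#     disp.source_filename = disp.canonical_filename
+#     disposition_debug_msg(disp)    # -> "Tracing 'src/app.py'"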
+
+def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition:
+    """Construct and initialize a new FileDisposition object."""
+    disp = cls()
+    disp.original_filename = original_filename
+    disp.canonical_filename = original_filename
+    disp.source_filename = None
+    disp.trace = False
+    disp.reason = ""
+    disp.file_tracer = None
+    disp.has_dynamic_filename = False
+    return disp
+
+
+def disposition_debug_msg(disp: TFileDisposition) -> str:
+    """Make a nice debug message of what the FileDisposition is doing."""
+    if disp.trace:
+        msg = f"Tracing {disp.original_filename!r}"
+        if disp.original_filename != disp.source_filename:
+            msg += f" as {disp.source_filename!r}"
+        if disp.file_tracer:
+            msg += f": will be traced by {disp.file_tracer!r}"
+    else:
+        msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
+    return msg
diff --git a/.venv/Lib/site-packages/coverage/env.py b/.venv/Lib/site-packages/coverage/env.py
new file mode 100644
index 00000000..39ec169c
--- /dev/null
+++ b/.venv/Lib/site-packages/coverage/env.py
@@ -0,0 +1,125 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""Determine facts about the environment."""
+
+from __future__ import annotations
+
+import os
+import platform
+import sys
+
+from typing import Any
+from collections.abc import Iterable
+
+# debug_info() at the bottom wants to show all the globals, but not imports.
+# Grab the global names here to know which names to not show. Nothing defined
+# above this line will be in the output.
+_UNINTERESTING_GLOBALS = list(globals())
+# These names also shouldn't be shown.
+_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"]
+
+# Operating systems.
+WINDOWS = sys.platform == "win32"
+LINUX = sys.platform.startswith("linux")
+OSX = sys.platform == "darwin"
+
+# Python implementations.
+CPYTHON = (platform.python_implementation() == "CPython")
+PYPY = (platform.python_implementation() == "PyPy")
+
+# Python versions. We amend version_info with one more value, a zero if an
+# official version, or 1 if built from source beyond an official version.
+# Only use sys.version_info directly where tools like mypy need it to understand
+# version-specific code, otherwise use PYVERSION.
+PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
+
+if PYPY:
+    # Minimum now is 7.3.16
+    PYPYVERSION = sys.pypy_version_info  # type: ignore[attr-defined]
+else:
+    PYPYVERSION = (0,)
+
+# Python behavior.
+class PYBEHAVIOR:
+    """Flags indicating this Python's behavior."""
+
+    # Does Python conform to PEP626, Precise line numbers for debugging and other tools.
+    # https://www.python.org/dev/peps/pep-0626
+    pep626 = (PYVERSION > (3, 10, 0, "alpha", 4))
+
+    # Is "if __debug__" optimized away?
+    optimize_if_debug = not pep626
+
+    # Is "if not __debug__" optimized away? The exact details have changed
+    # across versions.
+    if pep626:
+        optimize_if_not_debug = 1
+    else:
+        optimize_if_not_debug = 2
+
+    # 3.7 changed how functions with only docstrings are numbered.
+    docstring_only_function = (not PYPY) and (PYVERSION <= (3, 10))
+
+    # Lines after break/continue/return/raise are no longer compiled into the
+    # bytecode.  They used to be marked as missing, now they aren't executable.
+    omit_after_jump = pep626 or PYPY
+
+    # PyPy has always omitted statements after return.
+    omit_after_return = omit_after_jump or PYPY
+
+    # Optimize away unreachable try-else clauses.
+ optimize_unreachable_try_else = pep626 + + # Modules used to have firstlineno equal to the line number of the first + # real line of code. Now they always start at 1. + module_firstline_1 = pep626 + + # Are "if 0:" lines (and similar) kept in the compiled code? + keep_constant_test = pep626 + + # When leaving a with-block, do we visit the with-line again for the exit? + exit_through_with = (PYVERSION >= (3, 10, 0, "beta")) + + # When leaving a with-block, do we visit the with-line exactly, + # or the inner-most context manager? + exit_with_through_ctxmgr = (PYVERSION >= (3, 12)) + + # Match-case construct. + match_case = (PYVERSION >= (3, 10)) + + # Some words are keywords in some places, identifiers in other places. + soft_keywords = (PYVERSION >= (3, 10)) + + # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/ + pep669 = bool(getattr(sys, "monitoring", None)) + + # Where does frame.f_lasti point when yielding from a generator? + # It used to point at the YIELD, in 3.13 it points at the RESUME, + # then it went back to the YIELD. + # https://github.com/python/cpython/issues/113728 + lasti_is_yield = (PYVERSION[:2] != (3, 13)) + + +# Coverage.py specifics, about testing scenarios. See tests/testenv.py also. + +# Are we coverage-measuring ourselves? +METACOV = os.getenv("COVERAGE_COVERAGE") is not None + +# Are we running our test suite? +# Even when running tests, you can use COVERAGE_TESTING=0 to disable the +# test-specific behavior like AST checking. +TESTING = os.getenv("COVERAGE_TESTING") == "True" + + +def debug_info() -> Iterable[tuple[str, Any]]: + """Return a list of (name, value) pairs for printing debug information.""" + info = [ + (name, value) for name, value in globals().items() + if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS + ] + info += [ + (name, value) for name, value in PYBEHAVIOR.__dict__.items() + if not name.startswith("_") + ] + return sorted(info) diff --git a/.venv/Lib/site-packages/coverage/exceptions.py b/.venv/Lib/site-packages/coverage/exceptions.py new file mode 100644 index 00000000..ecd1b5e6 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/exceptions.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Exceptions coverage.py can raise.""" + +from __future__ import annotations + +class _BaseCoverageException(Exception): + """The base-base of all Coverage exceptions.""" + pass + + +class CoverageException(_BaseCoverageException): + """The base class of all exceptions raised by Coverage.py.""" + pass + + +class ConfigError(_BaseCoverageException): + """A problem with a config file, or a value in one.""" + pass + + +class DataError(CoverageException): + """An error in using a data file.""" + pass + +class NoDataError(CoverageException): + """We didn't have data to work with.""" + pass + + +class NoSource(CoverageException): + """We couldn't find the source for a module.""" + pass + + +class NoCode(NoSource): + """We couldn't find any code at all.""" + pass + + +class NotPython(CoverageException): + """A source file turned out not to be parsable Python.""" + pass + + +class PluginError(CoverageException): + """A plugin misbehaved.""" + pass + + +class _ExceptionDuringRun(CoverageException): + """An exception happened while running customer code. + + Construct it with three arguments, the values from `sys.exc_info`. 
+ + """ + pass + + +class CoverageWarning(Warning): + """A warning from Coverage.py.""" + pass diff --git a/.venv/Lib/site-packages/coverage/execfile.py b/.venv/Lib/site-packages/coverage/execfile.py new file mode 100644 index 00000000..cbecec84 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/execfile.py @@ -0,0 +1,324 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Execute files of Python code.""" + +from __future__ import annotations + +import importlib.machinery +import importlib.util +import inspect +import marshal +import os +import struct +import sys + +from importlib.machinery import ModuleSpec +from types import CodeType, ModuleType +from typing import Any + +from coverage.exceptions import CoverageException, _ExceptionDuringRun, NoCode, NoSource +from coverage.files import canonical_filename, python_reported_file +from coverage.misc import isolate_module +from coverage.python import get_python_source + +os = isolate_module(os) + + +PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER + +class DummyLoader: + """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. + + Currently only implements the .fullname attribute + """ + def __init__(self, fullname: str, *_args: Any) -> None: + self.fullname = fullname + + +def find_module( + modulename: str, +) -> tuple[str | None, str, ModuleSpec]: + """Find the module named `modulename`. + + Returns the file path of the module, the name of the enclosing + package, and the spec. + """ + try: + spec = importlib.util.find_spec(modulename) + except ImportError as err: + raise NoSource(str(err)) from err + if not spec: + raise NoSource(f"No module named {modulename!r}") + pathname = spec.origin + packagename = spec.name + if spec.submodule_search_locations: + mod_main = modulename + ".__main__" + spec = importlib.util.find_spec(mod_main) + if not spec: + raise NoSource( + f"No module named {mod_main}; " + + f"{modulename!r} is a package and cannot be directly executed", + ) + pathname = spec.origin + packagename = spec.name + packagename = packagename.rpartition(".")[0] + return pathname, packagename, spec + + +class PyRunner: + """Multi-stage execution of Python code. + + This is meant to emulate real Python execution as closely as possible. + + """ + def __init__(self, args: list[str], as_module: bool = False) -> None: + self.args = args + self.as_module = as_module + + self.arg0 = args[0] + self.package: str | None = None + self.modulename: str | None = None + self.pathname: str | None = None + self.loader: DummyLoader | None = None + self.spec: ModuleSpec | None = None + + def prepare(self) -> None: + """Set sys.path properly. + + This needs to happen before any importing, and without importing anything. + """ + path0: str | None + if self.as_module: + path0 = os.getcwd() + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + path0 = self.arg0 + else: + path0 = os.path.abspath(os.path.dirname(self.arg0)) + + if os.path.isdir(sys.path[0]): + # sys.path fakery. If we are being run as a command, then sys.path[0] + # is the directory of the "coverage" script. If this is so, replace + # sys.path[0] with the directory of the file we're running, or the + # current directory when running modules. If it isn't so, then we + # don't know what's going on, and just leave it alone. 
+ top_file = inspect.stack()[-1][0].f_code.co_filename + sys_path_0_abs = os.path.abspath(sys.path[0]) + top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) + sys_path_0_abs = canonical_filename(sys_path_0_abs) + top_file_dir_abs = canonical_filename(top_file_dir_abs) + if sys_path_0_abs != top_file_dir_abs: + path0 = None + + else: + # sys.path[0] is a file. Is the next entry the directory containing + # that file? + if sys.path[1] == os.path.dirname(sys.path[0]): + # Can it be right to always remove that? + del sys.path[1] + + if path0 is not None: + sys.path[0] = python_reported_file(path0) + + def _prepare2(self) -> None: + """Do more preparation to run Python code. + + Includes finding the module to run and adjusting sys.argv[0]. + This method is allowed to import code. + + """ + if self.as_module: + self.modulename = self.arg0 + pathname, self.package, self.spec = find_module(self.modulename) + if self.spec is not None: + self.modulename = self.spec.name + self.loader = DummyLoader(self.modulename) + assert pathname is not None + self.pathname = os.path.abspath(pathname) + self.args[0] = self.arg0 = self.pathname + elif os.path.isdir(self.arg0): + # Running a directory means running the __main__.py file in that + # directory. + for ext in [".py", ".pyc", ".pyo"]: + try_filename = os.path.join(self.arg0, "__main__" + ext) + # 3.8.10 changed how files are reported when running a + # directory. + try_filename = os.path.abspath(try_filename) + if os.path.exists(try_filename): + self.arg0 = try_filename + break + else: + raise NoSource(f"Can't find '__main__' module in '{self.arg0}'") + + # Make a spec. I don't know if this is the right way to do it. + try_filename = python_reported_file(try_filename) + self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) + self.spec.has_location = True + self.package = "" + self.loader = DummyLoader("__main__") + else: + self.loader = DummyLoader("__main__") + + self.arg0 = python_reported_file(self.arg0) + + def run(self) -> None: + """Run the Python code!""" + + self._prepare2() + + # Create a module to serve as __main__ + main_mod = ModuleType("__main__") + + from_pyc = self.arg0.endswith((".pyc", ".pyo")) + main_mod.__file__ = self.arg0 + if from_pyc: + main_mod.__file__ = main_mod.__file__[:-1] + if self.package is not None: + main_mod.__package__ = self.package + main_mod.__loader__ = self.loader # type: ignore[assignment] + if self.spec is not None: + main_mod.__spec__ = self.spec + + main_mod.__builtins__ = sys.modules["builtins"] # type: ignore[attr-defined] + + sys.modules["__main__"] = main_mod + + # Set sys.argv properly. + sys.argv = self.args + + try: + # Make a code object somehow. + if from_pyc: + code = make_code_from_pyc(self.arg0) + else: + code = make_code_from_py(self.arg0) + except CoverageException: + raise + except Exception as exc: + msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}" + raise CoverageException(msg) from exc + + # Execute the code object. + # Return to the original directory in case the test code exits in + # a non-existent directory. + cwd = os.getcwd() + try: + exec(code, main_mod.__dict__) + except SystemExit: # pylint: disable=try-except-raise + # The user called sys.exit(). Just pass it along to the upper + # layers, where it will be handled. + raise + except Exception: + # Something went wrong while executing the user code. + # Get the exc_info, and pack them into an exception that we can + # throw up to the outer loop. 
We peel one layer off the traceback
+            # so that the coverage.py code doesn't appear in the final printed
+            # traceback.
+            typ, err, tb = sys.exc_info()
+            assert typ is not None
+            assert err is not None
+            assert tb is not None
+
+            # PyPy3 weirdness. If I don't access __context__, then somehow it
+            # is non-None when the exception is reported at the upper layer,
+            # and a nested exception is shown to the user. This getattr fixes
+            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
+            getattr(err, "__context__", None)
+
+            # Call the excepthook.
+            try:
+                assert err.__traceback__ is not None
+                err.__traceback__ = err.__traceback__.tb_next
+                sys.excepthook(typ, err, tb.tb_next)
+            except SystemExit:  # pylint: disable=try-except-raise
+                raise
+            except Exception as exc:
+                # Getting the output right in the case of excepthook
+                # shenanigans is kind of involved.
+                sys.stderr.write("Error in sys.excepthook:\n")
+                typ2, err2, tb2 = sys.exc_info()
+                assert typ2 is not None
+                assert err2 is not None
+                assert tb2 is not None
+                err2.__suppress_context__ = True
+                assert err2.__traceback__ is not None
+                err2.__traceback__ = err2.__traceback__.tb_next
+                sys.__excepthook__(typ2, err2, tb2.tb_next)
+                sys.stderr.write("\nOriginal exception was:\n")
+                raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc
+            else:
+                sys.exit(1)
+        finally:
+            os.chdir(cwd)
+
+
+def run_python_module(args: list[str]) -> None:
+    """Run a Python module, as though with ``python -m name args...``.
+
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the module being executed.
+
+    This is a helper for tests, to encapsulate how to use PyRunner.
+
+    """
+    runner = PyRunner(args, as_module=True)
+    runner.prepare()
+    runner.run()
+
+
+def run_python_file(args: list[str]) -> None:
+    """Run a Python file as if it were the main program on the command line.
+
+    `args` is the argument array to present as sys.argv, including the first
+    element naming the file being executed.  `package` is the name of the
+    enclosing package, if any.
+
+    This is a helper for tests, to encapsulate how to use PyRunner.
+
+    """
+    runner = PyRunner(args, as_module=False)
+    runner.prepare()
+    runner.run()
+
+
+def make_code_from_py(filename: str) -> CodeType:
+    """Get source from `filename` and make a code object of it."""
+    try:
+        source = get_python_source(filename)
+    except (OSError, NoSource) as exc:
+        raise NoSource(f"No file to run: '{filename}'") from exc
+
+    code = compile(source, filename, mode="exec", dont_inherit=True)
+    return code
+
+
+def make_code_from_pyc(filename: str) -> CodeType:
+    """Get a code object from a .pyc file."""
+    try:
+        fpyc = open(filename, "rb")
+    except OSError as exc:
+        raise NoCode(f"No file to run: '{filename}'") from exc
+
+    with fpyc:
+        # First four bytes are a version-specific magic number.  It has to
+        # match or we won't run the file.
+        magic = fpyc.read(4)
+        if magic != PYC_MAGIC_NUMBER:
+            raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}")
+
+        flags = struct.unpack("<L", fpyc.read(4))[0]
+        hash_based = flags & 0x01
+        if hash_based:
+            fpyc.read(8)    # Skip the hash.
+        else:
+            # Skip the junk in the header that we don't need.
+            fpyc.read(4)    # Skip the moddate.
+            fpyc.read(4)    # Skip the size.
+
+        # The rest of the file is the code object we want.
+        code = marshal.loads(fpyc.read())
+
+    return code
diff --git a/.venv/Lib/site-packages/coverage/files.py b/.venv/Lib/site-packages/coverage/files.py
new file mode 100644
--- /dev/null
+++ b/.venv/Lib/site-packages/coverage/files.py
@@ -0,0 +1,548 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""File wrangling."""
+
+from __future__ import annotations
+
+import hashlib
+import ntpath
+import os
+import os.path
+import posixpath
+import re
+import sys
+
+from typing import Callable
+from collections.abc import Iterable
+
+from coverage import env
+from coverage.exceptions import ConfigError
+from coverage.misc import human_sorted, isolate_module, join_regex
+
+os = isolate_module(os)
+
+
+RELATIVE_DIR: str = ""
+CANONICAL_FILENAME_CACHE: dict[str, str] = {}
+
+
+def set_relative_directory() -> None:
+    """Set the directory that `relative_filename` will be relative to."""
+    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
+
+    # The current directory
+    abs_curdir = abs_file(os.curdir)
+    if not abs_curdir.endswith(os.sep):
+        # Suffix with separator only if not at the system root
+        abs_curdir = abs_curdir + os.sep
+
+    # The absolute path to our current directory.
+    RELATIVE_DIR = os.path.normcase(abs_curdir)
+
+    # Cache of results of calling the canonical_filename() method, to
+    # avoid duplicating work.
+    CANONICAL_FILENAME_CACHE = {}
+
+
+def relative_directory() -> str:
+    """Return the directory that `relative_filename` is relative to."""
+    return RELATIVE_DIR
+
+
+def relative_filename(filename: str) -> str:
+    """Return the relative form of `filename`.
+
+    The file name will be relative to the current directory when
+    `set_relative_directory` was called.
+
+    """
+    fnorm = os.path.normcase(filename)
+    if fnorm.startswith(RELATIVE_DIR):
+        filename = filename[len(RELATIVE_DIR):]
+    return filename
+
+
+def canonical_filename(filename: str) -> str:
+    """Return a canonical file name for `filename`.
+
+    An absolute path with no redundant components and normalized case.
+
+    """
+    if filename not in CANONICAL_FILENAME_CACHE:
+        cf = filename
+        if not os.path.isabs(filename):
+            for path in [os.curdir] + sys.path:
+                if path is None:
+                    continue  # type: ignore[unreachable]
+                f = os.path.join(path, filename)
+                try:
+                    exists = os.path.exists(f)
+                except UnicodeError:
+                    exists = False
+                if exists:
+                    cf = f
+                    break
+        cf = abs_file(cf)
+        CANONICAL_FILENAME_CACHE[filename] = cf
+    return CANONICAL_FILENAME_CACHE[filename]
+
+
+def flat_rootname(filename: str) -> str:
+    """A base for a flat file name to correspond to this file.
+
+    Useful for writing files about the code where you want all the files in
+    the same directory, but need to differentiate same-named files from
+    different directories.
+
+    For example, the file a/b/c.py will return 'z_86bbcbe134d28fd2_c_py'
+
+    """
+    dirname, basename = ntpath.split(filename)
+    if dirname:
+        fp = hashlib.new(
+            "sha3_256",
+            dirname.encode("UTF-8"),
+            usedforsecurity=False,
+        ).hexdigest()[:16]
+        prefix = f"z_{fp}_"
+    else:
+        prefix = ""
+    return prefix + basename.replace(".", "_")
+
+
+if env.WINDOWS:
+
+    _ACTUAL_PATH_CACHE: dict[str, str] = {}
+    _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {}
+
+    def actual_path(path: str) -> str:
+        """Get the actual path of `path`, including the correct case."""
+        if path in _ACTUAL_PATH_CACHE:
+            return _ACTUAL_PATH_CACHE[path]
+
+        head, tail = os.path.split(path)
+        if not tail:
+            # This means head is the drive spec: normalize it.
+            actpath = head.upper()
+        elif not head:
+            actpath = tail
+        else:
+            head = actual_path(head)
+            if head in _ACTUAL_PATH_LIST_CACHE:
+                files = _ACTUAL_PATH_LIST_CACHE[head]
+            else:
+                try:
+                    files = os.listdir(head)
+                except Exception:
+                    # This will raise OSError, or this bizarre TypeError:
+                    # https://bugs.python.org/issue1776160
+                    files = []
+                _ACTUAL_PATH_LIST_CACHE[head] = files
+            normtail = os.path.normcase(tail)
+            for f in files:
+                if os.path.normcase(f) == normtail:
+                    tail = f
+                    break
+            actpath = os.path.join(head, tail)
+        _ACTUAL_PATH_CACHE[path] = actpath
+        return actpath
+
+else:
+    def actual_path(path: str) -> str:
+        """The actual path for non-Windows platforms."""
+        return path
+
+
+def abs_file(path: str) -> str:
+    """Return the absolute normalized form of `path`."""
+    return actual_path(os.path.abspath(os.path.realpath(path)))
+
+
+def zip_location(filename: str) -> tuple[str, str] | None:
+    """Split a filename into a zipfile / inner name pair.
+
+    Only return a pair if the zipfile exists.  No check is made if the inner
+    name is in the zipfile.
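+
+    For example, a hypothetical "dist/app.whl/pkg/mod.py" would split into
+    ("dist/app.whl", "pkg/mod.py"), provided "dist/app.whl" exists on disk.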
+
+    """
+    for ext in [".zip", ".whl", ".egg", ".pex"]:
+        zipbase, extension, inner = filename.partition(ext + sep(filename))
+        if extension:
+            zipfile = zipbase + ext
+            if os.path.exists(zipfile):
+                return zipfile, inner
+    return None
+
+
+def source_exists(path: str) -> bool:
+    """Determine if a source file path exists."""
+    if os.path.exists(path):
+        return True
+
+    if zip_location(path):
+        # If zip_location returns anything, then it's a zipfile that
+        # exists. That's good enough for us.
+        return True
+
+    return False
+
+
+def python_reported_file(filename: str) -> str:
+    """Return the string as Python would describe this file name."""
+    return os.path.abspath(filename)
+
+
+def isabs_anywhere(filename: str) -> bool:
+    """Is `filename` an absolute path on any OS?"""
+    return ntpath.isabs(filename) or posixpath.isabs(filename)
+
+
+def prep_patterns(patterns: Iterable[str]) -> list[str]:
+    """Prepare the file patterns for use in a `GlobMatcher`.
+
+    If a pattern starts with a wildcard, it is used as a pattern
+    as-is.  If it does not start with a wildcard, then it is made
+    absolute with the current directory.
+
+    If `patterns` is None, an empty list is returned.
+
+    """
+    prepped = []
+    for p in patterns or []:
+        prepped.append(p)
+        if not p.startswith(("*", "?")):
+            prepped.append(abs_file(p))
+    return prepped
+
+
+class TreeMatcher:
+    """A matcher for files in a tree.
+
+    Construct with a list of paths, either files or directories. Paths match
+    with the `match` method if they are one of the files, or if they are
+    somewhere in a subtree rooted at one of the directories.
+
+    """
+    def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
+        self.original_paths: list[str] = human_sorted(paths)
+        self.paths = [os.path.normcase(p) for p in paths]
+        self.name = name
+
+    def __repr__(self) -> str:
+        return f"<TreeMatcher {self.name} {self.original_paths!r}>"
+
+    def info(self) -> list[str]:
+        """A list of strings for displaying when dumping state."""
+        return self.original_paths
+
+    def match(self, fpath: str) -> bool:
+        """Does `fpath` indicate a file in one of our trees?"""
+        fpath = os.path.normcase(fpath)
+        for p in self.paths:
+            if fpath.startswith(p):
+                if fpath == p:
+                    # This is the same file!
+                    return True
+                if fpath[len(p)] == os.sep:
+                    # This is a file in the directory
+                    return True
+        return False
+
+
+class ModuleMatcher:
+    """A matcher for modules in a tree."""
+    def __init__(self, module_names: Iterable[str], name: str = "unknown") -> None:
+        self.modules = list(module_names)
+        self.name = name
+
+    def __repr__(self) -> str:
+        return f"<ModuleMatcher {self.name} {self.modules!r}>"
+
+    def info(self) -> list[str]:
+        """A list of strings for displaying when dumping state."""
+        return self.modules
+
+    def match(self, module_name: str) -> bool:
+        """Does `module_name` indicate a module in one of our packages?"""
+        if not module_name:
+            return False
+
+        for m in self.modules:
+            if module_name.startswith(m):
+                if module_name == m:
+                    return True
+                if module_name[len(m)] == ".":
+                    # This is a module in the package
+                    return True
+
+        return False
+
+
+class GlobMatcher:
+    """A matcher for files by file name pattern."""
+    def __init__(self, pats: Iterable[str], name: str = "unknown") -> None:
+        self.pats = list(pats)
+        self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS)
+        self.name = name
+
+    def __repr__(self) -> str:
+        return f"<GlobMatcher {self.name} {self.pats!r}>"
+
+    def info(self) -> list[str]:
+        """A list of strings for displaying when dumping state."""
+        return self.pats
+
+    def match(self, fpath: str) -> bool:
+        """Does `fpath` match one of our file name patterns?"""
+        return self.re.match(fpath) is not None
+
+
+def sep(s: str) -> str:
+    """Find the path separator used in this string, or os.sep if none."""
+    if sep_match := re.search(r"[\\/]", s):
+        the_sep = sep_match[0]
+    else:
+        the_sep = os.sep
+    return the_sep
+
+
+# Tokenizer for _glob_to_regex.
+# None as a sub means disallowed.
+G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [
+    (r"\*\*\*+", None),             # Can't have ***
+    (r"[^/]+\*\*+", None),          # Can't have x**
+    (r"\*\*+[^/]+", None),          # Can't have **x
+    (r"\*\*/\*\*", None),           # Can't have **/**
+    (r"^\*+/", r"(.*[/\\\\])?"),    # ^*/ matches any prefix-slash, or nothing.
+    (r"/\*+$", r"[/\\\\].*"),       # /*$ matches any slash-suffix.
+    (r"\*\*/", r"(.*[/\\\\])?"),    # **/ matches any subdirs, including none
+    (r"/", r"[/\\\\]"),             # / matches either slash or backslash
+    (r"\*", r"[^/\\\\]*"),          # * matches any number of non slash-likes
+    (r"\?", r"[^/\\\\]"),           # ? matches one non slash-like
+    (r"\[.*?\]", r"\g<0>"),         # [a-f] matches [a-f]
+    (r"[a-zA-Z0-9_-]+", r"\g<0>"),  # word chars match themselves
+    (r"[\[\]]", None),              # Can't have single square brackets
+    (r".", r"\\\g<0>"),             # Anything else is escaped to be safe
+]]
+
+def _glob_to_regex(pattern: str) -> str:
+    """Convert a file-path glob pattern into a regex."""
+    # Turn all backslashes into slashes to simplify the tokenizer.
+    pattern = pattern.replace("\\", "/")
+    if "/" not in pattern:
+        pattern = "**/" + pattern
+    path_rx = []
+    pos = 0
+    while pos < len(pattern):
+        for rx, sub in G2RX_TOKENS:  # pragma: always breaks
+            if m := rx.match(pattern, pos=pos):
+                if sub is None:
+                    raise ConfigError(f"File pattern can't include {m[0]!r}")
+                path_rx.append(m.expand(sub))
+                pos = m.end()
+                break
+    return "".join(path_rx)
+
+
+def globs_to_regex(
+    patterns: Iterable[str],
+    case_insensitive: bool = False,
+    partial: bool = False,
+) -> re.Pattern[str]:
+    """Convert glob patterns to a compiled regex that matches any of them.
+
+    Slashes are always converted to match either slash or backslash, for
+    Windows support, even when running elsewhere.
+
+    If the pattern has no slash or backslash, then it is interpreted as
+    matching a file name anywhere it appears in the tree.
Otherwise, the glob
+    pattern must match the whole file path.
+
+    If `partial` is true, then the pattern will match if the target string
+    starts with the pattern. Otherwise, it must match the entire string.
+
+    Returns: a compiled regex object.  Use the .match method to compare target
+    strings.
+
+    """
+    flags = 0
+    if case_insensitive:
+        flags |= re.IGNORECASE
+    rx = join_regex(map(_glob_to_regex, patterns))
+    if not partial:
+        rx = fr"(?:{rx})\Z"
+    compiled = re.compile(rx, flags=flags)
+    return compiled
+
+
+class PathAliases:
+    """A collection of aliases for paths.
+
+    When combining data files from remote machines, often the paths to source
+    code are different, for example, due to OS differences, or because of
+    serialized checkouts on continuous integration machines.
+
+    A `PathAliases` object tracks a list of pattern/result pairs, and can
+    map a path through those aliases to produce a unified path.
+
+    """
+    def __init__(
+        self,
+        debugfn: Callable[[str], None] | None = None,
+        relative: bool = False,
+    ) -> None:
+        # A list of (original_pattern, regex, result)
+        self.aliases: list[tuple[str, re.Pattern[str], str]] = []
+        self.debugfn = debugfn or (lambda msg: 0)
+        self.relative = relative
+        self.pprinted = False
+
+    def pprint(self) -> None:
+        """Dump the important parts of the PathAliases, for debugging."""
+        self.debugfn(f"Aliases (relative={self.relative}):")
+        for original_pattern, regex, result in self.aliases:
+            self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}")
+
+    def add(self, pattern: str, result: str) -> None:
+        """Add the `pattern`/`result` pair to the list of aliases.
+
+        `pattern` is a `glob`-style pattern.  `result` is a simple
+        string.  When mapping paths, if a path starts with a match against
+        `pattern`, then that match is replaced with `result`.  This models
+        isomorphic source trees being rooted at different places on two
+        different machines.
+
+        `pattern` can't end with a wildcard component, since that would
+        match an entire tree, and not just its root.
+
+        """
+        original_pattern = pattern
+        pattern_sep = sep(pattern)
+
+        if len(pattern) > 1:
+            pattern = pattern.rstrip(r"\/")
+
+        # The pattern can't end with a wildcard component.
+        if pattern.endswith("*"):
+            raise ConfigError("Pattern must not end with wildcards.")
+
+        # The pattern is meant to match a file path.  Let's make it absolute
+        # unless it already is, or is meant to match any prefix.
+        if not self.relative:
+            if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep):
+                pattern = abs_file(pattern)
+        if not pattern.endswith(pattern_sep):
+            pattern += pattern_sep
+
+        # Make a regex from the pattern.
+        regex = globs_to_regex([pattern], case_insensitive=True, partial=True)
+
+        # Normalize the result: it must end with a path separator.
+        result_sep = sep(result)
+        result = result.rstrip(r"\/") + result_sep
+        self.aliases.append((original_pattern, regex, result))
+
+    def map(self, path: str, exists: Callable[[str], bool] = source_exists) -> str:
+        """Map `path` through the aliases.
+
+        `path` is checked against all of the patterns.  The first pattern to
+        match is used to replace the root of the path with the result root.
+        Only one pattern is ever used.  If no patterns match, `path` is
+        returned unchanged.
+
+        The separator style in the result is made to match that of the result
+        in the alias.
+
+        `exists` is a function to determine if the resulting path actually
+        exists.
+
+        Returns the mapped path.
If a mapping has happened, this is a + canonical path. If no mapping has happened, it is the original value + of `path` unchanged. + + """ + if not self.pprinted: + self.pprint() + self.pprinted = True + + for original_pattern, regex, result in self.aliases: + if m := regex.match(path): + new = path.replace(m[0], result) + new = new.replace(sep(path), sep(result)) + if not self.relative: + new = canonical_filename(new) + dot_start = result.startswith(("./", ".\\")) and len(result) > 2 + if new.startswith(("./", ".\\")) and not dot_start: + new = new[2:] + if not exists(new): + self.debugfn( + f"Rule {original_pattern!r} changed {path!r} to {new!r} " + + "which doesn't exist, continuing", + ) + continue + self.debugfn( + f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, " + + f"producing {new!r}", + ) + return new + + # If we get here, no pattern matched. + + if self.relative: + path = relative_filename(path) + + if self.relative and not isabs_anywhere(path): + # Auto-generate a pattern to implicitly match relative files + parts = re.split(r"[/\\]", path) + if len(parts) > 1: + dir1 = parts[0] + pattern = f"*/{dir1}" + regex_pat = fr"^(.*[\\/])?{re.escape(dir1)}[\\/]" + result = f"{dir1}{os.sep}" + # Only add a new pattern if we don't already have this pattern. + if not any(p == pattern for p, _, _ in self.aliases): + self.debugfn( + f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}", + ) + self.aliases.append((pattern, re.compile(regex_pat), result)) + return self.map(path, exists=exists) + + self.debugfn(f"No rules match, path {path!r} is unchanged") + return path + + +def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]: + """Yield all of the importable Python files in `dirname`, recursively. + + To be importable, the files have to be in a directory with a __init__.py, + except for `dirname` itself, which isn't required to have one. The + assumption is that `dirname` was specified directly, so the user knows + best, but sub-directories are checked for a __init__.py to be sure we only + find the importable files. + + If `include_namespace_packages` is True, then the check for __init__.py + files is skipped. + + Files with strange characters are skipped, since they couldn't have been + imported, and are probably editor side-files. + + """ + for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): + if not include_namespace_packages: + if i > 0 and "__init__.py" not in filenames: + # If a directory doesn't have __init__.py, then it isn't + # importable and neither are its files + del dirnames[:] + continue + for filename in filenames: + # We're only interested in files that look like reasonable Python + # files: Must end with .py or .pyw, and must not have certain funny + # characters that probably mean they are editor junk. + if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): + yield os.path.join(dirpath, filename) + + +# Globally set the relative directory. 
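+#
+# An illustrative sketch (not part of the upstream file; the paths are
+# hypothetical) of what the module-level call below enables:
+#
+#     os.chdir("/work/project")
+#     set_relative_directory()
+#     relative_filename("/work/project/pkg/mod.py")   # -> "pkg/mod.py"
+#     relative_filename("/elsewhere/mod.py")          # returned unchanged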
+set_relative_directory()
diff --git a/.venv/Lib/site-packages/coverage/html.py b/.venv/Lib/site-packages/coverage/html.py
new file mode 100644
index 00000000..20a354b1
--- /dev/null
+++ b/.venv/Lib/site-packages/coverage/html.py
@@ -0,0 +1,810 @@
+# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+"""HTML reporting for coverage.py."""
+
+from __future__ import annotations
+
+import collections
+import dataclasses
+import datetime
+import functools
+import json
+import os
+import re
+import string
+
+from dataclasses import dataclass, field
+from typing import Any, TYPE_CHECKING
+from collections.abc import Iterable
+
+import coverage
+from coverage.data import CoverageData, add_data_to_hash
+from coverage.exceptions import NoDataError
+from coverage.files import flat_rootname
+from coverage.misc import (
+    ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime,
+    human_sorted, plural, stdout_link,
+)
+from coverage.report_core import get_analysis_to_report
+from coverage.results import Analysis, Numbers
+from coverage.templite import Templite
+from coverage.types import TLineNo, TMorf
+from coverage.version import __url__
+
+
+if TYPE_CHECKING:
+    from coverage import Coverage
+    from coverage.plugin import FileReporter
+
+
+os = isolate_module(os)
+
+
+def data_filename(fname: str) -> str:
+    """Return the path to an "htmlfiles" data file of ours.
+    """
+    static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles")
+    static_filename = os.path.join(static_dir, fname)
+    return static_filename
+
+
+def read_data(fname: str) -> str:
+    """Return the contents of a data file of ours."""
+    with open(data_filename(fname)) as data_file:
+        return data_file.read()
+
+
+def write_html(fname: str, html: str) -> None:
+    """Write `html` to `fname`, properly encoded."""
+    html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
+    with open(fname, "wb") as fout:
+        fout.write(html.encode("ascii", "xmlcharrefreplace"))
+
+
+@dataclass
+class LineData:
+    """The data for each source line of HTML output."""
+    tokens: list[tuple[str, str]]
+    number: TLineNo
+    category: str
+    contexts: list[str]
+    contexts_label: str
+    context_list: list[str]
+    short_annotations: list[str]
+    long_annotations: list[str]
+    html: str = ""
+    context_str: str | None = None
+    annotate: str | None = None
+    annotate_long: str | None = None
+    css_class: str = ""
+
+
+@dataclass
+class FileData:
+    """The data for each source file of HTML output."""
+    relative_filename: str
+    nums: Numbers
+    lines: list[LineData]
+
+
+@dataclass
+class IndexItem:
+    """Information for each index entry, to render an index page."""
+    url: str = ""
+    file: str = ""
+    description: str = ""
+    nums: Numbers = field(default_factory=Numbers)
+
+
+@dataclass
+class IndexPage:
+    """Data for each index page."""
+    noun: str
+    plural: str
+    filename: str
+    summaries: list[IndexItem]
+    totals: Numbers
+    skipped_covered_count: int
+    skipped_empty_count: int
+
+
+class HtmlDataGeneration:
+    """Generate structured data to be turned into HTML reports."""
+
+    EMPTY = "(empty)"
+
+    def __init__(self, cov: Coverage) -> None:
+        self.coverage = cov
+        self.config = self.coverage.config
+        self.data = self.coverage.get_data()
+        self.has_arcs = self.data.has_arcs()
+        if self.config.show_contexts:
+            if self.data.measured_contexts() == {""}:
+                self.coverage._warn("No contexts were measured")
+
self.data.set_query_contexts(self.config.report_contexts) + + def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData: + """Produce the data needed for one file's report.""" + if self.has_arcs: + missing_branch_arcs = analysis.missing_branch_arcs() + arcs_executed = analysis.arcs_executed + else: + missing_branch_arcs = {} + arcs_executed = [] + + if self.config.show_contexts: + contexts_by_lineno = self.data.contexts_by_lineno(analysis.filename) + + lines = [] + + for lineno, tokens in enumerate(fr.source_token_lines(), start=1): + # Figure out how to mark this line. + category = "" + short_annotations = [] + long_annotations = [] + + if lineno in analysis.excluded: + category = "exc" + elif lineno in analysis.missing: + category = "mis" + elif self.has_arcs and lineno in missing_branch_arcs: + category = "par" + for b in missing_branch_arcs[lineno]: + if b < 0: + short_annotations.append("exit") + else: + short_annotations.append(str(b)) + long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) + elif lineno in analysis.statements: + category = "run" + + contexts = [] + contexts_label = "" + context_list = [] + if category and self.config.show_contexts: + contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ())) + if contexts == [self.EMPTY]: + contexts_label = self.EMPTY + else: + contexts_label = f"{len(contexts)} ctx" + context_list = contexts + + lines.append(LineData( + tokens=tokens, + number=lineno, + category=category, + contexts=contexts, + contexts_label=contexts_label, + context_list=context_list, + short_annotations=short_annotations, + long_annotations=long_annotations, + )) + + file_data = FileData( + relative_filename=fr.relative_filename(), + nums=analysis.numbers, + lines=lines, + ) + + return file_data + + +class FileToReport: + """A file we're considering reporting.""" + def __init__(self, fr: FileReporter, analysis: Analysis) -> None: + self.fr = fr + self.analysis = analysis + self.rootname = flat_rootname(fr.relative_filename()) + self.html_filename = self.rootname + ".html" + self.prev_html = self.next_html = "" + + +HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~" + +@functools.cache +def encode_int(n: int) -> str: + """Create a short HTML-safe string from an integer, using HTML_SAFE.""" + if n == 0: + return HTML_SAFE[0] + + r = [] + while n: + n, t = divmod(n, len(HTML_SAFE)) + r.append(HTML_SAFE[t]) + return "".join(r) + + +def copy_with_cache_bust(src: str, dest_dir: str) -> str: + """Copy `src` to `dest_dir`, adding a hash to the name. + + Returns the updated destination file name with hash. + """ + with open(src, "rb") as f: + text = f.read() + h = Hasher() + h.update(text) + cache_bust = h.hexdigest()[:8] + src_base = os.path.basename(src) + dest = src_base.replace(".", f"_cb_{cache_bust}.") + with open(os.path.join(dest_dir, dest), "wb") as f: + f.write(text) + return dest + + +class HtmlReporter: + """HTML reporting.""" + + # These files will be copied from the htmlfiles directory to the output + # directory. 
+ STATIC_FILES = [ + "style.css", + "coverage_html.js", + "keybd_closed.png", + "favicon_32.png", + ] + + def __init__(self, cov: Coverage) -> None: + self.coverage = cov + self.config = self.coverage.config + self.directory = self.config.html_dir + + self.skip_covered = self.config.html_skip_covered + if self.skip_covered is None: + self.skip_covered = self.config.skip_covered + self.skip_empty = self.config.html_skip_empty + if self.skip_empty is None: + self.skip_empty = self.config.skip_empty + + title = self.config.html_title + + self.extra_css = bool(self.config.extra_css) + + self.data = self.coverage.get_data() + self.has_arcs = self.data.has_arcs() + + self.index_pages: dict[str, IndexPage] = { + "file": self.new_index_page("file", "files"), + } + self.incr = IncrementalChecker(self.directory) + self.datagen = HtmlDataGeneration(self.coverage) + self.directory_was_empty = False + self.first_fr = None + self.final_fr = None + + self.template_globals = { + # Functions available in the templates. + "escape": escape, + "pair": pair, + "len": len, + + # Constants for this report. + "__url__": __url__, + "__version__": coverage.__version__, + "title": title, + "time_stamp": format_local_datetime(datetime.datetime.now()), + "extra_css": self.extra_css, + "has_arcs": self.has_arcs, + "show_contexts": self.config.show_contexts, + "statics": {}, + + # Constants for all reports. + # These css classes determine which lines are highlighted by default. + "category": { + "exc": "exc show_exc", + "mis": "mis show_mis", + "par": "par run show_par", + "run": "run", + }, + } + self.index_tmpl = Templite(read_data("index.html"), self.template_globals) + self.pyfile_html_source = read_data("pyfile.html") + self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) + + def new_index_page(self, noun: str, plural_noun: str) -> IndexPage: + """Create an IndexPage for a kind of region.""" + return IndexPage( + noun=noun, + plural=plural_noun, + filename="index.html" if noun == "file" else f"{noun}_index.html", + summaries=[], + totals=Numbers(precision=self.config.precision), + skipped_covered_count=0, + skipped_empty_count=0, + ) + + def report(self, morfs: Iterable[TMorf] | None) -> float: + """Generate an HTML report for `morfs`. + + `morfs` is a list of modules or file names. + + """ + # Read the status data and check that this run used the same + # global data as the last run. + self.incr.read() + self.incr.check_global_data(self.config, self.pyfile_html_source) + + # Process all the files. For each page we need to supply a link + # to the next and previous page. 
+ files_to_report = [] + + have_data = False + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + have_data = True + ftr = FileToReport(fr, analysis) + if self.should_report(analysis, self.index_pages["file"]): + files_to_report.append(ftr) + else: + file_be_gone(os.path.join(self.directory, ftr.html_filename)) + + if not have_data: + raise NoDataError("No data to report.") + + self.make_directory() + self.make_local_static_report_files() + + if files_to_report: + for ftr1, ftr2 in zip(files_to_report[:-1], files_to_report[1:]): + ftr1.next_html = ftr2.html_filename + ftr2.prev_html = ftr1.html_filename + files_to_report[0].prev_html = "index.html" + files_to_report[-1].next_html = "index.html" + + for ftr in files_to_report: + self.write_html_page(ftr) + for noun, plural_noun in ftr.fr.code_region_kinds(): + if noun not in self.index_pages: + self.index_pages[noun] = self.new_index_page(noun, plural_noun) + + # Write the index page. + if files_to_report: + first_html = files_to_report[0].html_filename + final_html = files_to_report[-1].html_filename + else: + first_html = final_html = "index.html" + self.write_file_index_page(first_html, final_html) + + # Write function and class index pages. + self.write_region_index_pages(files_to_report) + + return ( + self.index_pages["file"].totals.n_statements + and self.index_pages["file"].totals.pc_covered + ) + + def make_directory(self) -> None: + """Make sure our htmlcov directory exists.""" + ensure_dir(self.directory) + if not os.listdir(self.directory): + self.directory_was_empty = True + + def copy_static_file(self, src: str, slug: str = "") -> None: + """Copy a static file into the output directory with cache busting.""" + dest = copy_with_cache_bust(src, self.directory) + if not slug: + slug = os.path.basename(src).replace(".", "_") + self.template_globals["statics"][slug] = dest # type: ignore + + def make_local_static_report_files(self) -> None: + """Make local instances of static files for HTML report.""" + + # The files we provide must always be copied. + for static in self.STATIC_FILES: + self.copy_static_file(data_filename(static)) + + # The user may have extra CSS they want copied. + if self.extra_css: + assert self.config.extra_css is not None + self.copy_static_file(self.config.extra_css, slug="extra_css") + + # Only write the .gitignore file if the directory was originally empty. + # .gitignore can't be copied from the source tree because if it was in + # the source tree, it would stop the static files from being checked in. + if self.directory_was_empty: + with open(os.path.join(self.directory, ".gitignore"), "w") as fgi: + fgi.write("# Created by coverage.py\n*\n") + + def should_report(self, analysis: Analysis, index_page: IndexPage) -> bool: + """Determine if we'll report this file or region.""" + # Get the numbers for this file. + nums = analysis.numbers + index_page.totals += nums + + if self.skip_covered: + # Don't report on 100% files. + no_missing_lines = (nums.n_missing == 0) + no_missing_branches = (nums.n_partial_branches == 0) + if no_missing_lines and no_missing_branches: + index_page.skipped_covered_count += 1 + return False + + if self.skip_empty: + # Don't report on empty files. + if nums.n_statements == 0: + index_page.skipped_empty_count += 1 + return False + + return True + + def write_html_page(self, ftr: FileToReport) -> None: + """Generate an HTML page for one source file. 
+
+        If the page on disk is already correct based on our incremental status
+        checking, then the page doesn't have to be generated, and this function
+        only does page summary bookkeeping.
+
+        """
+        # Find out if the page on disk is already correct.
+        if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname):
+            self.index_pages["file"].summaries.append(self.incr.index_info(ftr.rootname))
+            return
+
+        # Write the HTML page for this source file.
+        file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis)
+
+        contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts)
+        context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())}
+        if context_codes:
+            contexts_json = json.dumps(
+                {encode_int(v): k for (k, v) in context_codes.items()},
+                indent=2,
+            )
+        else:
+            contexts_json = None
+
+        for ldata in file_data.lines:
+            # Build the HTML for the line.
+            html_parts = []
+            for tok_type, tok_text in ldata.tokens:
+                if tok_type == "ws":
+                    html_parts.append(escape(tok_text))
+                else:
+                    tok_html = escape(tok_text) or "&nbsp;"
+                    html_parts.append(f'<span class="{tok_type}">{tok_html}</span>')
+            ldata.html = "".join(html_parts)
+            if ldata.context_list:
+                encoded_contexts = [
+                    encode_int(context_codes[c_context]) for c_context in ldata.context_list
+                ]
+                code_width = max(len(ec) for ec in encoded_contexts)
+                ldata.context_str = (
+                    str(code_width)
+                    + "".join(ec.ljust(code_width) for ec in encoded_contexts)
+                )
+            else:
+                ldata.context_str = ""
+
+            if ldata.short_annotations:
+                # 202F is NARROW NO-BREAK SPACE.
+                # 219B is RIGHTWARDS ARROW WITH STROKE.
+                ldata.annotate = ",&nbsp;&nbsp; ".join(
+                    f"{ldata.number}&#x202F;&#x219B;&#x202F;{d}"
+                    for d in ldata.short_annotations
+                )
+            else:
+                ldata.annotate = None
+
+            if ldata.long_annotations:
+                longs = ldata.long_annotations
+                if len(longs) == 1:
+                    ldata.annotate_long = longs[0]
+                else:
+                    ldata.annotate_long = "{:d} missed branches: {}".format(
+                        len(longs),
+                        ", ".join(
+                            f"{num:d}) {ann_long}"
+                            for num, ann_long in enumerate(longs, start=1)
+                        ),
+                    )
+            else:
+                ldata.annotate_long = None
+
+            css_classes = []
+            if ldata.category:
+                css_classes.append(
+                    self.template_globals["category"][ldata.category],  # type: ignore[index]
+                )
+            ldata.css_class = " ".join(css_classes) or "pln"
+
+        html_path = os.path.join(self.directory, ftr.html_filename)
+        html = self.source_tmpl.render({
+            **file_data.__dict__,
+            "contexts_json": contexts_json,
+            "prev_html": ftr.prev_html,
+            "next_html": ftr.next_html,
+        })
+        write_html(html_path, html)
+
+        # Save this file's information for the index page.
+        index_info = IndexItem(
+            url = ftr.html_filename,
+            file = escape(ftr.fr.relative_filename()),
+            nums = ftr.analysis.numbers,
+        )
+        self.index_pages["file"].summaries.append(index_info)
+        self.incr.set_index_info(ftr.rootname, index_info)
+
+    def write_file_index_page(self, first_html: str, final_html: str) -> None:
+        """Write the file index page for this report."""
+        index_file = self.write_index_page(
+            self.index_pages["file"],
+            first_html=first_html,
+            final_html=final_html,
+        )
+
+        print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}")
+        self.coverage._message(f"Wrote HTML report to {print_href}")
+
+        # Write the latest hashes for next time.
+        self.incr.write()
+
+    def write_region_index_pages(self, files_to_report: Iterable[FileToReport]) -> None:
+        """Write the other index pages for this report."""
+        for ftr in files_to_report:
+            region_nouns = [pair[0] for pair in ftr.fr.code_region_kinds()]
+            num_lines = len(ftr.fr.source().splitlines())
+            regions = ftr.fr.code_regions()
+
+            for noun in region_nouns:
+                page_data = self.index_pages[noun]
+                outside_lines = set(range(1, num_lines + 1))
+
+                for region in regions:
+                    if region.kind != noun:
+                        continue
+                    outside_lines -= region.lines
+                    analysis = ftr.analysis.narrow(region.lines)
+                    if not self.should_report(analysis, page_data):
+                        continue
+                    sorting_name = region.name.rpartition(".")[-1].lstrip("_")
+                    page_data.summaries.append(IndexItem(
+                        url=f"{ftr.html_filename}#t{region.start}",
+                        file=escape(ftr.fr.relative_filename()),
+                        description=(
+                            f"<data value='{sorting_name}'>"
+                            + escape(region.name)
+                            + "</data>"
+                        ),
+                        nums=analysis.numbers,
+                    ))
+
+                analysis = ftr.analysis.narrow(outside_lines)
+                if self.should_report(analysis, page_data):
+                    page_data.summaries.append(IndexItem(
+                        url=ftr.html_filename,
+                        file=escape(ftr.fr.relative_filename()),
+                        description=(
+                            "<data value=''>"
+                            + f"(no {escape(noun)})"
+                            + "</data>"
+                        ),
+                        nums=analysis.numbers,
+                    ))
+
+        for noun, index_page in self.index_pages.items():
+            if noun != "file":
+                self.write_index_page(index_page)
+
+    def write_index_page(self, index_page: IndexPage, **kwargs: str) -> str:
+        """Write an index page specified by `index_page`.
+
+        Returns the filename created.
+        """
+        skipped_covered_msg = skipped_empty_msg = ""
+        if n := index_page.skipped_covered_count:
+            word = plural(n, index_page.noun, index_page.plural)
+            skipped_covered_msg = f"{n} {word} skipped due to complete coverage."
+        if n := index_page.skipped_empty_count:
+            word = plural(n, index_page.noun, index_page.plural)
+            skipped_empty_msg = f"{n} empty {word} skipped."
+
+        index_buttons = [
+            {
+                "label": ip.plural.title(),
+                "url": ip.filename if ip.noun != index_page.noun else "",
+                "current": ip.noun == index_page.noun,
+            }
+            for ip in self.index_pages.values()
+        ]
+        render_data = {
+            "regions": index_page.summaries,
+            "totals": index_page.totals,
+            "noun": index_page.noun,
+            "region_noun": index_page.noun if index_page.noun != "file" else "",
+            "skip_covered": self.skip_covered,
+            "skipped_covered_msg": skipped_covered_msg,
+            "skipped_empty_msg": skipped_empty_msg,
+            "first_html": "",
+            "final_html": "",
+            "index_buttons": index_buttons,
+        }
+        render_data.update(kwargs)
+        html = self.index_tmpl.render(render_data)
+
+        index_file = os.path.join(self.directory, index_page.filename)
+        write_html(index_file, html)
+        return index_file
+
+
+@dataclass
+class FileInfo:
+    """Summary of the information from last rendering, to avoid duplicate work."""
+    hash: str = ""
+    index: IndexItem = field(default_factory=IndexItem)
+
+
+class IncrementalChecker:
+    """Logic and data to support incremental reporting.
+
+    When generating an HTML report, often only a few of the source files have
+    changed since the last time we made the HTML report.  This means previously
+    created HTML pages can be reused without generating them again, speeding
+    the command.
+
+    This class manages a JSON data file that captures enough information to
+    know whether an HTML page for a .py file needs to be regenerated or not.
+    The data file also needs to store all the information needed to create the
+    entry for the file on the index page so that if the HTML page is reused,
+    the index page can still be created to refer to it.
+ + The data looks like:: + + { + "note": "This file is an internal implementation detail ...", + // A fixed number indicating the data format. STATUS_FORMAT + "format": 5, + // The version of coverage.py + "version": "7.4.4", + // A hash of a number of global things, including the configuration + // settings and the pyfile.html template itself. + "globals": "540ee119c15d52a68a53fe6f0897346d", + "files": { + // An entry for each source file keyed by the flat_rootname(). + "z_7b071bdc2a35fa80___init___py": { + // Hash of the source, the text of the .py file. + "hash": "e45581a5b48f879f301c0f30bf77a50c", + // Information for the index.html file. + "index": { + "url": "z_7b071bdc2a35fa80___init___py.html", + "file": "cogapp/__init__.py", + "description": "", + // The Numbers for this file. + "nums": { "precision": 2, "n_files": 1, "n_statements": 43, ... } + } + }, + ... + } + } + + """ + + STATUS_FILE = "status.json" + STATUS_FORMAT = 5 + NOTE = ( + "This file is an internal implementation detail to speed up HTML report" + + " generation. Its format can change at any time. You might be looking" + + " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json" + ) + + def __init__(self, directory: str) -> None: + self.directory = directory + self._reset() + + def _reset(self) -> None: + """Initialize to empty. Causes all files to be reported.""" + self.globals = "" + self.files: dict[str, FileInfo] = {} + + def read(self) -> None: + """Read the information we stored last time.""" + try: + status_file = os.path.join(self.directory, self.STATUS_FILE) + with open(status_file) as fstatus: + status = json.load(fstatus) + except (OSError, ValueError): + # Status file is missing or malformed. + usable = False + else: + if status["format"] != self.STATUS_FORMAT: + usable = False + elif status["version"] != coverage.__version__: + usable = False + else: + usable = True + + if usable: + self.files = {} + for filename, filedict in status["files"].items(): + indexdict = filedict["index"] + index_item = IndexItem(**indexdict) + index_item.nums = Numbers(**indexdict["nums"]) + fileinfo = FileInfo( + hash=filedict["hash"], + index=index_item, + ) + self.files[filename] = fileinfo + self.globals = status["globals"] + else: + self._reset() + + def write(self) -> None: + """Write the current status.""" + status_file = os.path.join(self.directory, self.STATUS_FILE) + status_data = { + "note": self.NOTE, + "format": self.STATUS_FORMAT, + "version": coverage.__version__, + "globals": self.globals, + "files": { + fname: dataclasses.asdict(finfo) + for fname, finfo in self.files.items() + }, + } + with open(status_file, "w") as fout: + json.dump(status_data, fout, separators=(",", ":")) + + def check_global_data(self, *data: Any) -> None: + """Check the global data that can affect incremental reporting. + + Pass in whatever global information could affect the content of the + HTML pages. If the global data has changed since last time, this will + clear the data so that all files are regenerated. + + """ + m = Hasher() + for d in data: + m.update(d) + these_globals = m.hexdigest() + if self.globals != these_globals: + self._reset() + self.globals = these_globals + + def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool: + """Can we skip reporting this file? + + `data` is a CoverageData object, `fr` is a `FileReporter`, and + `rootname` is the name being used for the file. + + Returns True if the HTML page is fine as-is, False if we need to recreate + the HTML page. 
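+
+        The hash combines the file's source text with its recorded coverage
+        data, so either a source edit or new measurement data forces the page
+        to be regenerated.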
+
+        """
+        m = Hasher()
+        m.update(fr.source().encode("utf-8"))
+        add_data_to_hash(data, fr.filename, m)
+        this_hash = m.hexdigest()
+
+        file_info = self.files.setdefault(rootname, FileInfo())
+
+        if this_hash == file_info.hash:
+            # Nothing has changed to require the file to be reported again.
+            return True
+        else:
+            # File has changed, record the latest hash and force regeneration.
+            file_info.hash = this_hash
+            return False
+
+    def index_info(self, fname: str) -> IndexItem:
+        """Get the information for index.html for `fname`."""
+        return self.files.get(fname, FileInfo()).index
+
+    def set_index_info(self, fname: str, info: IndexItem) -> None:
+        """Set the information for index.html for `fname`."""
+        self.files.setdefault(fname, FileInfo()).index = info
+
+
+# Helpers for templates and generating HTML
+
+def escape(t: str) -> str:
+    """HTML-escape the text in `t`.
+
+    This is only suitable for HTML text, not attributes.
+
+    """
+    # Convert HTML special chars into HTML entities.
+    return t.replace("&", "&amp;").replace("<", "&lt;")
+
+
+def pair(ratio: tuple[int, int]) -> str:
+    """Format a pair of numbers so JavaScript can read them in an attribute."""
+    return "{} {}".format(*ratio)
diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/coverage_html.js b/.venv/Lib/site-packages/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 00000000..1face13d
--- /dev/null
+++ b/.venv/Lib/site-packages/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,733 @@
+// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
+
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
+/*global coverage: true, document, window, $ */
+
+coverage = {};
+
+// General helpers
+function debounce(callback, wait) {
+    let timeoutId = null;
+    return function(...args) {
+        clearTimeout(timeoutId);
+        timeoutId = setTimeout(() => {
+            callback.apply(this, args);
+        }, wait);
+    };
+};
+
+function checkVisible(element) {
+    const rect = element.getBoundingClientRect();
+    const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight);
+    const viewTop = 30;
+    return !(rect.bottom < viewTop || rect.top >= viewBottom);
+}
+
+function on_click(sel, fn) {
+    const elt = document.querySelector(sel);
+    if (elt) {
+        elt.addEventListener("click", fn);
+    }
+}
+
+// Helpers for table sorting
+function getCellValue(row, column = 0) {
+    const cell = row.cells[column]  // nosemgrep: eslint.detect-object-injection
+    if (cell.childElementCount == 1) {
+        var child = cell.firstElementChild;
+        if (child.tagName === "A") {
+            child = child.firstElementChild;
+        }
+        if (child instanceof HTMLDataElement && child.value) {
+            return child.value;
+        }
+    }
+    return cell.innerText || cell.textContent;
+}
+
+function rowComparator(rowA, rowB, column = 0) {
+    let valueA = getCellValue(rowA, column);
+    let valueB = getCellValue(rowB, column);
+    if (!isNaN(valueA) && !isNaN(valueB)) {
+        return valueA - valueB;
+    }
+    return valueA.localeCompare(valueB, undefined, {numeric: true});
+}
+
+function sortColumn(th) {
+    // Get the current sorting direction of the selected header,
+    // clear state on other headers and then set the new sorting direction.
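+    // The aria-sort state of the clicked header cycles like this:
+    //   "none"       -> data-default-sort-order, or "ascending"
+    //   "ascending"  -> "descending"
+    //   "descending" -> "ascending"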
+ const currentSortOrder = th.getAttribute("aria-sort"); + [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); + var direction; + if (currentSortOrder === "none") { + direction = th.dataset.defaultSortOrder || "ascending"; + } + else if (currentSortOrder === "ascending") { + direction = "descending"; + } + else { + direction = "ascending"; + } + th.setAttribute("aria-sort", direction); + + const column = [...th.parentElement.cells].indexOf(th) + + // Sort all rows and afterwards append them in order to move them in the DOM. + Array.from(th.closest("table").querySelectorAll("tbody tr")) + .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1)) + .forEach(tr => tr.parentElement.appendChild(tr)); + + // Save the sort order for next time. + if (th.id !== "region") { + let th_id = "file"; // Sort by file if we don't have a column id + let current_direction = direction; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)) + } + localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ + "th_id": th.id, + "direction": current_direction + })); + if (th.id !== th_id || document.getElementById("region")) { + // Sort column has changed, unset sorting by function or class. + localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": false, + "region_direction": current_direction + })); + } + } + else { + // Sort column has changed to by function or class, remember that. + localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({ + "by_region": true, + "region_direction": direction + })); + } +} + +// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + document.querySelectorAll("[data-shortcut]").forEach(element => { + document.addEventListener("keypress", event => { + if (event.target.tagName.toLowerCase() === "input") { + return; // ignore keypress from search filter + } + if (event.key === element.dataset.shortcut) { + element.click(); + } + }); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Populate the filter and hide100 inputs if there are saved values for them. + const saved_filter_value = localStorage.getItem(coverage.FILTER_STORAGE); + if (saved_filter_value) { + document.getElementById("filter").value = saved_filter_value; + } + const saved_hide100_value = localStorage.getItem(coverage.HIDE100_STORAGE); + if (saved_hide100_value) { + document.getElementById("hide100").checked = JSON.parse(saved_hide100_value); + } + + // Cache elements. + const table = document.querySelector("table.index"); + const table_body_rows = table.querySelectorAll("tbody tr"); + const no_rows = document.getElementById("no_rows"); + + // Observe filter keyevents. + const filter_handler = (event => { + // Keep running total of each metric, first index contains number of shown rows + const totals = new Array(table.rows[0].cells.length).fill(0); + // Accumulate the percentage as fraction + totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection + + var text = document.getElementById("filter").value; + // Store filter value + localStorage.setItem(coverage.FILTER_STORAGE, text); + const casefold = (text === text.toLowerCase()); + const hide100 = document.getElementById("hide100").checked; + // Store hide value. 
+ localStorage.setItem(coverage.HIDE100_STORAGE, JSON.stringify(hide100)); + + // Hide / show elements. + table_body_rows.forEach(row => { + var show = false; + // Check the text filter. + for (let column = 0; column < totals.length; column++) { + cell = row.cells[column]; + if (cell.classList.contains("name")) { + var celltext = cell.textContent; + if (casefold) { + celltext = celltext.toLowerCase(); + } + if (celltext.includes(text)) { + show = true; + } + } + } + + // Check the "hide covered" filter. + if (show && hide100) { + const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" "); + show = (numer !== denom); + } + + if (!show) { + // hide + row.classList.add("hidden"); + return; + } + + // show + row.classList.remove("hidden"); + totals[0]++; + + for (let column = 0; column < totals.length; column++) { + // Accumulate dynamic totals + cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + if (column === totals.length - 1) { + // Last column contains percentage + const [numer, denom] = cell.dataset.ratio.split(" "); + totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection + totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection + } + else { + totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection + } + } + }); + + // Show placeholder if no rows will be displayed. + if (!totals[0]) { + // Show placeholder, hide table. + no_rows.style.display = "block"; + table.style.display = "none"; + return; + } + + // Hide placeholder, show table. + no_rows.style.display = null; + table.style.display = null; + + const footer = table.tFoot.rows[0]; + // Calculate new dynamic sum values based on visible rows. + for (let column = 0; column < totals.length; column++) { + // Get footer cell element. + const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection + if (cell.classList.contains("name")) { + continue; + } + + // Set value into dynamic footer cell element. + if (column === totals.length - 1) { + // Percentage column uses the numerator and denominator, + // and adapts to the number of decimal places. + const match = /\.([0-9]+)/.exec(cell.textContent); + const places = match ? match[1].length : 0; + const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection + cell.dataset.ratio = `${numer} ${denom}`; + // Check denom to prevent NaN if filtered files contain no statements + cell.textContent = denom + ? `${(numer * 100 / denom).toFixed(places)}%` + : `${(100).toFixed(places)}%`; + } + else { + cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection + } + } + }); + + document.getElementById("filter").addEventListener("input", debounce(filter_handler)); + document.getElementById("hide100").addEventListener("input", debounce(filter_handler)); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + document.getElementById("filter").dispatchEvent(new Event("input")); + document.getElementById("hide100").dispatchEvent(new Event("input")); +}; +coverage.FILTER_STORAGE = "COVERAGE_FILTER_VALUE"; +coverage.HIDE100_STORAGE = "COVERAGE_HIDE100_VALUE"; + +// Set up the click-to-sort columns. 
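+// (Two localStorage keys carry sorting state between pages:
+// INDEX_SORT_STORAGE remembers the column id and direction, and
+// SORTED_BY_REGION remembers whether a function/class column drove
+// the last sort.)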
+coverage.wire_up_sorting = function () { + document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( + th => th.addEventListener("click", e => sortColumn(e.target)) + ); + + // Look for a localStorage item containing previous sort settings: + let th_id = "file", direction = "ascending"; + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + if (stored_list) { + ({th_id, direction} = JSON.parse(stored_list)); + } + let by_region = false, region_direction = "ascending"; + const sorted_by_region = localStorage.getItem(coverage.SORTED_BY_REGION); + if (sorted_by_region) { + ({ + by_region, + region_direction + } = JSON.parse(sorted_by_region)); + } + + const region_id = "region"; + if (by_region && document.getElementById(region_id)) { + direction = region_direction; + } + // If we are in a page that has a column with id of "region", sort on + // it if the last sort was by function or class. + let th; + if (document.getElementById(region_id)) { + th = document.getElementById(by_region ? region_id : th_id); + } + else { + th = document.getElementById(th_id); + } + th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending"); + th.click() +}; + +coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; +coverage.SORTED_BY_REGION = "COVERAGE_SORT_REGION"; + +// Loaded on index.html +coverage.index_ready = function () { + coverage.assign_shortkeys(); + coverage.wire_up_filter(); + coverage.wire_up_sorting(); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + + on_click(".button_show_hide_help", coverage.show_hide_help); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function () { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === "t") { + document.querySelector(frag).closest(".n").classList.add("highlight"); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + on_click(".button_toggle_run", coverage.toggle_lines); + on_click(".button_toggle_mis", coverage.toggle_lines); + on_click(".button_toggle_exc", coverage.toggle_lines); + on_click(".button_toggle_par", coverage.toggle_lines); + + on_click(".button_next_chunk", coverage.to_next_chunk_nicely); + on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); + on_click(".button_top_of_page", coverage.to_top); + on_click(".button_first_chunk", coverage.to_first_chunk); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + on_click(".button_to_index", coverage.to_index); + + on_click(".button_show_hide_help", coverage.show_hide_help); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection + } + + coverage.assign_shortkeys(); + coverage.init_scroll_markers(); + coverage.wire_up_sticky_header(); + + document.querySelectorAll("[id^=ctxs]").forEach( + cbox => cbox.addEventListener("click", coverage.expand_contexts) + ); + + // Rebuild scroll markers when the window height changes. 
+ window.addEventListener("resize", coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (event) { + const btn = event.target.closest("button"); + const category = btn.value + const show = !btn.classList.contains("show_" + category); + coverage.set_line_visibilty(category, show); + coverage.build_scroll_markers(); + coverage.filters[category] = show; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (category, should_show) { + const cls = "show_" + category; + const btn = document.querySelector(".button_toggle_" + category); + if (btn) { + if (should_show) { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); + btn.classList.add(cls); + } + else { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); + btn.classList.remove(cls); + } + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return document.getElementById("t" + n)?.closest("p"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.to_prev_file = function () { + window.location = document.getElementById("prevFileLink").href; +} + +coverage.to_next_file = function () { + window.location = document.getElementById("nextFileLink").href; +} + +coverage.to_index = function () { + location.href = document.getElementById("indexLink").href; +} + +coverage.show_hide_help = function () { + const helpCheck = document.getElementById("help_panel_state") + helpCheck.checked = !helpCheck.checked; +} + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + const classes = line_elt?.className; + if (!classes) { + return null; + } + const match = classes.match(/\bshow_\w+\b/); + if (!match) { + return null; + } + return match[0]; +}; + +coverage.to_next_chunk = function () { + const c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + const c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 1 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. 
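+    // (set_sel treats its second argument as exclusive, so the chunk
+    // ends at probe+1.)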
+ var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + if (probe <= 0) { + return; + } + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + const begin = coverage.line_elt(coverage.sel_begin); + const end = coverage.line_elt(coverage.sel_end-1); + + return ( + (checkVisible(begin) ? 1 : 0) + + (checkVisible(end) ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the top line on the screen as selection. + + // This will select the top-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(0, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(1); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the lowest line on the screen as selection. + + // This will select the bottom-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(coverage.lines_len); + } + else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (!probe_line) { + return; + } + var the_indicator = c.chunk_indicator(probe_line); + if (the_indicator) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var indicator = the_indicator; + while (probe > 0 && indicator === the_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + break; + } + indicator = c.chunk_indicator(probe_line); + } + var begin = probe + 1; + + // Search forward for the last line. 
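+        // (The loop below leaves probe one past the chunk, matching the
+        // exclusive end that set_sel expects.)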
+ probe = lineno; + indicator = the_indicator; + while (indicator === the_indicator) { + probe++; + probe_line = c.line_elt(probe); + indicator = c.chunk_indicator(probe_line); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + // Highlight the lines in the chunk + document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); + for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { + coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); + } + + coverage.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + const element = coverage.line_elt(coverage.sel_begin); + coverage.scroll_window(element.offsetTop - 60); + } +}; + +coverage.scroll_window = function (to_pos) { + window.scroll({top: to_pos, behavior: "smooth"}); +}; + +coverage.init_scroll_markers = function () { + // Init some variables + coverage.lines_len = document.querySelectorAll("#source > p").length; + + // Build html + coverage.build_scroll_markers(); +}; + +coverage.build_scroll_markers = function () { + const temp_scroll_marker = document.getElementById("scroll_marker") + if (temp_scroll_marker) temp_scroll_marker.remove(); + // Don't build markers if the window has no scroll bar. + if (document.body.scrollHeight <= window.innerHeight) { + return; + } + + const marker_scale = window.innerHeight / document.body.scrollHeight; + const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); + + let previous_line = -99, last_mark, last_top; + + const scroll_marker = document.createElement("div"); + scroll_marker.id = "scroll_marker"; + document.getElementById("source").querySelectorAll( + "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" + ).forEach(element => { + const line_top = Math.floor(element.offsetTop * marker_scale); + const line_number = parseInt(element.querySelector(".n a").id.substr(1)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.style.height = `${line_top + line_height - last_top}px`; + } + else { + // Add colored line in scroll_marker block. 
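+            // (Marker positions are scaled into viewport pixels with
+            // marker_scale = window.innerHeight / document.body.scrollHeight.)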
+ last_mark = document.createElement("div"); + last_mark.id = `m${line_number}`; + last_mark.classList.add("marker"); + last_mark.style.height = `${line_height}px`; + last_mark.style.top = `${line_top}px`; + scroll_marker.append(last_mark); + last_top = line_top; + } + + previous_line = line_number; + }); + + // Append last to prevent layout calculation + document.body.append(scroll_marker); +}; + +coverage.wire_up_sticky_header = function () { + const header = document.querySelector("header"); + const header_bottom = ( + header.querySelector(".content h2").getBoundingClientRect().top - + header.getBoundingClientRect().top + ); + + function updateHeader() { + if (window.scrollY > header_bottom) { + header.classList.add("sticky"); + } + else { + header.classList.remove("sticky"); + } + } + + window.addEventListener("scroll", updateHeader); + updateHeader(); +}; + +coverage.expand_contexts = function (e) { + var ctxs = e.target.parentNode.querySelector(".ctxs"); + + if (!ctxs.classList.contains("expanded")) { + var ctxs_text = ctxs.textContent; + var width = Number(ctxs_text[0]); + ctxs.textContent = ""; + for (var i = 1; i < ctxs_text.length; i += width) { + key = ctxs_text.substring(i, i + width).trim(); + ctxs.appendChild(document.createTextNode(contexts[key])); + ctxs.appendChild(document.createElement("br")); + } + ctxs.classList.add("expanded"); + } +}; + +document.addEventListener("DOMContentLoaded", () => { + if (document.body.classList.contains("indexfile")) { + coverage.index_ready(); + } + else { + coverage.pyfile_ready(); + } +}); diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/favicon_32.png b/.venv/Lib/site-packages/coverage/htmlfiles/favicon_32.png new file mode 100644 index 0000000000000000000000000000000000000000..8649f0475d8d20793b2ec431fe25a186a414cf10 GIT binary patch literal 1732 zcmV;#20QtQP)K2KOkBOVxIZChq#W-v7@TU%U6P(wycKT1hUJUToW3ke1U1ONa4 z000000000000000bb)GRa9mqwR9|UWHy;^RUrt?IT__Y0JUcxmBP0(51q1>E00030 z|NrOz)aw7%8sJzM<5^g%z7^qE`}_Ot|JUUG(NUkWzR|7K?Zo%@_v-8G-1N%N=D$;; zw;keH4dGY$`1t4M=HK_s*zm^0#KgqfwWhe3qO_HtvXYvtjgX>;-~C$L`&k>^R)9)7 zdPh2TL^pCnHC#0+_4D)M`p?qp!pq{jO_{8;$fbaflbx`Tn52n|n}8VFRTA1&ugOP< zPd{uvFjz7t*Vot1&d$l-xWCk}s;sQL&#O(Bskh6gqNJv>#iB=ypG1e3K!K4yc7!~M zfj4S*g^zZ7eP$+_Sl07Z646l;%urinP#D8a6TwRtnLIRcI!r4f@bK~9-`~;E(N?Lv zSEst7s;rcxsi~}{Nsytfz@MtUoR*iFc8!#vvx}Umhm4blk(_~MdVD-@dW&>!Nn~ro z_E~-ESVQAj6Wmn;(olz(O&_{U2*pZBc1aYjMh>Dq3z|6`jW`RDHV=t3I6yRKJ~LOX zz_z!!vbVXPqob#=pj3^VMT?x6t(irRmSKsMo1~LLkB&=#j!=M%NP35mfqim$drWb9 zYIb>no_LUwc!r^NkDzs4YHu@=ZHRzrafWDZd1EhEVq=tGX?tK$pIa)DTh#bkvh!J- z?^%@YS!U*0E8$q$_*aOTQ&)Ra64g>ep;BdcQgvlg8qQHrP*E$;P{-m=A*@axn@$bO zO-Y4JzS&EAi%YG}N?cn?YFS7ivPY=EMV6~YH;+Xxu|tefLS|Aza)Cg6us#)=JW!uH zQa?H>d^j+YHCtyjL^LulF*05|F$RG!AX_OHVI&MtA~_@=5_lU|0000rbW%=J06GH4 z^5LD8b8apw8vNh1ua1mF{{Hy)_U`NA;Nacc+sCpuHXa-V{r&yz?c(9#+}oX+NmiRW z+W-IqK1oDDR5;6GfCDCOP5}iL5fK(cB~ET81`MFgF2kGa9AjhSIk~-E-4&*tPPKdiilQJ11k_J082ZS z>@TvivP!5ZFG?t@{t+GpR3XR&@*hA_VE1|Lo8@L@)l*h(Z@=?c-NS$Fk&&61IzUU9 z*nPqBM=OBZ-6ka1SJgGAS-Us5EN)r#dUX%>wQZLa2ytPCtMKp)Ob z*xcu38Z&d5<-NBS)@jRD+*!W*cf-m_wmxDEqBf?czI%3U0J$Xik;lA`jg}VH?(S(V zE!M3;X2B8w0TnnW&6(8;_Uc)WD;Ms6PKP+s(sFgO!}B!^ES~GDt4qLPxwYB)^7)XA zZwo9zDy-B0B+jT6V=!=bo(zs_8{eBA78gT9GH$(DVhz;4VAYwz+bOIdZ-PNb|I&rl z^XG=vFLF)1{&nT2*0vMz#}7^9hXzzf&ZdKlEj{LihP;|;Ywqn35ajP?H?7t|i-Un% z&&kxee@9B{nwgv1+S-~0)E1{ob1^Wn`F2isurqThKK=3%&;`@{0{!D- z&CSj80t;uPu&FaJFtSXKH#ajgGj}=sEad7US6jP0|Db@0j)?(5@sf<7`~a9>s;wCa zm^)spe{uxGFmrJYI9cOh7s$>8Npkt-5EWB1UKc`{W{y5Ce$1+nM9Cr;);=Ju#N^62OSlJMn7omiUgP&ErsYzT~iGxcW aE(`!K@+CXylaC4j0000 + + + 
+ {{ title|escape }} + + + {% if extra_css %} + + {% endif %} + + + + +
+
+

+            <h1>{{ title|escape }}:
+                <span class="pc_cov">{{totals.pc_covered_str}}%</span>
+            </h1>
+
+ + + +
+ +
+ + +
+
+ +

+            <h2>
+                {% for ibtn in index_buttons %}
+                <a class="button{% if ibtn.current %} current{% endif %}"
+                    {% if ibtn.url %}href="{{ ibtn.url }}"{% endif %}>{{ ibtn.label }}</a>{#-#}
+                {% endfor %}
+            </h2>
+ +

+ coverage.py v{{__version__}}, + created at {{ time_stamp }} +

+
+
+ +
+            <table class="index" data-sortable>
+                <thead>
+                    {# The title="" attr doesn't work in Safari. #}
+                    <tr class="tablehead" title="Click to sort">
+                        <th id="file" class="name left" aria-sort="none" data-shortcut="f">File<span class="arrows"></span></th>
+                        {% if region_noun %}
+                        <th id="region" class="name left" aria-sort="none" data-shortcut="n">{{ region_noun }}<span class="arrows"></span></th>
+                        {% endif %}
+                        <th id="statements" aria-sort="none" data-default-sort-order="descending" data-shortcut="s">statements<span class="arrows"></span></th>
+                        <th id="missing" aria-sort="none" data-default-sort-order="descending" data-shortcut="m">missing<span class="arrows"></span></th>
+                        <th id="excluded" aria-sort="none" data-default-sort-order="descending" data-shortcut="x">excluded<span class="arrows"></span></th>
+                        {% if has_arcs %}
+                        <th id="branches" aria-sort="none" data-default-sort-order="descending" data-shortcut="b">branches<span class="arrows"></span></th>
+                        <th id="partial" aria-sort="none" data-default-sort-order="descending" data-shortcut="p">partial<span class="arrows"></span></th>
+                        {% endif %}
+                        <th id="coverage" class="right" aria-sort="none" data-shortcut="c">coverage<span class="arrows"></span></th>
+                    </tr>
+                </thead>
+                <tbody>
+                    {% for region in regions %}
+                    <tr class="region">
+                        <td class="name left"><a href="{{region.url}}">{{region.file}}</a></td>
+                        {% if region_noun %}
+                        <td class="name left"><a href="{{region.url}}">{{region.description}}</a></td>
+                        {% endif %}
+                        <td>{{region.nums.n_statements}}</td>
+                        <td>{{region.nums.n_missing}}</td>
+                        <td>{{region.nums.n_excluded}}</td>
+                        {% if has_arcs %}
+                        <td>{{region.nums.n_branches}}</td>
+                        <td>{{region.nums.n_partial_branches}}</td>
+                        {% endif %}
+                        <td class="right" data-ratio="{{region.nums.ratio_covered|pair}}">{{region.nums.pc_covered_str}}%</td>
+                    </tr>
+                    {% endfor %}
+                </tbody>
+                <tfoot>
+                    <tr class="total">
+                        <td class="name left">Total</td>
+                        {% if region_noun %}
+                        <td class="name left">&nbsp;</td>
+                        {% endif %}
+                        <td>{{totals.n_statements}}</td>
+                        <td>{{totals.n_missing}}</td>
+                        <td>{{totals.n_excluded}}</td>
+                        {% if has_arcs %}
+                        <td>{{totals.n_branches}}</td>
+                        <td>{{totals.n_partial_branches}}</td>
+                        {% endif %}
+                        <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
+                    </tr>
+                </tfoot>
+            </table>
+            <p id="no_rows">
+                No items found using the specified filter.
+            </p>
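+            {# Each coverage cell carries data-ratio="numer denom" (from the pair() helper in html.py); the filter JS re-totals visible rows from those attributes. #}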
+
+            {% if skipped_covered_msg %}
+            <p>
+                {{ skipped_covered_msg }}
+            </p>
+            {% endif %}
+            {% if skipped_empty_msg %}
+            <p>
+                {{ skipped_empty_msg }}
+            </p>
+ {% endif %} +
+ + + + + diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/keybd_closed.png b/.venv/Lib/site-packages/coverage/htmlfiles/keybd_closed.png new file mode 100644 index 0000000000000000000000000000000000000000..ba119c47df81ed2bbd27a06988abf700139c4f99 GIT binary patch literal 9004 zcmeHLc{tSF+aIY=A^R4_poB4tZAN2XC;O7M(inrW3}(h&Q4}dl*&-65$i9^&vW6_# zcM4g`Qix=GhkBl;=lwnJ@Ap2}^}hc-b6vBXb3XUyzR%~}_c`-Dw+!?&>5p(90RRB> zXe~7($~PP3eT?=X<@3~Q1w84vX~IoSx~1#~02+TopXK(db;4v6!{+W`RHLkkHO zo;+s?)puc`+$yOwHv>I$5^8v^F3<|$44HA8AFnFB0cAP|C`p}aSMJK*-CUB{eQ!;K z-9Ju3OQ+xVPr3P#o4>_lNBT;M+1vgV&B~6!naOGHb-LFA9TkfHv1IFA1Y!Iz!Zl3) z%c#-^zNWPq7U_}6I7aHSmFWi125RZrNBKyvnV^?64)zviS;E!UD%LaGRl6@zn!3E{ zJ`B$5``cH_3a)t1#6I7d==JeB_IcSU%=I#DrRCBGm8GvCmA=+XHEvC2SIfsNa0(h9 z7P^C4U`W@@`9p>2f^zyb5B=lpc*RZMn-%%IqrxSWQF8{ec3i?-AB(_IVe z)XgT>Y^u41MwOMFvU=I4?!^#jaS-%bjnx@ zmL44yVEslR_ynm18F!u}Ru#moEn3EE?1=9@$B1Z5aLi5b8{&?V(IAYBzIar!SiY3< z`l0V)djHtrImy}(!7x-Pmq+njM)JFQ9mx*(C+9a3M)(_SW|lrN=gfxFhStu^zvynS zm@gl;>d8i8wpUkX42vS3BEzE3-yctH%t0#N%s+6-&_<*Fe7+h=`=FM?DOg1)eGL~~ zQvIFm$D*lqEh07XrXY=jb%hdyP4)`wyMCb$=-z9(lOme9=tirVkb)_GOl2MJn;=Ky z^0pV1owR7KP-BSxhI@@@+gG0roD-kXE1;!#R7KY1QiUbyDdTElm|ul7{mMdF1%UDJ z_vp=Vo!TCF?D*?u% zk~}4!xK2MSQd-QKC0${G=ZRv2x8%8ZqdfR!?Dv=5Mj^8WU)?iH;C?o6rSQy*^YwQb zf@5V)q=xah#a3UEIBC~N7on(p4jQd4K$|i7k`d8mw|M{Mxapl46Z^X^9U}JgqH#;T z`CTzafpMD+J-LjzF+3Xau>xM_sXisRj6m-287~i9g|%gHc}v77>n_+p7ZgmJszx!b zSmL4wV;&*5Z|zaCk`rOYFdOjZLLQr!WSV6AlaqYh_OE)>rYdtx`gk$yAMO=-E1b~J zIZY6gM*}1UWsJ)TW(pf1=h?lJy_0TFOr|nALGW>$IE1E7z+$`^2WJY+>$$nJo8Rs` z)xS>AH{N~X3+b=2+8Q_|n(1JoGv55r>TuwBV~MXE&9?3Zw>cIxnOPNs#gh~C4Zo=k z&!s;5)^6UG>!`?hh0Q|r|Qbm>}pgtOt23Vh!NSibozH$`#LSiYL)HR4bkfEJMa zBHwC3TaHx|BzD|MXAr>mm&FbZXeEX-=W}Ji&!pji4sO$#0Wk^Q7j%{8#bJPn$C=E% zPlB}0)@Ti^r_HMJrTMN?9~4LQbIiUiOKBVNm_QjABKY4;zC88yVjvB>ZETNzr%^(~ zI3U&Ont?P`r&4 z#Bp)jcVV_N_{c1_qW}_`dQm)D`NG?h{+S!YOaUgWna4i8SuoLcXAZ|#Jh&GNn7B}3 z?vZ8I{LpmCYT=@6)dLPd@|(;d<08ufov%+V?$mgUYQHYTrc%eA=CDUzK}v|G&9}yJ z)|g*=+RH1IQ>rvkY9UIam=fkxWDyGIKQ2RU{GqOQjD8nG#sl+$V=?wpzJdT=wlNWr z1%lw&+;kVs(z?e=YRWRA&jc75rQ~({*TS<( z8X!j>B}?Bxrrp%wEE7yBefQ?*nM20~+ZoQK(NO_wA`RNhsqVkXHy|sod@mqen=B#@ zmLi=x2*o9rWqTMWoB&qdZph$~qkJJTVNc*8^hU?gH_fY{GYPEBE8Q{j0Y$tvjMv%3 z)j#EyBf^7n)2d8IXDYX2O0S%ZTnGhg4Ss#sEIATKpE_E4TU=GimrD5F6K(%*+T-!o z?Se7^Vm`$ZKDwq+=~jf?w0qC$Kr&R-;IF#{iLF*8zKu8(=#chRO;>x zdM;h{i{RLpJgS!B-ueTFs8&4U4+D8|7nP~UZ@P`J;*0sj^#f_WqT#xpA?@qHonGB& zQ<^;OLtOG1w#)N~&@b0caUL7syAsAxV#R`n>-+eVL9aZwnlklzE>-6!1#!tVA`uNo z>Gv^P)sohc~g_1YMC;^f(N<{2y5C^;QCEXo;LQ^#$0 zr>jCrdoeXuff!dJ^`#=Wy2Gumo^Qt7BZrI~G+Pyl_kL>is3P0^JlE;Sjm-YfF~I>t z_KeNpK|5U&F4;v?WS&#l(jxUWDarfcIcl=-6!8>^S`57!M6;hZea5IFA@)2+*Rt85 zi-MBs_b^DU8LygXXQGkG+86N7<%M|baM(orG*ASffC`p!?@m{qd}IcYmZyi^d}#Q& zNjk-0@CajpUI-gPm20ERVDO!L8@p`tMJ69FD(ASIkdoLdiRV6h9TPKRz>2WK4upHd z6OZK33EP?`GoJkXh)S035}uLUO$;TlXwNdMg-WOhLB)7a`-%*a9lFmjf6n+4ZmIHN z-V@$ z8PXsoR4*`5RwXz=A8|5;aXKtSHFccj%dG7cO~UBJnt)61K>-uPX)`vu{7fcX6_>zZ zw_2V&Li+7mxbf!f7{Rk&VVyY!UtZywac%g!cH+xh#j$a`uf?XWl<``t`36W;p7=_* zO6uf~2{sAdkZn=Ts@p0>8N8rzw2ZLS@$ibV-c-QmG@%|3gUUrRxu=e*ekhTa+f?8q z3$JVGPr9w$VQG~QCq~Y=2ThLIH!T@(>{NihJ6nj*HA_C#Popv)CBa)+UI-bx8u8zfCT^*1|k z&N9oFYsZEijPn31Yx_yO5pFs>0tOAV=oRx~Wpy5ie&S_449m4R^{LWQMA~}vocV1O zIf#1ZV85E>tvZE4mz~zn{hs!pkIQM;EvZMimqiPAJu-9P@mId&nb$lsrICS=)zU3~ zn>a#9>}5*3N)9;PTMZ)$`5k} z?iG}Rwj$>Y*|(D3S3e&fxhaPHma8@vwu(cwdlaCjX+NIK6=$H4U`rfzcWQVOhp{fnzuZhgCCGpw|p zTi`>cv~xVzdx|^`C0vXdlMwPae3S?>3|7v$e*Bs6-5gS>>FMHk_r2M(ADOV{KV7+6 zA@5Q(mdx%7J}MY}K461iuQ}5GwDGI=Yc&g0MZHu)7gC3{5@QZj6SJl*o0MS2Cl_ia zyK?9QmC9tJ6yn{EA-erJ4wk$+!E#X(s~9h^HOmQ_|6V_s1)k;%9Q6Niw}SyT?jxl4 
z;HYz2$Nj$8Q_*Xo`TWEUx^Q9b+ik@$o39`mlY&P}G8wnjdE+Dlj?uL;$aB$n;x zWoh-M_u>9}_Ok@d_uidMqz10zJc}RQijPW3Fs&~1am=j*+A$QWTvxf9)6n;n8zTQW z!Q_J1%apTsJzLF`#^P_#mRv2Ya_keUE7iMSP!ha-WQoo0vZZG?gyR;+4q8F6tL#u< zRj8Hu5f-p1$J;)4?WpGL{4@HmJ6&tF9A5Tc8Trp>;Y>{^s?Q1&bam}?OjsnKd?|Z82aix26wUOLxbEW~E)|CgJ#)MLf_me# zv4?F$o@A~Um)6>HlM0=3Bd-vc91EM}D+t6-@!}O%i*&Wl%@#C8X+?5+nv`oPu!!=5 znbL+Fk_#J_%8vOq^FIv~5N(nk03kyo1p@l|1c+rO^zCG3bk2?|%AF;*|4si1XM<`a z1NY0-8$wv?&129!(g_A1lXR!+pD*1*cF?T~e1d6*G1Fz)jcSaZoKpxtA%FNnKP2jo zLXn@OR#1z@6zuH%mMB98}-t zHJqClsZ!G5xMSgIs_=<8sBePXxfoXsuvy`|buON9BX%s-o>OVLA)k3W=wKnw1?so$ zEjm0aS=zu@Xu#;{A)QTjJ$a9_={++ACkRY*sk3jLk&Fu}RxR<-DXR<`5`$VNG*wJE zidM6VzaQ!M0gbQM98@x@;#0qUS8O)p6mrYwTk*;8J~!ovbY6jon^Ki}uggd3#J5G8 z>awvtF85Y<9yE{Iag}J7O7)1O=ylk^255@XmV5J06-{xaaSNASZoTKKp~$tSxdUI~ zU1RZ&UuW37Ro&_ryj^cSt$Jd&pt|+h!A&dwcr&`S=R5E`=6Tm`+(qGm@$YZ8(8@a$ zXfo@Rwtvm7N3RMmVCb7radAs-@QtCXx^CQ-<)V>QPLZy@jH{#dc4#(y zV)6Hp{ZMz!|NG8!>i01gZMy)G<8Hf2X7e&LH_gOaajW<<^Xi55@OnlY*|S|*TS8;u_nHbv7lgmmZ+Q<5 zi!*lLCJmdpyzl(L${$C?(pVo|oR%r~x_B_ocPePa_);27^=n4L=`toZ;xdBut9rSv z?wDQ7j2I3WQBdhz%X7`2YaG_y|wA!7|s?k;A&WNMLMTZEzCaE^d??E&u?f=ejQBR~|< z)=thyP2(p8r6mt?Ad}tXAP_GvF9|P630I;$1cpQ+Ay7C34hK^ZV3H4kjPV8&NP>G5 zKRDEIBrFl{M#j4mfP0)68&?mqJP1S?2mU0djAGTjDV;wZ?6vplNn~3Hn$nP>%!dMi zz@bnC7zzi&k&s{QDWkf&zgrVXKUJjY3Gv3bL0}S4h>OdgEJ$Q^&p-VAr3J}^a*+rz z!jW7(h*+GuCyqcC{MD(Ovj^!{pB^OKUe|uy&bD?CN>KZrf3?v>>l*xSvnQiH-o^ViN$%FRdm9url;%(*jf5H$*S)8;i0xWHdl>$p);nH9v0)YfW?Vz$! zNCeUbi9`NEg(i^57y=fzM@1o*z*Bf6?QCV>2p9}(BLlYsOCfMjFv1pw1mlo)Py{8v zppw{MDfEeWN+n>Ne~oI7%9cU}mz0r3!es2gNF0t5jkGipjIo2lz;-e)7}Ul_#!eDv zw;#>kI>;#-pyfeu3Fsd^2F@6=oh#8r9;A!G0`-mm7%{=S;Ec(bJ=I_`FodKGQVNEY zmXwr4{9*jpDl%4{ggQZ5Ac z%wYTdl*!1c5^)%^E78Q&)ma|27c6j(a=)g4sGrp$r{jv>>M2 z6y)E5|Aooe!PSfKzvKA>`a6pfK3=E8vL14ksP&f=>gOP?}rG6ye@9ZR3 zJF*vsh*P$w390i!FV~~_Hv6t2Zl<4VUi|rNja#boFt{%q~xGb z(2petq9A*_>~B*>?d?Olx^lmYg4)}sH2>G42RE; literal 0 HcmV?d00001 diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/pyfile.html b/.venv/Lib/site-packages/coverage/htmlfiles/pyfile.html new file mode 100644 index 00000000..5a6ea43d --- /dev/null +++ b/.venv/Lib/site-packages/coverage/htmlfiles/pyfile.html @@ -0,0 +1,149 @@ +{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #} +{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #} + + + + + + Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}% + + + {% if extra_css %} + + {% endif %} + + {% if contexts_json %} + + {% endif %} + + + + + +
+
+

+            <h1>Coverage for <b>{{relative_filename|escape}}</b>:
+                <span class="pc_cov">{{nums.pc_covered_str}}%</span>
+            </h1>

+ + + +

+ {{nums.n_statements}} statements   + + + + {% if has_arcs %} + + {% endif %} +

+ +

+            <p class="goto">
+                <a id="prevFileLink" class="nav" href="{{ prev_html }}">« prev</a> &nbsp; &nbsp;
+                <a id="indexLink" class="nav" href="index.html">^ index</a> &nbsp; &nbsp;
+                <a id="nextFileLink" class="nav" href="{{ next_html }}">» next</a>
+                &nbsp; &nbsp; &nbsp;
+                coverage.py v{{__version__}},
+                created at {{ time_stamp }}
+            </p>

+ + +
+
+ +
+ {% for line in lines -%} + {% joined %} +

+ {{line.number}} + {{line.html}}  + {% if line.context_list %} + + {% endif %} + {# Things that should float right in the line. #} + + {% if line.annotate %} + {{line.annotate}} + {{line.annotate_long}} + {% endif %} + {% if line.contexts %} + + {% endif %} + + {# Things that should appear below the line. #} + {% if line.context_str %} + {{ line.context_str }} + {% endif %} +

+ {% endjoined %} + {% endfor %} +
+ + + + + diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/style.css b/.venv/Lib/site-packages/coverage/htmlfiles/style.css new file mode 100644 index 00000000..3cdaf05a --- /dev/null +++ b/.venv/Lib/site-packages/coverage/htmlfiles/style.css @@ -0,0 +1,337 @@ +@charset "UTF-8"; +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ +/* Don't edit this .css file. Edit the .scss file instead! */ +html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { body { color: #eee; } } + +html > body { font-size: 16px; } + +a:active, a:focus { outline: 2px dashed #007acc; } + +p { font-size: .875em; line-height: 1.4em; } + +table { border-collapse: collapse; } + +td { vertical-align: top; } + +table tr.hidden { display: none !important; } + +p#no_rows { display: none; font-size: 1.15em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +a.nav { text-decoration: none; color: inherit; } + +a.nav:hover { text-decoration: underline; color: inherit; } + +.hidden { display: none; } + +header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } + +@media (prefers-color-scheme: dark) { header { background: black; } } + +@media (prefers-color-scheme: dark) { header { border-color: #333; } } + +header .content { padding: 1rem 3.5rem; } + +header h2 { margin-top: .5em; font-size: 1em; } + +header h2 a.button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header h2 a.button { background: #333; } } + +@media (prefers-color-scheme: dark) { header h2 a.button { border-color: #444; } } + +header h2 a.button.current { border: 2px solid; background: #fff; border-color: #999; cursor: default; } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { header h2 a.button.current { border-color: #777; } } + +header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } + +header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } + +header.sticky .text { display: none; } + +header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } + +header.sticky .content { padding: 0.5rem 3.5rem; } + +header.sticky .content p { font-size: 1em; } + +header.sticky ~ #source { padding-top: 6.5em; } + +main { position: relative; z-index: 1; } + +footer { margin: 1rem 3.5rem; } + +footer .content { padding: 0; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } + +#index { margin: 1rem 0 0 3.5rem; } + +h1 { font-size: 1.25em; display: inline-block; } + +#filter_container { float: right; margin: 0 2em 0 0; line-height: 1.66em; } + 
+#filter_container #filter { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { #filter_container #filter { border-color: #444; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #filter_container #filter { color: #eee; } } + +#filter_container #filter:focus { border-color: #007acc; } + +#filter_container :disabled ~ label { color: #ccc; } + +@media (prefers-color-scheme: dark) { #filter_container :disabled ~ label { color: #444; } } + +#filter_container label { font-size: .875em; color: #666; } + +@media (prefers-color-scheme: dark) { #filter_container label { color: #aaa; } } + +header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header button { background: #333; } } + +@media (prefers-color-scheme: dark) { header button { border-color: #444; } } + +header button:active, header button:focus { outline: 2px dashed #007acc; } + +header button.run { background: #eeffee; } + +@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } + +header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } + +header button.mis { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } + +header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } + +header button.exc { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { header button.exc { background: #333; } } + +header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } + +header button.par { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { header button.par { background: #650; } } + +header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } + +#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } + +#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } + +#help_panel_wrapper { float: right; position: relative; } + +#keyboard_icon { margin: 5px; } + +#help_panel_state { display: none; } + +#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } + +#help_panel .keyhelp p { margin-top: .75em; } + +#help_panel .legend { font-style: italic; margin-bottom: 1em; } + +.indexfile #help_panel { width: 25em; } + +.pyfile #help_panel { width: 18em; } + +#help_panel_state:checked ~ #help_panel { display: block; } + +kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } + +#source { padding: 1em 0 1em 
3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } + +#source p { position: relative; white-space: pre; } + +#source p * { box-sizing: border-box; } + +#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } + +@media (prefers-color-scheme: dark) { #source p .n { color: #777; } } + +#source p .n.highlight { background: #ffdd00; } + +#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } + +#source p .n a:hover { text-decoration: underline; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } + +#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } + +@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } + +#source p .t:hover { background: #f2f2f2; } + +@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } + +#source p .t:hover ~ .r .annotate.long { display: block; } + +#source p .t .com { color: #008000; font-style: italic; line-height: 1px; } + +@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } + +#source p .t .key { font-weight: bold; line-height: 1px; } + +#source p .t .str { color: #0451a5; } + +@media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } + +#source p.mis .t { border-left: 0.2em solid #ff0000; } + +#source p.mis.show_mis .t { background: #fdd; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } + +#source p.mis.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } + +#source p.run .t { border-left: 0.2em solid #00dd00; } + +#source p.run.show_run .t { background: #dfd; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } + +#source p.run.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } + +#source p.exc .t { border-left: 0.2em solid #808080; } + +#source p.exc.show_exc .t { background: #eee; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } + +#source p.exc.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } + +#source p.par .t { border-left: 0.2em solid #bbbb00; } + +#source p.par.show_par .t { background: #ffa; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } + +#source p.par.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } + +#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } + +@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } + +#source p .annotate.short:hover ~ .long { display: block; } + +#source p .annotate.long { width: 30em; right: 2.5em; } + 
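+/* Context lists are driven by a hidden checkbox: its label (.ctx) toggles
+   it, and :checked expands the .ctxs block below the line. */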
+#source p input { display: none; } + +#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } + +#source p input ~ .r label.ctx::before { content: "▶ "; } + +#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } + +#source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } + +#source p input:checked ~ .r label.ctx::before { content: "▼ "; } + +#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } + +#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } + +@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } + +#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } + +@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } + +#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } + +#index table.index { margin-left: -.5em; } + +#index td, #index th { text-align: right; padding: .25em .5em; border-bottom: 1px solid #eee; } + +@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } + +#index td.name, #index th.name { text-align: left; width: auto; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; min-width: 15em; } + +#index th { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-style: italic; color: #333; cursor: pointer; } + +@media (prefers-color-scheme: dark) { #index th { color: #ddd; } } + +#index th:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } + +#index th .arrows { color: #666; font-size: 85%; font-family: sans-serif; font-style: normal; pointer-events: none; } + +#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } + +@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } + +#index th[aria-sort="ascending"] .arrows::after { content: " ▲"; } + +#index th[aria-sort="descending"] .arrows::after { content: " ▼"; } + +#index td.name { font-size: 1.15em; } + +#index td.name a { text-decoration: none; color: inherit; } + +#index td.name .no-noun { font-style: italic; } + +#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } + +#index tr.region:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index tr.region:hover { background: #333; } } + +#index tr.region:hover td.name { text-decoration: underline; color: inherit; } + +#scroll_marker { position: fixed; z-index: 3; 
right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } + +@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } + +#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } + +@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } diff --git a/.venv/Lib/site-packages/coverage/htmlfiles/style.scss b/.venv/Lib/site-packages/coverage/htmlfiles/style.scss new file mode 100644 index 00000000..9bf1a7cc --- /dev/null +++ b/.venv/Lib/site-packages/coverage/htmlfiles/style.scss @@ -0,0 +1,760 @@ +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ + +// CSS styles for coverage.py HTML reports. + +// When you edit this file, you need to run "make css" to get the CSS file +// generated, and then check in both the .scss and the .css files. + +// When working on the file, this command is useful: +// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css +// +// OR you can process sass purely in python with `pip install pysass`, then: +// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css + +// Ignore this comment, it's for the CSS output file: +/* Don't edit this .css file. Edit the .scss file instead! */ + +// Dimensions +$left-gutter: 3.5rem; + +// +// Declare colors and variables +// + +$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; +$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace; + +$off-button-lighten: 50%; +$hover-dark-amt: 95%; + +$focus-color: #007acc; + +$mis-color: #ff0000; +$run-color: #00dd00; +$exc-color: #808080; +$par-color: #bbbb00; + +$light-bg: #fff; +$light-fg: #000; +$light-gray1: #f8f8f8; +$light-gray2: #eee; +$light-gray3: #ccc; +$light-gray4: #999; +$light-gray5: #666; +$light-gray6: #333; +$light-pln-bg: $light-bg; +$light-mis-bg: #fdd; +$light-run-bg: #dfd; +$light-exc-bg: $light-gray2; +$light-par-bg: #ffa; +$light-token-com: #008000; +$light-token-str: #0451a5; +$light-context-bg-color: #d0e8ff; + +$dark-bg: #1e1e1e; +$dark-fg: #eee; +$dark-gray1: #222; +$dark-gray2: #333; +$dark-gray3: #444; +$dark-gray4: #777; +$dark-gray5: #aaa; +$dark-gray6: #ddd; +$dark-pln-bg: $dark-bg; +$dark-mis-bg: #4b1818; +$dark-run-bg: #373d29; +$dark-exc-bg: $dark-gray2; +$dark-par-bg: #650; +$dark-token-com: #6a9955; +$dark-token-str: #9cdcfe; +$dark-context-bg-color: #056; + +// +// Mixins and utilities +// + +@mixin background-dark($color) { + @media (prefers-color-scheme: dark) { + background: $color; + } +} +@mixin color-dark($color) { + @media (prefers-color-scheme: dark) { + color: $color; + } +} +@mixin border-color-dark($color) { + @media (prefers-color-scheme: dark) { + border-color: $color; + } +} + +// Add visual outline to navigable elements on focus improve accessibility. 
+@mixin focus-border { + &:active, &:focus { + outline: 2px dashed $focus-color; + } +} + +@mixin button-shape { + font-family: inherit; + font-size: inherit; + border: 1px solid; + border-radius: .2em; + background: $light-gray2; + @include background-dark($dark-gray2); + color: inherit; + text-decoration: none; + padding: .1em .5em; + margin: 1px calc(.1em + 1px); + cursor: pointer; + border-color: $light-gray3; + @include border-color-dark($dark-gray3); +} + +// Page-wide styles +html, body, h1, h2, h3, p, table, td, th { + margin: 0; + padding: 0; + border: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; +} + +// Set baseline grid to 16 pt. +body { + font-family: $font-normal; + font-size: 1em; + background: $light-bg; + color: $light-fg; + @include background-dark($dark-bg); + @include color-dark($dark-fg); +} + +html>body { + font-size: 16px; +} + +a { + @include focus-border; +} + +p { + font-size: .875em; + line-height: 1.4em; +} + +table { + border-collapse: collapse; +} +td { + vertical-align: top; +} +table tr.hidden { + display: none !important; +} + +p#no_rows { + display: none; + font-size: 1.15em; + font-family: $font-normal; +} + +a.nav { + text-decoration: none; + color: inherit; + + &:hover { + text-decoration: underline; + color: inherit; + } +} + +.hidden { + display: none; +} + +// Page structure +header { + background: $light-gray1; + @include background-dark(black); + width: 100%; + z-index: 2; + border-bottom: 1px solid $light-gray3; + @include border-color-dark($dark-gray2); + + .content { + padding: 1rem $left-gutter; + } + + h2 { + margin-top: .5em; + font-size: 1em; + + a.button { + @include button-shape; + &.current { + border: 2px solid; + background: $light-bg; + @include background-dark($dark-bg); + border-color: $light-gray4; + @include border-color-dark($dark-gray4); + cursor: default; + } + } + } + + p.text { + margin: .5em 0 -.5em; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } + + &.sticky { + position: fixed; + left: 0; + right: 0; + height: 2.5em; + + .text { + display: none; + } + + h1, h2 { + font-size: 1em; + margin-top: 0; + display: inline-block; + } + + .content { + padding: .5rem $left-gutter; + p { + font-size: 1em; + } + } + + & ~ #source { + padding-top: 6.5em; + } + } +} + +main { + position: relative; + z-index: 1; +} + +footer { + margin: 1rem $left-gutter; + + .content { + padding: 0; + color: $light-gray5; + @include color-dark($dark-gray5); + font-style: italic; + } +} + +#index { + margin: 1rem 0 0 $left-gutter; +} + +// Header styles + +h1 { + font-size: 1.25em; + display: inline-block; +} + +#filter_container { + float: right; + margin: 0 2em 0 0; + line-height: 1.66em; + + #filter { + width: 10em; + padding: 0.2em 0.5em; + border: 2px solid $light-gray3; + background: $light-bg; + color: $light-fg; + @include border-color-dark($dark-gray3); + @include background-dark($dark-bg); + @include color-dark($dark-fg); + &:focus { + border-color: $focus-color; + } + } + + :disabled ~ label{ + color: $light-gray3; + @include color-dark($dark-gray3); + } + + label { + font-size: .875em; + color: $light-gray5; + @include color-dark($dark-gray5); + } +} + +header button { + @include button-shape; + @include focus-border; + + &.run { + background: mix($light-run-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-run-bg); + &.show_run { + background: $light-run-bg; + @include background-dark($dark-run-bg); + border: 
2px solid $run-color; + margin: 0 .1em; + } + } + &.mis { + background: mix($light-mis-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-mis-bg); + &.show_mis { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + border: 2px solid $mis-color; + margin: 0 .1em; + } + } + &.exc { + background: mix($light-exc-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-exc-bg); + &.show_exc { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + border: 2px solid $exc-color; + margin: 0 .1em; + } + } + &.par { + background: mix($light-par-bg, $light-bg, $off-button-lighten); + @include background-dark($dark-par-bg); + &.show_par { + background: $light-par-bg; + @include background-dark($dark-par-bg); + border: 2px solid $par-color; + margin: 0 .1em; + } + } +} + +// Yellow post-it things. +%popup { + display: none; + position: absolute; + z-index: 999; + background: #ffffcc; + border: 1px solid #888; + border-radius: .2em; + color: #333; + padding: .25em .5em; +} + +// Yellow post-it's in the text listings. +%in-text-popup { + @extend %popup; + white-space: normal; + float: right; + top: 1.75em; + right: 1em; + height: auto; +} + +// Help panel +#help_panel_wrapper { + float: right; + position: relative; +} + +#keyboard_icon { + margin: 5px; +} + +#help_panel_state { + display: none; +} + +#help_panel { + @extend %popup; + top: 25px; + right: 0; + padding: .75em; + border: 1px solid #883; + + color: #333; + + .keyhelp p { + margin-top: .75em; + } + + .legend { + font-style: italic; + margin-bottom: 1em; + } + + .indexfile & { + width: 25em; + } + + .pyfile & { + width: 18em; + } + + #help_panel_state:checked ~ & { + display: block; + } +} + +kbd { + border: 1px solid black; + border-color: #888 #333 #333 #888; + padding: .1em .35em; + font-family: $font-code; + font-weight: bold; + background: #eee; + border-radius: 3px; +} + +// Source file styles + +// The slim bar at the left edge of the source lines, colored by coverage. +$border-indicator-width: .2em; + +#source { + padding: 1em 0 1em $left-gutter; + font-family: $font-code; + + p { + // position relative makes position:absolute pop-ups appear in the right place. + position: relative; + white-space: pre; + + * { + box-sizing: border-box; + } + + .n { + float: left; + text-align: right; + width: $left-gutter; + box-sizing: border-box; + margin-left: -$left-gutter; + padding-right: 1em; + color: $light-gray4; + user-select: none; + @include color-dark($dark-gray4); + + &.highlight { + background: #ffdd00; + } + + a { + // Make anchors to the line scroll the line to be + // visible beneath the fixed-position header. 
+ scroll-margin-top: 6em; + text-decoration: none; + color: $light-gray4; + @include color-dark($dark-gray4); + &:hover { + text-decoration: underline; + color: $light-gray4; + @include color-dark($dark-gray4); + } + } + } + + .t { + display: inline-block; + width: 100%; + box-sizing: border-box; + margin-left: -.5em; + padding-left: .5em - $border-indicator-width; + border-left: $border-indicator-width solid $light-bg; + @include border-color-dark($dark-bg); + + &:hover { + background: mix($light-pln-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt)); + + & ~ .r .annotate.long { + display: block; + } + } + + // Syntax coloring + .com { + color: $light-token-com; + @include color-dark($dark-token-com); + font-style: italic; + line-height: 1px; + } + .key { + font-weight: bold; + line-height: 1px; + } + .str { + color: $light-token-str; + @include color-dark($dark-token-str); + } + } + + &.mis { + .t { + border-left: $border-indicator-width solid $mis-color; + } + + &.show_mis .t { + background: $light-mis-bg; + @include background-dark($dark-mis-bg); + + &:hover { + background: mix($light-mis-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.run { + .t { + border-left: $border-indicator-width solid $run-color; + } + + &.show_run .t { + background: $light-run-bg; + @include background-dark($dark-run-bg); + + &:hover { + background: mix($light-run-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.exc { + .t { + border-left: $border-indicator-width solid $exc-color; + } + + &.show_exc .t { + background: $light-exc-bg; + @include background-dark($dark-exc-bg); + + &:hover { + background: mix($light-exc-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt)); + } + } + } + + &.par { + .t { + border-left: $border-indicator-width solid $par-color; + } + + &.show_par .t { + background: $light-par-bg; + @include background-dark($dark-par-bg); + + &:hover { + background: mix($light-par-bg, $light-fg, $hover-dark-amt); + @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt)); + } + } + + } + + .r { + position: absolute; + top: 0; + right: 2.5em; + font-family: $font-normal; + } + + .annotate { + font-family: $font-normal; + color: $light-gray5; + @include color-dark($dark-gray6); + padding-right: .5em; + + &.short:hover ~ .long { + display: block; + } + + &.long { + @extend %in-text-popup; + width: 30em; + right: 2.5em; + } + } + + input { + display: none; + + & ~ .r label.ctx { + cursor: pointer; + border-radius: .25em; + &::before { + content: "▶ "; + } + &:hover { + background: mix($light-context-bg-color, $light-bg, $off-button-lighten); + @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten)); + color: $light-gray5; + @include color-dark($dark-gray5); + } + } + + &:checked ~ .r label.ctx { + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + color: $light-gray5; + @include color-dark($dark-gray5); + border-radius: .75em .75em 0 0; + padding: 0 .5em; + margin: -.25em 0; + &::before { + content: "▼ "; + } + } + + &:checked ~ .ctxs { + padding: .25em .5em; + overflow-y: scroll; + max-height: 10.5em; + } + } + + label.ctx { + color: $light-gray4; + @include color-dark($dark-gray4); + display: inline-block; + padding: 0 .5em; + font-size: .8333em; // 10/12 + } + + 
.ctxs { + display: block; + max-height: 0; + overflow-y: hidden; + transition: all .2s; + padding: 0 .5em; + font-family: $font-normal; + white-space: nowrap; + background: $light-context-bg-color; + @include background-dark($dark-context-bg-color); + border-radius: .25em; + margin-right: 1.75em; + text-align: right; + } + } +} + + +// index styles +#index { + font-family: $font-code; + font-size: 0.875em; + + table.index { + margin-left: -.5em; + } + td, th { + text-align: right; + padding: .25em .5em; + border-bottom: 1px solid $light-gray2; + @include border-color-dark($dark-gray2); + &.name { + text-align: left; + width: auto; + font-family: $font-normal; + min-width: 15em; + } + } + th { + font-family: $font-normal; + font-style: italic; + color: $light-gray6; + @include color-dark($dark-gray6); + cursor: pointer; + &:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + } + .arrows { + color: #666; + font-size: 85%; + font-family: sans-serif; + font-style: normal; + pointer-events: none; + } + &[aria-sort="ascending"], &[aria-sort="descending"] { + white-space: nowrap; + background: $light-gray2; + @include background-dark($dark-gray2); + padding-left: .5em; + } + &[aria-sort="ascending"] .arrows::after { + content: " ▲"; + } + &[aria-sort="descending"] .arrows::after { + content: " ▼"; + } + } + td.name { + font-size: 1.15em; + a { + text-decoration: none; + color: inherit; + } + & .no-noun { + font-style: italic; + } + } + + tr.total td, + tr.total_dynamic td { + font-weight: bold; + border-top: 1px solid #ccc; + border-bottom: none; + } + tr.region:hover { + background: $light-gray2; + @include background-dark($dark-gray2); + td.name { + text-decoration: underline; + color: inherit; + } + } +} + +// scroll marker styles +#scroll_marker { + position: fixed; + z-index: 3; + right: 0; + top: 0; + width: 16px; + height: 100%; + background: $light-bg; + border-left: 1px solid $light-gray2; + @include background-dark($dark-bg); + @include border-color-dark($dark-gray2); + will-change: transform; // for faster scrolling of fixed element in Chrome + + .marker { + background: $light-gray3; + @include background-dark($dark-gray3); + position: absolute; + min-height: 3px; + width: 100%; + } +} diff --git a/.venv/Lib/site-packages/coverage/inorout.py b/.venv/Lib/site-packages/coverage/inorout.py new file mode 100644 index 00000000..75f48dbd --- /dev/null +++ b/.venv/Lib/site-packages/coverage/inorout.py @@ -0,0 +1,594 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Determining whether files are being measured/reported or not.""" + +from __future__ import annotations + +import importlib.util +import inspect +import itertools +import os +import platform +import re +import sys +import sysconfig +import traceback + +from types import FrameType, ModuleType +from typing import ( + cast, Any, TYPE_CHECKING, +) +from collections.abc import Iterable + +from coverage import env +from coverage.disposition import FileDisposition, disposition_init +from coverage.exceptions import CoverageException, PluginError +from coverage.files import TreeMatcher, GlobMatcher, ModuleMatcher +from coverage.files import prep_patterns, find_python_files, canonical_filename +from coverage.misc import sys_modules_saved +from coverage.python import source_for_file, source_for_morf +from coverage.types import TFileDisposition, TMorf, TWarnFn, TDebugCtl + +if TYPE_CHECKING: + from 
coverage.config import CoverageConfig + from coverage.plugin_support import Plugins + + +# Pypy has some unusual stuff in the "stdlib". Consider those locations +# when deciding where the stdlib is. These modules are not used for anything, +# they are modules importable from the pypy lib directories, so that we can +# find those directories. +modules_we_happen_to_have: list[ModuleType] = [ + inspect, itertools, os, platform, re, sysconfig, traceback, +] + +if env.PYPY: + try: + import _structseq + modules_we_happen_to_have.append(_structseq) + except ImportError: + pass + + try: + import _pypy_irc_topic + modules_we_happen_to_have.append(_pypy_irc_topic) + except ImportError: + pass + + +def canonical_path(morf: TMorf, directory: bool = False) -> str: + """Return the canonical path of the module or file `morf`. + + If the module is a package, then return its directory. If it is a + module, then return its file, unless `directory` is True, in which + case return its enclosing directory. + + """ + morf_path = canonical_filename(source_for_morf(morf)) + if morf_path.endswith("__init__.py") or directory: + morf_path = os.path.split(morf_path)[0] + return morf_path + + +def name_for_module(filename: str, frame: FrameType | None) -> str: + """Get the name of the module for a filename and frame. + + For configurability's sake, we allow __main__ modules to be matched by + their importable name. + + If loaded via runpy (aka -m), we can usually recover the "original" + full dotted module name, otherwise, we resort to interpreting the + file name to get the module's name. In the case that the module name + can't be determined, None is returned. + + """ + module_globals = frame.f_globals if frame is not None else {} + dunder_name: str = module_globals.get("__name__", None) + + if isinstance(dunder_name, str) and dunder_name != "__main__": + # This is the usual case: an imported module. + return dunder_name + + spec = module_globals.get("__spec__", None) + if spec: + fullname = spec.name + if isinstance(fullname, str) and fullname != "__main__": + # Module loaded via: runpy -m + return fullname + + # Script as first argument to Python command line. + inspectedname = inspect.getmodulename(filename) + if inspectedname is not None: + return inspectedname + else: + return dunder_name + + +def module_is_namespace(mod: ModuleType) -> bool: + """Is the module object `mod` a PEP420 namespace module?""" + return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None + + +def module_has_file(mod: ModuleType) -> bool: + """Does the module object `mod` have an existing __file__ ?""" + mod__file__ = getattr(mod, "__file__", None) + if mod__file__ is None: + return False + return os.path.exists(mod__file__) + + +def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]: + """Find the file and search path for `modulename`. + + Returns: + filename: The filename of the module, or None. + path: A list (possibly empty) of directories to find submodules in. + + """ + filename = None + path = [] + try: + spec = importlib.util.find_spec(modulename) + except Exception: + pass + else: + if spec is not None: + filename = spec.origin + path = list(spec.submodule_search_locations or ()) + return filename, path + + +def add_stdlib_paths(paths: set[str]) -> None: + """Add paths where the stdlib can be found to the set `paths`.""" + # Look at where some standard modules are located. That's the + # indication for "installed with the interpreter". 
In some + # environments (virtualenv, for example), these modules may be + # spread across a few locations. Look at all the candidate modules + # we've imported, and take all the different ones. + for m in modules_we_happen_to_have: + if hasattr(m, "__file__"): + paths.add(canonical_path(m, directory=True)) + + +def add_third_party_paths(paths: set[str]) -> None: + """Add locations for third-party packages to the set `paths`.""" + # Get the paths that sysconfig knows about. + scheme_names = set(sysconfig.get_scheme_names()) + + for scheme in scheme_names: + # https://foss.heptapod.net/pypy/pypy/-/issues/3433 + better_scheme = "pypy_posix" if scheme == "pypy" else scheme + if os.name in better_scheme.split("_"): + config_paths = sysconfig.get_paths(scheme) + for path_name in ["platlib", "purelib", "scripts"]: + paths.add(config_paths[path_name]) + + +def add_coverage_paths(paths: set[str]) -> None: + """Add paths where coverage.py code can be found to the set `paths`.""" + cover_path = canonical_path(__file__, directory=True) + paths.add(cover_path) + if env.TESTING: + # Don't include our own test code. + paths.add(os.path.join(cover_path, "tests")) + + +class InOrOut: + """Machinery for determining what files to measure.""" + + def __init__( + self, + config: CoverageConfig, + warn: TWarnFn, + debug: TDebugCtl | None, + include_namespace_packages: bool, + ) -> None: + self.warn = warn + self.debug = debug + self.include_namespace_packages = include_namespace_packages + + self.source: list[str] = [] + self.source_pkgs: list[str] = [] + self.source_pkgs.extend(config.source_pkgs) + for src in config.source or []: + if os.path.isdir(src): + self.source.append(canonical_filename(src)) + else: + self.source_pkgs.append(src) + self.source_pkgs_unmatched = self.source_pkgs[:] + + self.include = prep_patterns(config.run_include) + self.omit = prep_patterns(config.run_omit) + + # The directories for files considered "installed with the interpreter". + self.pylib_paths: set[str] = set() + if not config.cover_pylib: + add_stdlib_paths(self.pylib_paths) + + # To avoid tracing the coverage.py code itself, we skip anything + # located where we are. + self.cover_paths: set[str] = set() + add_coverage_paths(self.cover_paths) + + # Find where third-party packages are installed. + self.third_paths: set[str] = set() + add_third_party_paths(self.third_paths) + + def _debug(msg: str) -> None: + if self.debug: + self.debug.write(msg) + + # The matchers for should_trace. 
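
As a standalone illustration of the path discovery used by add_stdlib_paths and
add_third_party_paths above: the stdlib is wherever representative
interpreter-installed modules live, and third-party install areas come from
sysconfig. A minimal sketch of the same idea, not coverage.py's API:

    import os
    import sysconfig
    import traceback

    def stdlib_dirs():
        # Representative modules installed with the interpreter: where
        # they live is where the stdlib is.
        return {os.path.dirname(os.path.abspath(m.__file__)) for m in (os, traceback)}

    def third_party_dirs():
        # sysconfig knows the purelib/platlib locations that pip installs into.
        paths = sysconfig.get_paths()
        return {paths["purelib"], paths["platlib"]}

    print(stdlib_dirs())
    print(third_party_dirs())
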
+ + # Generally useful information + _debug("sys.path:" + "".join(f"\n {p}" for p in sys.path)) + + # Create the matchers we need for should_trace + self.source_match = None + self.source_pkgs_match = None + self.pylib_match = None + self.include_match = self.omit_match = None + + if self.source or self.source_pkgs: + against = [] + if self.source: + self.source_match = TreeMatcher(self.source, "source") + against.append(f"trees {self.source_match!r}") + if self.source_pkgs: + self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs") + against.append(f"modules {self.source_pkgs_match!r}") + _debug("Source matching against " + " and ".join(against)) + else: + if self.pylib_paths: + self.pylib_match = TreeMatcher(self.pylib_paths, "pylib") + _debug(f"Python stdlib matching: {self.pylib_match!r}") + if self.include: + self.include_match = GlobMatcher(self.include, "include") + _debug(f"Include matching: {self.include_match!r}") + if self.omit: + self.omit_match = GlobMatcher(self.omit, "omit") + _debug(f"Omit matching: {self.omit_match!r}") + + self.cover_match = TreeMatcher(self.cover_paths, "coverage") + _debug(f"Coverage code matching: {self.cover_match!r}") + + self.third_match = TreeMatcher(self.third_paths, "third") + _debug(f"Third-party lib matching: {self.third_match!r}") + + # Check if the source we want to measure has been installed as a + # third-party package. + # Is the source inside a third-party area? + self.source_in_third_paths = set() + with sys_modules_saved(): + for pkg in self.source_pkgs: + try: + modfile, path = file_and_path_for_module(pkg) + _debug(f"Imported source package {pkg!r} as {modfile!r}") + except CoverageException as exc: + _debug(f"Couldn't import source package {pkg!r}: {exc}") + continue + if modfile: + if self.third_match.match(modfile): + _debug( + f"Source in third-party: source_pkg {pkg!r} at {modfile!r}", + ) + self.source_in_third_paths.add(canonical_path(source_for_file(modfile))) + else: + for pathdir in path: + if self.third_match.match(pathdir): + _debug( + f"Source in third-party: {pkg!r} path directory at {pathdir!r}", + ) + self.source_in_third_paths.add(pathdir) + + for src in self.source: + if self.third_match.match(src): + _debug(f"Source in third-party: source directory {src!r}") + self.source_in_third_paths.add(src) + self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third") + _debug(f"Source in third-party matching: {self.source_in_third_match}") + + self.plugins: Plugins + self.disp_class: type[TFileDisposition] = FileDisposition + + def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition: + """Decide whether to trace execution in `filename`, with a reason. + + This function is called from the trace function. As each new file name + is encountered, this function determines whether it is traced or not. + + Returns a FileDisposition object. + + """ + original_filename = filename + disp = disposition_init(self.disp_class, filename) + + def nope(disp: TFileDisposition, reason: str) -> TFileDisposition: + """Simple helper to make it easy to return NO.""" + disp.trace = False + disp.reason = reason + return disp + + if original_filename.startswith("<"): + return nope(disp, "original file name is not real") + + if frame is not None: + # Compiled Python files have two file names: frame.f_code.co_filename is + # the file name at the time the .pyc was compiled. The second name is + # __file__, which is where the .pyc was actually loaded from. 
Since
+            # .pyc files can be moved after compilation (for example, by being
+            # installed), we look for __file__ in the frame and prefer it to the
+            # co_filename value.
+            dunder_file = frame.f_globals and frame.f_globals.get("__file__")
+            if dunder_file:
+                # Danger: __file__ can (rarely?) be of type Path.
+                filename = source_for_file(str(dunder_file))
+                if original_filename and not original_filename.startswith("<"):
+                    orig = os.path.basename(original_filename)
+                    if orig != os.path.basename(filename):
+                        # Files shouldn't be renamed when moved. This happens when
+                        # exec'ing code. If it seems like something is wrong with
+                        # the frame's file name, then just use the original.
+                        filename = original_filename
+
+        if not filename:
+            # Empty string is pretty useless.
+            return nope(disp, "empty string isn't a file name")
+
+        if filename.startswith("memory:"):
+            return nope(disp, "memory isn't traceable")
+
+        if filename.startswith("<"):
+            # Lots of non-file execution is represented with artificial
+            # file names like "<string>", "<doctest ...>", or
+            # "<stdin>". Don't ever trace these executions, since we
+            # can't do anything with the data later anyway.
+            return nope(disp, "file name is not real")
+
+        canonical = canonical_filename(filename)
+        disp.canonical_filename = canonical
+
+        # Try the plugins, see if they have an opinion about the file.
+        plugin = None
+        for plugin in self.plugins.file_tracers:
+            if not plugin._coverage_enabled:
+                continue
+
+            try:
+                file_tracer = plugin.file_tracer(canonical)
+                if file_tracer is not None:
+                    file_tracer._coverage_plugin = plugin
+                    disp.trace = True
+                    disp.file_tracer = file_tracer
+                    if file_tracer.has_dynamic_source_filename():
+                        disp.has_dynamic_filename = True
+                    else:
+                        disp.source_filename = canonical_filename(
+                            file_tracer.source_filename(),
+                        )
+                    break
+            except Exception:
+                plugin_name = plugin._coverage_plugin_name
+                tb = traceback.format_exc()
+                self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
+                plugin._coverage_enabled = False
+                continue
+        else:
+            # No plugin wanted it: it's Python.
+            disp.trace = True
+            disp.source_filename = canonical
+
+        if not disp.has_dynamic_filename:
+            if not disp.source_filename:
+                raise PluginError(
+                    f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'",
+                )
+            reason = self.check_include_omit_etc(disp.source_filename, frame)
+            if reason:
+                nope(disp, reason)
+
+        return disp
+
+    def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None:
+        """Check a file name against the include, omit, etc, rules.
+
+        Returns a string or None. String means, don't trace, and is the reason
+        why. None means no reason found to not trace.
+
+        """
+        modulename = name_for_module(filename, frame)
+
+        # If the user specified source or include, then that's authoritative
+        # about the outer bound of what to measure and we don't have to apply
+        # any canned exclusions. If they didn't, then we have to exclude the
+        # stdlib and coverage.py directories.
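
The ordering described in that comment reads as a small decision cascade:
source/include are authoritative outer bounds, and only when neither is given
do the canned stdlib/coverage.py exclusions apply; omit is checked last. A
simplified standalone sketch, with prefix and fnmatch tests standing in for
coverage.py's TreeMatcher and GlobMatcher:

    import fnmatch

    def why_skipped(filename, source_dirs=(), include=(), omit=(), stdlib_dirs=()):
        """Return the reason not to trace `filename`, or None to trace it."""
        if source_dirs:
            if not any(filename.startswith(d) for d in source_dirs):
                return "falls outside the --source spec"
        elif include:
            if not any(fnmatch.fnmatch(filename, pat) for pat in include):
                return "falls outside the --include trees"
        elif any(filename.startswith(d) for d in stdlib_dirs):
            return "is in the stdlib"
        if any(fnmatch.fnmatch(filename, pat) for pat in omit):
            return "is inside an --omit pattern"
        return None

    assert why_skipped("/proj/app.py", source_dirs=("/proj",)) is None
    assert why_skipped(
        "/proj/tests/t.py", source_dirs=("/proj",), omit=("*/tests/*",),
    ) == "is inside an --omit pattern"
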
+ if self.source_match or self.source_pkgs_match: + extra = "" + ok = False + if self.source_pkgs_match: + if self.source_pkgs_match.match(modulename): + ok = True + if modulename in self.source_pkgs_unmatched: + self.source_pkgs_unmatched.remove(modulename) + else: + extra = f"module {modulename!r} " + if not ok and self.source_match: + if self.source_match.match(filename): + ok = True + if not ok: + return extra + "falls outside the --source spec" + if self.third_match.match(filename) and not self.source_in_third_match.match(filename): + return "inside --source, but is third-party" + elif self.include_match: + if not self.include_match.match(filename): + return "falls outside the --include trees" + else: + # We exclude the coverage.py code itself, since a little of it + # will be measured otherwise. + if self.cover_match.match(filename): + return "is part of coverage.py" + + # If we aren't supposed to trace installed code, then check if this + # is near the Python standard library and skip it if so. + if self.pylib_match and self.pylib_match.match(filename): + return "is in the stdlib" + + # Exclude anything in the third-party installation areas. + if self.third_match.match(filename): + return "is a third-party module" + + # Check the file against the omit pattern. + if self.omit_match and self.omit_match.match(filename): + return "is inside an --omit pattern" + + # No point tracing a file we can't later write to SQLite. + try: + filename.encode("utf-8") + except UnicodeEncodeError: + return "non-encodable filename" + + # No reason found to skip this file. + return None + + def warn_conflicting_settings(self) -> None: + """Warn if there are settings that conflict.""" + if self.include: + if self.source or self.source_pkgs: + self.warn("--include is ignored because --source is set", slug="include-ignored") + + def warn_already_imported_files(self) -> None: + """Warn if files have already been imported that we will be measuring.""" + if self.include or self.source or self.source_pkgs: + warned = set() + for mod in list(sys.modules.values()): + filename = getattr(mod, "__file__", None) + if filename is None: + continue + if filename in warned: + continue + + if len(getattr(mod, "__path__", ())) > 1: + # A namespace package, which confuses this code, so ignore it. + continue + + disp = self.should_trace(filename) + if disp.has_dynamic_filename: + # A plugin with dynamic filenames: the Python file + # shouldn't cause a warning, since it won't be the subject + # of tracing anyway. + continue + if disp.trace: + msg = f"Already imported a file that will be measured: {filename}" + self.warn(msg, slug="already-imported") + warned.add(filename) + elif self.debug and self.debug.should("trace"): + self.debug.write( + "Didn't trace already imported file {!r}: {}".format( + disp.original_filename, disp.reason, + ), + ) + + def warn_unimported_source(self) -> None: + """Warn about source packages that were of interest, but never traced.""" + for pkg in self.source_pkgs_unmatched: + self._warn_about_unmeasured_code(pkg) + + def _warn_about_unmeasured_code(self, pkg: str) -> None: + """Warn about a package or module that we never traced. + + `pkg` is a string, the name of the package or module. + + """ + mod = sys.modules.get(pkg) + if mod is None: + self.warn(f"Module {pkg} was never imported.", slug="module-not-imported") + return + + if module_is_namespace(mod): + # A namespace package. It's OK for this not to have been traced, + # since there is no code directly in it. 
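
For reference, the PEP 420 test performed by module_is_namespace above comes
down to a single attribute check; a tiny self-contained sketch:

    import types

    def is_namespace(mod: types.ModuleType) -> bool:
        # A namespace package has a __path__ but no __file__.
        return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None

    pkg = types.ModuleType("demo")
    pkg.__path__ = ["/somewhere/demo"]   # synthetic stand-in for a real package
    assert is_namespace(pkg)
    assert not is_namespace(types)       # a normal module has __file__
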
+ return + + if not module_has_file(mod): + self.warn(f"Module {pkg} has no Python source.", slug="module-not-python") + return + + # The module was in sys.modules, and seems like a module with code, but + # we never measured it. I guess that means it was imported before + # coverage even started. + msg = f"Module {pkg} was previously imported, but not measured" + self.warn(msg, slug="module-not-measured") + + def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]: + """Find files in the areas of interest that might be untraced. + + Yields pairs: file path, and responsible plug-in name. + """ + for pkg in self.source_pkgs: + if (pkg not in sys.modules or + not module_has_file(sys.modules[pkg])): + continue + pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__)) + yield from self._find_executable_files(canonical_path(pkg_file)) + + for src in self.source: + yield from self._find_executable_files(src) + + def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]: + """Get executable files from the plugins.""" + for plugin in self.plugins.file_tracers: + for x_file in plugin.find_executable_files(src_dir): + yield x_file, plugin._coverage_plugin_name + + def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]: + """Find executable files in `src_dir`. + + Search for files in `src_dir` that can be executed because they + are probably importable. Don't include ones that have been omitted + by the configuration. + + Yield the file path, and the plugin name that handles the file. + + """ + py_files = ( + (py_file, None) for py_file in + find_python_files(src_dir, self.include_namespace_packages) + ) + plugin_files = self._find_plugin_files(src_dir) + + for file_path, plugin_name in itertools.chain(py_files, plugin_files): + file_path = canonical_filename(file_path) + if self.omit_match and self.omit_match.match(file_path): + # Turns out this file was omitted, so don't pull it back + # in as un-executed. + continue + yield file_path, plugin_name + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Our information for Coverage.sys_info. + + Returns a list of (key, value) pairs. 
+ """ + info = [ + ("coverage_paths", self.cover_paths), + ("stdlib_paths", self.pylib_paths), + ("third_party_paths", self.third_paths), + ("source_in_third_party_paths", self.source_in_third_paths), + ] + + matcher_names = [ + "source_match", "source_pkgs_match", + "include_match", "omit_match", + "cover_match", "pylib_match", "third_match", "source_in_third_match", + ] + + for matcher_name in matcher_names: + matcher = getattr(self, matcher_name) + if matcher: + matcher_info = matcher.info() + else: + matcher_info = "-none-" + info.append((matcher_name, matcher_info)) + + return info diff --git a/.venv/Lib/site-packages/coverage/jsonreport.py b/.venv/Lib/site-packages/coverage/jsonreport.py new file mode 100644 index 00000000..00053ebf --- /dev/null +++ b/.venv/Lib/site-packages/coverage/jsonreport.py @@ -0,0 +1,179 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Json reporting for coverage.py""" + +from __future__ import annotations + +import datetime +import json +import sys + +from collections.abc import Iterable +from typing import Any, IO, TYPE_CHECKING + +from coverage import __version__ +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf, TLineNo + +if TYPE_CHECKING: + from coverage import Coverage + from coverage.data import CoverageData + from coverage.plugin import FileReporter + + +# A type for data that can be JSON-serialized. +JsonObj = dict[str, Any] + +# "Version 1" had no format number at all. +# 2: add the meta.format field. +# 3: add region information (functions, classes) +FORMAT_VERSION = 3 + +class JsonReporter: + """A reporter for writing JSON coverage results.""" + + report_type = "JSON report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.total = Numbers(self.config.precision) + self.report_data: JsonObj = {} + + def make_summary(self, nums: Numbers) -> JsonObj: + """Create a dict summarizing `nums`.""" + return { + "covered_lines": nums.n_executed, + "num_statements": nums.n_statements, + "percent_covered": nums.pc_covered, + "percent_covered_display": nums.pc_covered_str, + "missing_lines": nums.n_missing, + "excluded_lines": nums.n_excluded, + } + + def make_branch_summary(self, nums: Numbers) -> JsonObj: + """Create a dict summarizing the branch info in `nums`.""" + return { + "num_branches": nums.n_branches, + "num_partial_branches": nums.n_partial_branches, + "covered_branches": nums.n_executed_branches, + "missing_branches": nums.n_missing_branches, + } + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Generate a json report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the json to. 
+ + """ + outfile = outfile or sys.stdout + coverage_data = self.coverage.get_data() + coverage_data.set_query_contexts(self.config.report_contexts) + self.report_data["meta"] = { + "format": FORMAT_VERSION, + "version": __version__, + "timestamp": datetime.datetime.now().isoformat(), + "branch_coverage": coverage_data.has_arcs(), + "show_contexts": self.config.json_show_contexts, + } + + measured_files = {} + for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): + measured_files[file_reporter.relative_filename()] = self.report_one_file( + coverage_data, + analysis, + file_reporter, + ) + + self.report_data["files"] = measured_files + self.report_data["totals"] = self.make_summary(self.total) + + if coverage_data.has_arcs(): + self.report_data["totals"].update(self.make_branch_summary(self.total)) + + json.dump( + self.report_data, + outfile, + indent=(4 if self.config.json_pretty_print else None), + ) + + return self.total.n_statements and self.total.pc_covered + + def report_one_file( + self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter + ) -> JsonObj: + """Extract the relevant report data for a single file.""" + nums = analysis.numbers + self.total += nums + summary = self.make_summary(nums) + reported_file: JsonObj = { + "executed_lines": sorted(analysis.executed), + "summary": summary, + "missing_lines": sorted(analysis.missing), + "excluded_lines": sorted(analysis.excluded), + } + if self.config.json_show_contexts: + reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename) + if coverage_data.has_arcs(): + summary.update(self.make_branch_summary(nums)) + reported_file["executed_branches"] = list( + _convert_branch_arcs(analysis.executed_branch_arcs()), + ) + reported_file["missing_branches"] = list( + _convert_branch_arcs(analysis.missing_branch_arcs()), + ) + + num_lines = len(file_reporter.source().splitlines()) + for noun, plural in file_reporter.code_region_kinds(): + reported_file[plural] = region_data = {} + outside_lines = set(range(1, num_lines + 1)) + for region in file_reporter.code_regions(): + if region.kind != noun: + continue + outside_lines -= region.lines + region_data[region.name] = self.make_region_data( + coverage_data, + analysis.narrow(region.lines), + ) + + region_data[""] = self.make_region_data( + coverage_data, + analysis.narrow(outside_lines), + ) + return reported_file + + def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj: + """Create the data object for one region of a file.""" + narrowed_nums = narrowed_analysis.numbers + narrowed_summary = self.make_summary(narrowed_nums) + this_region = { + "executed_lines": sorted(narrowed_analysis.executed), + "summary": narrowed_summary, + "missing_lines": sorted(narrowed_analysis.missing), + "excluded_lines": sorted(narrowed_analysis.excluded), + } + if self.config.json_show_contexts: + contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename) + this_region["contexts"] = contexts + if coverage_data.has_arcs(): + narrowed_summary.update(self.make_branch_summary(narrowed_nums)) + this_region["executed_branches"] = list( + _convert_branch_arcs(narrowed_analysis.executed_branch_arcs()), + ) + this_region["missing_branches"] = list( + _convert_branch_arcs(narrowed_analysis.missing_branch_arcs()), + ) + return this_region + + +def _convert_branch_arcs( + branch_arcs: dict[TLineNo, list[TLineNo]], +) -> Iterable[tuple[TLineNo, TLineNo]]: + """Convert branch arcs to a list of two-element 
tuples.""" + for source, targets in branch_arcs.items(): + for target in targets: + yield source, target diff --git a/.venv/Lib/site-packages/coverage/lcovreport.py b/.venv/Lib/site-packages/coverage/lcovreport.py new file mode 100644 index 00000000..70507c00 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/lcovreport.py @@ -0,0 +1,223 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""LCOV reporting for coverage.py.""" + +from __future__ import annotations + +import base64 +import hashlib +import sys + +from typing import IO, TYPE_CHECKING +from collections.abc import Iterable + +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +def line_hash(line: str) -> str: + """Produce a hash of a source line for use in the LCOV file.""" + # The LCOV file format optionally allows each line to be MD5ed as a + # fingerprint of the file. This is not a security use. Some security + # scanners raise alarms about the use of MD5 here, but it is a false + # positive. This is not a security concern. + # The unusual encoding of the MD5 hash, as a base64 sequence with the + # trailing = signs stripped, is specified by the LCOV file format. + hashed = hashlib.md5(line.encode("utf-8"), usedforsecurity=False).digest() + return base64.b64encode(hashed).decode("ascii").rstrip("=") + + +def lcov_lines( + analysis: Analysis, + lines: list[int], + source_lines: list[str], + outfile: IO[str], +) -> None: + """Emit line coverage records for an analyzed file.""" + hash_suffix = "" + for line in lines: + if source_lines: + hash_suffix = "," + line_hash(source_lines[line-1]) + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + hit = int(line not in analysis.missing) + outfile.write(f"DA:{line},{hit}{hash_suffix}\n") + + if analysis.numbers.n_statements > 0: + outfile.write(f"LF:{analysis.numbers.n_statements}\n") + outfile.write(f"LH:{analysis.numbers.n_executed}\n") + + +def lcov_functions( + fr: FileReporter, + file_analysis: Analysis, + outfile: IO[str], +) -> None: + """Emit function coverage records for an analyzed file.""" + # lcov 2.2 introduces a new format for function coverage records. + # We continue to generate the old format because we don't know what + # version of the lcov tools will be used to read this report. + + # "and region.lines" below avoids a crash due to a bug in PyPy 3.8 + # where, for whatever reason, when collecting data in --branch mode, + # top-level functions have an empty lines array. Instead we just don't + # emit function records for those. + + # suppressions because of https://github.com/pylint-dev/pylint/issues/9923 + functions = [ + (min(region.start, min(region.lines)), #pylint: disable=nested-min-max + max(region.start, max(region.lines)), #pylint: disable=nested-min-max + region) + for region in fr.code_regions() + if region.kind == "function" and region.lines + ] + if not functions: + return + + functions.sort() + functions_hit = 0 + for first_line, last_line, region in functions: + # A function counts as having been executed if any of it has been + # executed. 
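
The loop below emits the classic record shapes: FN (where a function is), FNDA
(whether it ran), then FNF/FNH totals. A standalone sketch of just those
records, with made-up region data:

    import io

    functions = [(2, 5, "f", True), (7, 9, "g", False)]      # invented regions
    out = io.StringIO()
    for first_line, last_line, name, hit in functions:
        out.write(f"FN:{first_line},{last_line},{name}\n")   # function extent
        out.write(f"FNDA:{int(hit)},{name}\n")               # execution count (0/1)
    out.write(f"FNF:{len(functions)}\n")                     # functions found
    out.write(f"FNH:{sum(h for *_, h in functions)}\n")      # functions hit
    print(out.getvalue(), end="")
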
+ analysis = file_analysis.narrow(region.lines) + hit = int(analysis.numbers.n_executed > 0) + functions_hit += hit + + outfile.write(f"FN:{first_line},{last_line},{region.name}\n") + outfile.write(f"FNDA:{hit},{region.name}\n") + + outfile.write(f"FNF:{len(functions)}\n") + outfile.write(f"FNH:{functions_hit}\n") + + +def lcov_arcs( + fr: FileReporter, + analysis: Analysis, + lines: list[int], + outfile: IO[str], +) -> None: + """Emit branch coverage records for an analyzed file.""" + branch_stats = analysis.branch_stats() + executed_arcs = analysis.executed_branch_arcs() + missing_arcs = analysis.missing_branch_arcs() + + for line in lines: + if line not in branch_stats: + continue + + # This is only one of several possible ways to map our sets of executed + # and not-executed arcs to BRDA codes. It seems to produce reasonable + # results when fed through genhtml. + _, taken = branch_stats[line] + + if taken == 0: + # When _none_ of the out arcs from 'line' were executed, + # this probably means 'line' was never executed at all. + # Cross-check with the line stats. + assert len(executed_arcs[line]) == 0 + assert line in analysis.missing + destinations = [ + (dst, "-") for dst in missing_arcs[line] + ] + else: + # Q: can we get counts of the number of times each arc was executed? + # branch_stats has "total" and "taken" counts for each branch, + # but it doesn't have "taken" broken down by destination. + destinations = [ + (dst, "1") for dst in executed_arcs[line] + ] + destinations.extend( + (dst, "0") for dst in missing_arcs[line] + ) + + # Sort exit arcs after normal arcs. Exit arcs typically come from + # an if statement, at the end of a function, with no else clause. + # This structure reads like you're jumping to the end of the function + # when the conditional expression is false, so it should be presented + # as the second alternative for the branch, after the alternative that + # enters the if clause. + destinations.sort(key=lambda d: (d[0] < 0, d)) + + for dst, hit in destinations: + branch = fr.arc_description(line, dst) + outfile.write(f"BRDA:{line},0,{branch},{hit}\n") + + # Summary of the branch coverage. + brf = sum(t for t, k in branch_stats.values()) + brh = brf - sum(t - k for t, k in branch_stats.values()) + if brf > 0: + outfile.write(f"BRF:{brf}\n") + outfile.write(f"BRH:{brh}\n") + + +class LcovReporter: + """A reporter for writing LCOV coverage reports.""" + + report_type = "LCOV report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = coverage.config + self.total = Numbers(self.coverage.config.precision) + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Renders the full lcov report. + + `morfs` is a list of modules or filenames + + outfile is the file object to write the file into. + """ + + self.coverage.get_data() + outfile = outfile or sys.stdout + + # ensure file records are sorted by the _relative_ filename, not the full path + to_report = [ + (fr.relative_filename(), fr, analysis) + for fr, analysis in get_analysis_to_report(self.coverage, morfs) + ] + to_report.sort() + + for fname, fr, analysis in to_report: + self.total += analysis.numbers + self.lcov_file(fname, fr, analysis, outfile) + + return self.total.n_statements and self.total.pc_covered + + def lcov_file( + self, + rel_fname: str, + fr: FileReporter, + analysis: Analysis, + outfile: IO[str], + ) -> None: + """Produces the lcov data for a single file. 
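
One detail from lcov_arcs above worth isolating: sorting with a boolean in the
key puts exit (negative) arcs after the normal ones, because False orders
before True. A quick demonstration:

    destinations = [(-1, "0"), (12, "1"), (3, "0")]
    destinations.sort(key=lambda d: (d[0] < 0, d))
    assert destinations == [(3, "0"), (12, "1"), (-1, "0")]
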
+ + This currently supports both line and branch coverage, + however function coverage is not supported. + """ + + if analysis.numbers.n_statements == 0: + if self.config.skip_empty: + return + + outfile.write(f"SF:{rel_fname}\n") + + lines = sorted(analysis.statements) + if self.config.lcov_line_checksums: + source_lines = fr.source().splitlines() + else: + source_lines = [] + + lcov_lines(analysis, lines, source_lines, outfile) + lcov_functions(fr, analysis, outfile) + if analysis.has_arcs: + lcov_arcs(fr, analysis, lines, outfile) + + outfile.write("end_of_record\n") diff --git a/.venv/Lib/site-packages/coverage/misc.py b/.venv/Lib/site-packages/coverage/misc.py new file mode 100644 index 00000000..c5ce7f4a --- /dev/null +++ b/.venv/Lib/site-packages/coverage/misc.py @@ -0,0 +1,369 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Miscellaneous stuff for coverage.py.""" + +from __future__ import annotations + +import contextlib +import datetime +import errno +import functools +import hashlib +import importlib +import importlib.util +import inspect +import os +import os.path +import re +import sys +import types + +from types import ModuleType +from typing import ( + Any, NoReturn, TypeVar, +) +from collections.abc import Iterable, Iterator, Mapping, Sequence + +from coverage.exceptions import CoverageException +from coverage.types import TArc + +# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of +# other packages were importing the exceptions from misc, so import them here. +# pylint: disable=unused-wildcard-import +from coverage.exceptions import * # pylint: disable=wildcard-import + +ISOLATED_MODULES: dict[ModuleType, ModuleType] = {} + + +def isolate_module(mod: ModuleType) -> ModuleType: + """Copy a module so that we are isolated from aggressive mocking. + + If a test suite mocks os.path.exists (for example), and then we need to use + it during the test, everything will get tangled up if we use their mock. + Making a copy of the module when we import it will isolate coverage.py from + those complications. + """ + if mod not in ISOLATED_MODULES: + new_mod = types.ModuleType(mod.__name__) + ISOLATED_MODULES[mod] = new_mod + for name in dir(mod): + value = getattr(mod, name) + if isinstance(value, types.ModuleType): + value = isolate_module(value) + setattr(new_mod, name, value) + return ISOLATED_MODULES[mod] + +os = isolate_module(os) + + +class SysModuleSaver: + """Saves the contents of sys.modules, and removes new modules later.""" + def __init__(self) -> None: + self.old_modules = set(sys.modules) + + def restore(self) -> None: + """Remove any modules imported since this object started.""" + new_modules = set(sys.modules) - self.old_modules + for m in new_modules: + del sys.modules[m] + + +@contextlib.contextmanager +def sys_modules_saved() -> Iterator[None]: + """A context manager to remove any modules imported during a block.""" + saver = SysModuleSaver() + try: + yield + finally: + saver.restore() + + +def import_third_party(modname: str) -> tuple[ModuleType, bool]: + """Import a third-party module we need, but might not be installed. + + This also cleans out the module after the import, so that coverage won't + appear to have imported it. This lets the third party use coverage for + their own tests. + + Arguments: + modname (str): the name of the module to import. 
+ + Returns: + The imported module, and a boolean indicating if the module could be imported. + + If the boolean is False, the module returned is not the one you want: don't use it. + + """ + with sys_modules_saved(): + try: + return importlib.import_module(modname), True + except ImportError: + return sys, False + + +def nice_pair(pair: TArc) -> str: + """Make a nice string representation of a pair of numbers. + + If the numbers are equal, just return the number, otherwise return the pair + with a dash between them, indicating the range. + + """ + start, end = pair + if start == end: + return "%d" % start + else: + return "%d-%d" % (start, end) + + +def bool_or_none(b: Any) -> bool | None: + """Return bool(b), but preserve None.""" + if b is None: + return None + else: + return bool(b) + + +def join_regex(regexes: Iterable[str]) -> str: + """Combine a series of regex strings into one that matches any of them.""" + regexes = list(regexes) + if len(regexes) == 1: + return regexes[0] + else: + return "|".join(f"(?:{r})" for r in regexes) + + +def file_be_gone(path: str) -> None: + """Remove a file, and don't get annoyed if it doesn't exist.""" + try: + os.remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +def ensure_dir(directory: str) -> None: + """Make sure the directory exists. + + If `directory` is None or empty, do nothing. + """ + if directory: + os.makedirs(directory, exist_ok=True) + + +def ensure_dir_for_file(path: str) -> None: + """Make sure the directory for the path exists.""" + ensure_dir(os.path.dirname(path)) + + +class Hasher: + """Hashes Python data for fingerprinting.""" + def __init__(self) -> None: + self.hash = hashlib.new("sha3_256", usedforsecurity=False) + + def update(self, v: Any) -> None: + """Add `v` to the hash, recursively if needed.""" + self.hash.update(str(type(v)).encode("utf-8")) + if isinstance(v, str): + self.hash.update(v.encode("utf-8")) + elif isinstance(v, bytes): + self.hash.update(v) + elif v is None: + pass + elif isinstance(v, (int, float)): + self.hash.update(str(v).encode("utf-8")) + elif isinstance(v, (tuple, list)): + for e in v: + self.update(e) + elif isinstance(v, dict): + keys = v.keys() + for k in sorted(keys): + self.update(k) + self.update(v[k]) + else: + for k in dir(v): + if k.startswith("__"): + continue + a = getattr(v, k) + if inspect.isroutine(a): + continue + self.update(k) + self.update(a) + self.hash.update(b".") + + def hexdigest(self) -> str: + """Retrieve the hex digest of the hash.""" + return self.hash.hexdigest()[:32] + + +def _needs_to_implement(that: Any, func_name: str) -> NoReturn: + """Helper to raise NotImplementedError in interface stubs.""" + if hasattr(that, "_coverage_plugin_name"): + thing = "Plugin" + name = that._coverage_plugin_name + else: + thing = "Class" + klass = that.__class__ + name = f"{klass.__module__}.{klass.__name__}" + + raise NotImplementedError( + f"{thing} {name!r} needs to implement {func_name}()", + ) + + +class DefaultValue: + """A sentinel object to use for unusual default-value needs. + + Construct with a string that will be used as the repr, for display in help + and Sphinx output. + + """ + def __init__(self, display_as: str) -> None: + self.display_as = display_as + + def __repr__(self) -> str: + return self.display_as + + +def substitute_variables(text: str, variables: Mapping[str, str]) -> str: + """Substitute ``${VAR}`` variables in `text` with their values. 
+
+    Variables in the text can take a number of shell-inspired forms::
+
+        $VAR
+        ${VAR}
+        ${VAR?}             strict: an error if VAR isn't defined.
+        ${VAR-missing}      defaulted: "missing" if VAR isn't defined.
+        $$                  just a dollar sign.
+
+    `variables` is a dictionary of variable values.
+
+    Returns the resulting text with values substituted.
+
+    """
+    dollar_pattern = r"""(?x)   # Use extended regex syntax
+        \$                      # A dollar sign,
+        (?:                     # then
+            (?P<dollar>\$) |    # a dollar sign, or
+            (?P<word1>\w+) |    # a plain word, or
+            {                   # a {-wrapped
+                (?P<word2>\w+)  # word,
+                (?:
+                    (?P<strict>\?) |        # with a strict marker
+                    -(?P<defval>[^}]*)      # or a default value
+                )?              # maybe.
+            }
+        )
+        """
+
+    dollar_groups = ("dollar", "word1", "word2")
+
+    def dollar_replace(match: re.Match[str]) -> str:
+        """Called for each $replacement."""
+        # Only one of the dollar_groups will have matched, just get its text.
+        word = next(g for g in match.group(*dollar_groups) if g)    # pragma: always breaks
+        if word == "$":
+            return "$"
+        elif word in variables:
+            return variables[word]
+        elif match["strict"]:
+            msg = f"Variable {word} is undefined: {text!r}"
+            raise CoverageException(msg)
+        else:
+            return match["defval"]
+
+    text = re.sub(dollar_pattern, dollar_replace, text)
+    return text
+
+
+def format_local_datetime(dt: datetime.datetime) -> str:
+    """Return a string with local timezone representing the date.
+    """
+    return dt.astimezone().strftime("%Y-%m-%d %H:%M %z")
+
+
+def import_local_file(modname: str, modfile: str | None = None) -> ModuleType:
+    """Import a local file as a module.
+
+    Opens a file in the current directory named `modname`.py, imports it
+    as `modname`, and returns the module object. `modfile` is the file to
+    import if it isn't in the current directory.
+
+    """
+    if modfile is None:
+        modfile = modname + ".py"
+    spec = importlib.util.spec_from_file_location(modname, modfile)
+    assert spec is not None
+    mod = importlib.util.module_from_spec(spec)
+    sys.modules[modname] = mod
+    assert spec.loader is not None
+    spec.loader.exec_module(mod)
+
+    return mod
+
+
+@functools.cache
+def _human_key(s: str) -> tuple[list[str | int], str]:
+    """Turn a string into a list of string and number chunks.
+
+        "z23a" -> (["z", 23, "a"], "z23a")
+
+    The original string is appended as a last value to ensure the
+    key is unique enough so that "x1y" and "x001y" can be distinguished.
+    """
+    def tryint(s: str) -> str | int:
+        """If `s` is a number, return an int, else `s` unchanged."""
+        try:
+            return int(s)
+        except ValueError:
+            return s
+
+    return ([tryint(c) for c in re.split(r"(\d+)", s)], s)
+
+def human_sorted(strings: Iterable[str]) -> list[str]:
+    """Sort the given iterable of strings the way that humans expect.
+
+    Numeric components in the strings are sorted as numbers.
+
+    Returns the sorted list.
+
+    """
+    return sorted(strings, key=_human_key)
+
+SortableItem = TypeVar("SortableItem", bound=Sequence[Any])
+
+def human_sorted_items(
+    items: Iterable[SortableItem],
+    reverse: bool = False,
+) -> list[SortableItem]:
+    """Sort (string, ...) items the way humans expect.
+
+    The elements of `items` can be any tuple/list. They'll be sorted by the
+    first element (a string), with ties broken by the remaining elements.
+
+    Returns the sorted list of items.
+    """
+    return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse)
+
+
+def plural(n: int, thing: str = "", things: str = "") -> str:
+    """Pluralize a word.
+
+    If n is 1, return thing. Otherwise return things, or thing+s.
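
Looking back at substitute_variables earlier in this file, a quick usage
check; the values are invented, and this assumes coverage.py is importable:

    from coverage.misc import substitute_variables

    vals = {"HOME": "/home/me"}
    assert substitute_variables("x=$HOME", vals) == "x=/home/me"
    assert substitute_variables("x=${NOPE-default}", vals) == "x=default"
    assert substitute_variables("cost=$$5", vals) == "cost=$5"
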
+ """ + if n == 1: + return thing + else: + return things or (thing + "s") + + +def stdout_link(text: str, url: str) -> str: + """Format text+url as a clickable link for stdout. + + If attached to a terminal, use escape sequences. Otherwise, just return + the text. + """ + if hasattr(sys.stdout, "isatty") and sys.stdout.isatty(): + return f"\033]8;;{url}\a{text}\033]8;;\a" + else: + return text diff --git a/.venv/Lib/site-packages/coverage/multiproc.py b/.venv/Lib/site-packages/coverage/multiproc.py new file mode 100644 index 00000000..6d5a8273 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/multiproc.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Monkey-patching to add multiprocessing support for coverage.py""" + +from __future__ import annotations + +import multiprocessing +import multiprocessing.process +import os +import os.path +import sys +import traceback + +from typing import Any + +from coverage.debug import DebugControl + +# An attribute that will be set on the module to indicate that it has been +# monkey-patched. +PATCHED_MARKER = "_coverage$patched" + + +OriginalProcess = multiprocessing.process.BaseProcess +original_bootstrap = OriginalProcess._bootstrap # type: ignore[attr-defined] + +class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method + """A replacement for multiprocess.Process that starts coverage.""" + + def _bootstrap(self, *args, **kwargs): # type: ignore[no-untyped-def] + """Wrapper around _bootstrap to start coverage.""" + debug: DebugControl | None = None + try: + from coverage import Coverage # avoid circular import + cov = Coverage(data_suffix=True, auto_data=True) + cov._warn_preimported_source = False + cov.start() + _debug = cov._debug + assert _debug is not None + if _debug.should("multiproc"): + debug = _debug + if debug: + debug.write("Calling multiprocessing bootstrap") + except Exception: + print("Exception during multiprocessing bootstrap init:", file=sys.stderr) + traceback.print_exc(file=sys.stderr) + sys.stderr.flush() + raise + try: + return original_bootstrap(self, *args, **kwargs) + finally: + if debug: + debug.write("Finished multiprocessing bootstrap") + try: + cov.stop() + cov.save() + except Exception as exc: + if debug: + debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc) + raise + if debug: + debug.write("Saved multiprocessing data") + +class Stowaway: + """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" + def __init__(self, rcfile: str) -> None: + self.rcfile = rcfile + + def __getstate__(self) -> dict[str, str]: + return {"rcfile": self.rcfile} + + def __setstate__(self, state: dict[str, str]) -> None: + patch_multiprocessing(state["rcfile"]) + + +def patch_multiprocessing(rcfile: str) -> None: + """Monkey-patch the multiprocessing module. + + This enables coverage measurement of processes started by multiprocessing. + This involves aggressive monkey-patching. + + `rcfile` is the path to the rcfile being used. + + """ + + if hasattr(multiprocessing, PATCHED_MARKER): + return + + OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap # type: ignore[attr-defined] + + # Set the value in ProcessWithCoverage that will be pickled into the child + # process. + os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile) + + # When spawning processes rather than forking them, we have no state in the + # new process. 
We sneak in there with a Stowaway: we stuff one of our own + # objects into the data that gets pickled and sent to the sub-process. When + # the Stowaway is unpickled, its __setstate__ method is called, which + # re-applies the monkey-patch. + # Windows only spawns, so this is needed to keep Windows working. + try: + from multiprocessing import spawn + original_get_preparation_data = spawn.get_preparation_data + except (ImportError, AttributeError): + pass + else: + def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]: + """Get the original preparation data, and also insert our stowaway.""" + d = original_get_preparation_data(name) + d["stowaway"] = Stowaway(rcfile) + return d + + spawn.get_preparation_data = get_preparation_data_with_stowaway + + setattr(multiprocessing, PATCHED_MARKER, True) diff --git a/.venv/Lib/site-packages/coverage/numbits.py b/.venv/Lib/site-packages/coverage/numbits.py new file mode 100644 index 00000000..0975a098 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/numbits.py @@ -0,0 +1,147 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Functions to manipulate packed binary representations of number sets. + +To save space, coverage stores sets of line numbers in SQLite using a packed +binary representation called a numbits. A numbits is a set of positive +integers. + +A numbits is stored as a blob in the database. The exact meaning of the bytes +in the blobs should be considered an implementation detail that might change in +the future. Use these functions to work with those binary blobs of data. + +""" + +from __future__ import annotations + +import json +import sqlite3 + +from itertools import zip_longest +from collections.abc import Iterable + + +def nums_to_numbits(nums: Iterable[int]) -> bytes: + """Convert `nums` into a numbits. + + Arguments: + nums: a reusable iterable of integers, the line numbers to store. + + Returns: + A binary blob. + """ + try: + nbytes = max(nums) // 8 + 1 + except ValueError: + # nums was empty. + return b"" + b = bytearray(nbytes) + for num in nums: + b[num//8] |= 1 << num % 8 + return bytes(b) + + +def numbits_to_nums(numbits: bytes) -> list[int]: + """Convert a numbits into a list of numbers. + + Arguments: + numbits: a binary blob, the packed number set. + + Returns: + A list of ints. + + When registered as a SQLite function by :func:`register_sqlite_functions`, + this returns a string, a JSON-encoded list of ints. + + """ + nums = [] + for byte_i, byte in enumerate(numbits): + for bit_i in range(8): + if (byte & (1 << bit_i)): + nums.append(byte_i * 8 + bit_i) + return nums + + +def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the union of two numbits. + + Returns: + A new numbits, the union of `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return bytes(b1 | b2 for b1, b2 in byte_pairs) + + +def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes: + """Compute the intersection of two numbits. + + Returns: + A new numbits, the intersection `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs) + return intersection_bytes.rstrip(b"\0") + + +def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool: + """Is there any number that appears in both numbits? 
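
Before the remaining set operations, here is the packing scheme above end to
end: one bit per line number, eight numbers per byte. A self-contained round
trip mirroring nums_to_numbits and numbits_to_nums:

    def pack(nums):
        if not nums:
            return b""
        b = bytearray(max(nums) // 8 + 1)
        for n in nums:
            b[n // 8] |= 1 << (n % 8)       # set bit n%8 of byte n//8
        return bytes(b)

    def unpack(blob):
        return [i * 8 + bit for i, byte in enumerate(blob)
                for bit in range(8) if byte & (1 << bit)]

    assert unpack(pack([1, 8, 9, 47])) == [1, 8, 9, 47]
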
+ + Determine whether two number sets have a non-empty intersection. This is + faster than computing the intersection. + + Returns: + A bool, True if there is any number in both `numbits1` and `numbits2`. + """ + byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0) + return any(b1 & b2 for b1, b2 in byte_pairs) + + +def num_in_numbits(num: int, numbits: bytes) -> bool: + """Does the integer `num` appear in `numbits`? + + Returns: + A bool, True if `num` is a member of `numbits`. + """ + nbyte, nbit = divmod(num, 8) + if nbyte >= len(numbits): + return False + return bool(numbits[nbyte] & (1 << nbit)) + + +def register_sqlite_functions(connection: sqlite3.Connection) -> None: + """ + Define numbits functions in a SQLite connection. + + This defines these functions for use in SQLite statements: + + * :func:`numbits_union` + * :func:`numbits_intersection` + * :func:`numbits_any_intersection` + * :func:`num_in_numbits` + * :func:`numbits_to_nums` + + `connection` is a :class:`sqlite3.Connection ` + object. After creating the connection, pass it to this function to + register the numbits functions. Then you can use numbits functions in your + queries:: + + import sqlite3 + from coverage.numbits import register_sqlite_functions + + conn = sqlite3.connect("example.db") + register_sqlite_functions(conn) + c = conn.cursor() + # Kind of a nonsense query: + # Find all the files and contexts that executed line 47 in any file: + c.execute( + "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", + (47,) + ) + """ + connection.create_function("numbits_union", 2, numbits_union) + connection.create_function("numbits_intersection", 2, numbits_intersection) + connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) + connection.create_function("num_in_numbits", 2, num_in_numbits) + connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b))) diff --git a/.venv/Lib/site-packages/coverage/parser.py b/.venv/Lib/site-packages/coverage/parser.py new file mode 100644 index 00000000..c68fb4ea --- /dev/null +++ b/.venv/Lib/site-packages/coverage/parser.py @@ -0,0 +1,1287 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Code parsing for coverage.py.""" + +from __future__ import annotations + +import ast +import functools +import collections +import os +import re +import sys +import token +import tokenize + +from collections.abc import Iterable, Sequence +from dataclasses import dataclass +from types import CodeType +from typing import cast, Callable, Optional, Protocol + +from coverage import env +from coverage.bytecode import code_objects +from coverage.debug import short_stack +from coverage.exceptions import NoSource, NotPython +from coverage.misc import nice_pair +from coverage.phystokens import generate_tokens +from coverage.types import TArc, TLineNo + + +class PythonParser: + """Parse code to find executable lines, excluded lines, etc. + + This information is all based on static analysis: no code execution is + involved. + + """ + def __init__( + self, + text: str | None = None, + filename: str | None = None, + exclude: str | None = None, + ) -> None: + """ + Source can be provided as `text`, the text itself, or `filename`, from + which the text will be read. Excluded lines are those that match + `exclude`, a regex string. 
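+
+        For example, passing ``exclude=r"#\s*pragma: no cover"`` (an
+        illustrative pattern, not a built-in default) would exclude lines
+        carrying that pragma comment.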
+ + """ + assert text or filename, "PythonParser needs either text or filename" + self.filename = filename or "" + if text is not None: + self.text: str = text + else: + from coverage.python import get_python_source + try: + self.text = get_python_source(self.filename) + except OSError as err: + raise NoSource(f"No source for code: '{self.filename}': {err}") from err + + self.exclude = exclude + + # The parsed AST of the text. + self._ast_root: ast.AST | None = None + + # The normalized line numbers of the statements in the code. Exclusions + # are taken into account, and statements are adjusted to their first + # lines. + self.statements: set[TLineNo] = set() + + # The normalized line numbers of the excluded lines in the code, + # adjusted to their first lines. + self.excluded: set[TLineNo] = set() + + # The raw_* attributes are only used in this class, and in + # lab/parser.py to show how this class is working. + + # The line numbers that start statements, as reported by the line + # number table in the bytecode. + self.raw_statements: set[TLineNo] = set() + + # The raw line numbers of excluded lines of code, as marked by pragmas. + self.raw_excluded: set[TLineNo] = set() + + # The line numbers of docstring lines. + self.raw_docstrings: set[TLineNo] = set() + + # Internal detail, used by lab/parser.py. + self.show_tokens = False + + # A dict mapping line numbers to lexical statement starts for + # multi-line statements. + self._multiline: dict[TLineNo, TLineNo] = {} + + # Lazily-created arc data, and missing arc descriptions. + self._all_arcs: set[TArc] | None = None + self._missing_arc_fragments: TArcFragments | None = None + self._with_jump_fixers: dict[TArc, tuple[TArc, TArc]] = {} + + def lines_matching(self, regex: str) -> set[TLineNo]: + """Find the lines matching a regex. + + Returns a set of line numbers, the lines that contain a match for + `regex`. The entire line needn't match, just a part of it. + Handles multiline regex patterns. + + """ + matches: set[TLineNo] = set() + + last_start = 0 + last_start_line = 0 + for match in re.finditer(regex, self.text, flags=re.MULTILINE): + start, end = match.span() + start_line = last_start_line + self.text.count('\n', last_start, start) + end_line = last_start_line + self.text.count('\n', last_start, end) + matches.update(self._multiline.get(i, i) for i in range(start_line + 1, end_line + 2)) + last_start = start + last_start_line = start_line + return matches + + def _raw_parse(self) -> None: + """Parse the source to find the interesting facts about its lines. + + A handful of attributes are updated. + + """ + # Find lines which match an exclusion pattern. + if self.exclude: + self.raw_excluded = self.lines_matching(self.exclude) + self.excluded = set(self.raw_excluded) + + # The current number of indents. + indent: int = 0 + # An exclusion comment will exclude an entire clause at this indent. + exclude_indent: int = 0 + # Are we currently excluding lines? + excluding: bool = False + # The line number of the first line in a multi-line statement. + first_line: int = 0 + # Is the file empty? + empty: bool = True + # Parenthesis (and bracket) nesting level. 
+ nesting: int = 0 + + assert self.text is not None + tokgen = generate_tokens(self.text) + for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: + if self.show_tokens: # pragma: debugging + print("%10s %5s %-20r %r" % ( + tokenize.tok_name.get(toktype, toktype), + nice_pair((slineno, elineno)), ttext, ltext, + )) + if toktype == token.INDENT: + indent += 1 + elif toktype == token.DEDENT: + indent -= 1 + elif toktype == token.OP: + if ttext == ":" and nesting == 0: + should_exclude = ( + self.excluded.intersection(range(first_line, elineno + 1)) + ) + if not excluding and should_exclude: + # Start excluding a suite. We trigger off of the colon + # token so that the #pragma comment will be recognized on + # the same line as the colon. + self.excluded.add(elineno) + exclude_indent = indent + excluding = True + elif ttext in "([{": + nesting += 1 + elif ttext in ")]}": + nesting -= 1 + elif toktype == token.NEWLINE: + if first_line and elineno != first_line: + # We're at the end of a line, and we've ended on a + # different line than the first line of the statement, + # so record a multi-line range. + for l in range(first_line, elineno+1): + self._multiline[l] = first_line + first_line = 0 + + if ttext.strip() and toktype != tokenize.COMMENT: + # A non-white-space token. + empty = False + if not first_line: + # The token is not white space, and is the first in a statement. + first_line = slineno + # Check whether to end an excluded suite. + if excluding and indent <= exclude_indent: + excluding = False + if excluding: + self.excluded.add(elineno) + + # Find the starts of the executable statements. + if not empty: + byte_parser = ByteParser(self.text, filename=self.filename) + self.raw_statements.update(byte_parser._find_statements()) + + # The first line of modules can lie and say 1 always, even if the first + # line of code is later. If so, map 1 to the actual first line of the + # module. + if env.PYBEHAVIOR.module_firstline_1 and self._multiline: + self._multiline[1] = min(self.raw_statements) + + self.excluded = self.first_lines(self.excluded) + + # AST lets us find classes, docstrings, and decorator-affected + # functions and classes. + assert self._ast_root is not None + for node in ast.walk(self._ast_root): + # Find docstrings. + if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef, ast.Module)): + if node.body: + first = node.body[0] + if ( + isinstance(first, ast.Expr) + and isinstance(first.value, ast.Constant) + and isinstance(first.value.value, str) + ): + self.raw_docstrings.update( + range(first.lineno, cast(int, first.end_lineno) + 1) + ) + # Exclusions carry from decorators and signatures to the bodies of + # functions and classes. + if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + first_line = min((d.lineno for d in node.decorator_list), default=node.lineno) + if self.excluded.intersection(range(first_line, node.lineno + 1)): + self.excluded.update(range(first_line, cast(int, node.end_lineno) + 1)) + + @functools.lru_cache(maxsize=1000) + def first_line(self, lineno: TLineNo) -> TLineNo: + """Return the first line number of the statement including `lineno`.""" + if lineno < 0: + lineno = -self._multiline.get(-lineno, -lineno) + else: + lineno = self._multiline.get(lineno, lineno) + return lineno + + def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]: + """Map the line numbers in `linenos` to the correct first line of the + statement. + + Returns a set of the first lines. 
+ + """ + return {self.first_line(l) for l in linenos} + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + """Implement `FileReporter.translate_lines`.""" + return self.first_lines(lines) + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + """Implement `FileReporter.translate_arcs`.""" + return {(self.first_line(a), self.first_line(b)) for (a, b) in self.fix_with_jumps(arcs)} + + def parse_source(self) -> None: + """Parse source text to find executable lines, excluded lines, etc. + + Sets the .excluded and .statements attributes, normalized to the first + line of multi-line statements. + + """ + try: + self._ast_root = ast.parse(self.text) + self._raw_parse() + except (tokenize.TokenError, IndentationError, SyntaxError) as err: + if hasattr(err, "lineno"): + lineno = err.lineno # IndentationError + else: + lineno = err.args[1][0] # TokenError + raise NotPython( + f"Couldn't parse '{self.filename}' as Python source: " + + f"{err.args[0]!r} at line {lineno}", + ) from err + + ignore = self.excluded | self.raw_docstrings + starts = self.raw_statements - ignore + self.statements = self.first_lines(starts) - ignore + + def arcs(self) -> set[TArc]: + """Get information about the arcs available in the code. + + Returns a set of line number pairs. Line numbers have been normalized + to the first line of multi-line statements. + + """ + if self._all_arcs is None: + self._analyze_ast() + assert self._all_arcs is not None + return self._all_arcs + + def _analyze_ast(self) -> None: + """Run the AstArcAnalyzer and save its results. + + `_all_arcs` is the set of arcs in the code. + + """ + assert self._ast_root is not None + aaa = AstArcAnalyzer(self._ast_root, self.raw_statements, self._multiline) + aaa.analyze() + self._with_jump_fixers = aaa.with_jump_fixers() + + self._all_arcs = set() + for l1, l2 in self.fix_with_jumps(aaa.arcs): + fl1 = self.first_line(l1) + fl2 = self.first_line(l2) + if fl1 != fl2: + self._all_arcs.add((fl1, fl2)) + + self._missing_arc_fragments = aaa.missing_arc_fragments + + def fix_with_jumps(self, arcs: Iterable[TArc]) -> set[TArc]: + """Adjust arcs to fix jumps leaving `with` statements.""" + to_remove = set() + to_add = set() + for arc in arcs: + if arc in self._with_jump_fixers: + start = arc[0] + to_remove.add(arc) + start_next, prev_next = self._with_jump_fixers[arc] + while start_next in self._with_jump_fixers: + to_remove.add(start_next) + start_next, prev_next = self._with_jump_fixers[start_next] + to_remove.add(prev_next) + to_add.add((start, prev_next[1])) + to_remove.add(arc) + to_remove.add(start_next) + arcs = (set(arcs) | to_add) - to_remove + return arcs + + @functools.lru_cache + def exit_counts(self) -> dict[TLineNo, int]: + """Get a count of exits from that each line. + + Excluded lines are excluded. + + """ + exit_counts: dict[TLineNo, int] = collections.defaultdict(int) + for l1, l2 in self.arcs(): + if l1 < 0: + # Don't ever report -1 as a line number + continue + if l1 in self.excluded: + # Don't report excluded lines as line numbers. + continue + if l2 in self.excluded: + # Arcs to excluded lines shouldn't count. 
+ continue + exit_counts[l1] += 1 + + return exit_counts + + def _finish_action_msg(self, action_msg: str | None, end: TLineNo) -> str: + """Apply some defaulting and formatting to an arc's description.""" + if action_msg is None: + if end < 0: + action_msg = "jump to the function exit" + else: + action_msg = "jump to line {lineno}" + action_msg = action_msg.format(lineno=end) + return action_msg + + def missing_arc_description(self, start: TLineNo, end: TLineNo) -> str: + """Provide an English sentence describing a missing arc.""" + if self._missing_arc_fragments is None: + self._analyze_ast() + assert self._missing_arc_fragments is not None + + fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) + + msgs = [] + for missing_cause_msg, action_msg in fragment_pairs: + action_msg = self._finish_action_msg(action_msg, end) + msg = f"line {start} didn't {action_msg}" + if missing_cause_msg is not None: + msg += f" because {missing_cause_msg.format(lineno=start)}" + + msgs.append(msg) + + return " or ".join(msgs) + + def arc_description(self, start: TLineNo, end: TLineNo) -> str: + """Provide an English description of an arc's effect.""" + if self._missing_arc_fragments is None: + self._analyze_ast() + assert self._missing_arc_fragments is not None + + fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) + action_msg = self._finish_action_msg(fragment_pairs[0][1], end) + return action_msg + + +class ByteParser: + """Parse bytecode to understand the structure of code.""" + + def __init__( + self, + text: str, + code: CodeType | None = None, + filename: str | None = None, + ) -> None: + self.text = text + if code is not None: + self.code = code + else: + assert filename is not None + # We only get here if earlier ast parsing succeeded, so no need to + # catch errors. + self.code = compile(text, filename, "exec", dont_inherit=True) + + def child_parsers(self) -> Iterable[ByteParser]: + """Iterate over all the code objects nested within this one. + + The iteration includes `self` as its first value. + + """ + return (ByteParser(self.text, code=c) for c in code_objects(self.code)) + + def _line_numbers(self) -> Iterable[TLineNo]: + """Yield the line numbers possible in this code object. + + Uses co_lnotab described in Python/compile.c to find the + line numbers. Produces a sequence: l0, l1, ... + """ + if hasattr(self.code, "co_lines"): + # PYVERSIONS: new in 3.10 + for _, _, line in self.code.co_lines(): + if line: + yield line + else: + # Adapted from dis.py in the standard library. + byte_increments = self.code.co_lnotab[0::2] + line_increments = self.code.co_lnotab[1::2] + + last_line_num = None + line_num = self.code.co_firstlineno + byte_num = 0 + for byte_incr, line_incr in zip(byte_increments, line_increments): + if byte_incr: + if line_num != last_line_num: + yield line_num + last_line_num = line_num + byte_num += byte_incr + if line_incr >= 0x80: + line_incr -= 0x100 + line_num += line_incr + if line_num != last_line_num: + yield line_num + + def _find_statements(self) -> Iterable[TLineNo]: + """Find the statements in `self.code`. + + Produce a sequence of line numbers that start statements. Recurses + into all code objects reachable from `self.code`. + + """ + for bp in self.child_parsers(): + # Get all of the lineno information from this code. + yield from bp._line_numbers() + + +# +# AST analysis +# + +@dataclass(frozen=True, order=True) +class ArcStart: + """The information needed to start an arc. 
+ + `lineno` is the line number the arc starts from. + + `cause` is an English text fragment used as the `missing_cause_msg` for + AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an + arc wasn't executed, so should fit well into a sentence of the form, + "Line 17 didn't run because {cause}." The fragment can include "{lineno}" + to have `lineno` interpolated into it. + + As an example, this code:: + + if something(x): # line 1 + func(x) # line 2 + more_stuff() # line 3 + + would have two ArcStarts: + + - ArcStart(1, "the condition on line 1 was always true") + - ArcStart(1, "the condition on line 1 was never true") + + The first would be used to create an arc from 1 to 3, creating a message like + "line 1 didn't jump to line 3 because the condition on line 1 was always true." + + The second would be used for the arc from 1 to 2, creating a message like + "line 1 didn't jump to line 2 because the condition on line 1 was never true." + + """ + lineno: TLineNo + cause: str = "" + + +class TAddArcFn(Protocol): + """The type for AstArcAnalyzer.add_arc().""" + def __call__( + self, + start: TLineNo, + end: TLineNo, + missing_cause_msg: str | None = None, + action_msg: str | None = None, + ) -> None: + """ + Record an arc from `start` to `end`. + + `missing_cause_msg` is a description of the reason the arc wasn't + taken if it wasn't taken. For example, "the condition on line 10 was + never true." + + `action_msg` is a description of what the arc does, like "jump to line + 10" or "exit from function 'fooey'." + + """ + + +TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]] + +class Block: + """ + Blocks need to handle various exiting statements in their own ways. + + All of these methods take a list of exits, and a callable `add_arc` + function that they can use to add arcs if needed. They return True if the + exits are handled, or False if the search should continue up the block + stack. + """ + # pylint: disable=unused-argument + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process break exits.""" + return False + + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process continue exits.""" + return False + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process raise exits.""" + return False + + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + """Process return exits.""" + return False + + +class LoopBlock(Block): + """A block on the block stack representing a `for` or `while` loop.""" + def __init__(self, start: TLineNo) -> None: + # The line number where the loop starts. + self.start = start + # A set of ArcStarts, the arcs from break statements exiting this loop. + self.break_exits: set[ArcStart] = set() + + def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + self.break_exits.update(exits) + return True + + def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc(xit.lineno, self.start, xit.cause) + return True + + +class FunctionBlock(Block): + """A block on the block stack representing a function definition.""" + def __init__(self, start: TLineNo, name: str) -> None: + # The line number where the function starts. + self.start = start + # The name of the function. 
+ self.name = name + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, -self.start, xit.cause, + f"except from function {self.name!r}", + ) + return True + + def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + for xit in exits: + add_arc( + xit.lineno, -self.start, xit.cause, + f"return from function {self.name!r}", + ) + return True + + +class TryBlock(Block): + """A block on the block stack representing a `try` block.""" + def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None: + # The line number of the first "except" handler, if any. + self.handler_start = handler_start + # The line number of the "finally:" clause, if any. + self.final_start = final_start + + def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool: + if self.handler_start is not None: + for xit in exits: + add_arc(xit.lineno, self.handler_start, xit.cause) + return True + + +class NodeList(ast.AST): + """A synthetic fictitious node, containing a sequence of nodes. + + This is used when collapsing optimized if-statements, to represent the + unconditional execution of one of the clauses. + + """ + def __init__(self, body: Sequence[ast.AST]) -> None: + self.body = body + self.lineno = body[0].lineno # type: ignore[attr-defined] + +# TODO: Shouldn't the cause messages join with "and" instead of "or"? + + +class AstArcAnalyzer: + """Analyze source text with an AST to find executable code paths. + + The .analyze() method does the work, and populates these attributes: + + `arcs`: a set of (from, to) pairs of the the arcs possible in the code. + + `missing_arc_fragments`: a dict mapping (from, to) arcs to lists of + message fragments explaining why the arc is missing from execution:: + + { (start, end): [(missing_cause_msg, action_msg), ...], } + + For an arc starting from line 17, they should be usable to form complete + sentences like: "Line 17 didn't {action_msg} because {missing_cause_msg}". + + NOTE: Starting in July 2024, I've been whittling this down to only report + arc that are part of true branches. It's not clear how far this work will + go. + + """ + + def __init__( + self, + root_node: ast.AST, + statements: set[TLineNo], + multiline: dict[TLineNo, TLineNo], + ) -> None: + self.root_node = root_node + # TODO: I think this is happening in too many places. + self.statements = {multiline.get(l, l) for l in statements} + self.multiline = multiline + + # Turn on AST dumps with an environment variable. + # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. + dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0"))) + + if dump_ast: # pragma: debugging + # Dump the AST so that failing tests have helpful output. + print(f"Statements: {self.statements}") + print(f"Multiline map: {self.multiline}") + print(ast.dump(self.root_node, include_attributes=True, indent=4)) + + self.arcs: set[TArc] = set() + self.missing_arc_fragments: TArcFragments = collections.defaultdict(list) + self.block_stack: list[Block] = [] + + # If `with` clauses jump to their start on the way out, we need + # information to be able to skip over that jump. We record the arcs + # from `with` into the clause (with_entries), and the arcs from the + # clause to the `with` (with_exits). 
+ self.current_with_starts: set[TLineNo] = set() + self.all_with_starts: set[TLineNo] = set() + self.with_entries: set[TArc] = set() + self.with_exits: set[TArc] = set() + + # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code. + self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0"))) + + def analyze(self) -> None: + """Examine the AST tree from `self.root_node` to determine possible arcs.""" + for node in ast.walk(self.root_node): + node_name = node.__class__.__name__ + code_object_handler = getattr(self, "_code_object__" + node_name, None) + if code_object_handler is not None: + code_object_handler(node) + + def with_jump_fixers(self) -> dict[TArc, tuple[TArc, TArc]]: + """Get a dict with data for fixing jumps out of with statements. + + Returns a dict. The keys are arcs leaving a with statement by jumping + back to its start. The values are pairs: first, the arc from the start + to the next statement, then the arc that exits the with without going + to the start. + + """ + if not env.PYBEHAVIOR.exit_through_with: + return {} + + fixers = {} + with_nexts = { + arc + for arc in self.arcs + if arc[0] in self.all_with_starts and arc not in self.with_entries + } + for start in self.all_with_starts: + nexts = {arc[1] for arc in with_nexts if arc[0] == start} + if not nexts: + continue + assert len(nexts) == 1, f"Expected one arc, got {nexts} with {start = }" + nxt = nexts.pop() + prvs = {arc[0] for arc in self.with_exits if arc[1] == start} + for prv in prvs: + fixers[(prv, start)] = ((start, nxt), (prv, nxt)) + return fixers + + # Code object dispatchers: _code_object__* + # + # These methods are used by analyze() as the start of the analysis. + # There is one for each construct with a code object. + + def _code_object__Module(self, node: ast.Module) -> None: + start = self.line_for_node(node) + if node.body: + exits = self.process_body(node.body) + for xit in exits: + self.add_arc(xit.lineno, -start, xit.cause, "exit the module") + else: + # Empty module. + self.add_arc(start, -start) + + def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None: + start = self.line_for_node(node) + self.block_stack.append(FunctionBlock(start=start, name=node.name)) + exits = self.process_body(node.body) + self.process_return_exits(exits) + self.block_stack.pop() + + _code_object__AsyncFunctionDef = _code_object__FunctionDef + + def _code_object__ClassDef(self, node: ast.ClassDef) -> None: + start = self.line_for_node(node) + exits = self.process_body(node.body) + for xit in exits: + self.add_arc(xit.lineno, -start, xit.cause, f"exit class {node.name!r}") + + def add_arc( + self, + start: TLineNo, + end: TLineNo, + missing_cause_msg: str | None = None, + action_msg: str | None = None, + ) -> None: + """Add an arc, including message fragments to use if it is missing.""" + if self.debug: # pragma: debugging + print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}") + print(short_stack(), end="\n\n") + self.arcs.add((start, end)) + if start in self.current_with_starts: + self.with_entries.add((start, end)) + + if missing_cause_msg is not None or action_msg is not None: + self.missing_arc_fragments[(start, end)].append((missing_cause_msg, action_msg)) + + def nearest_blocks(self) -> Iterable[Block]: + """Yield the blocks in nearest-to-farthest order.""" + return reversed(self.block_stack) + + def line_for_node(self, node: ast.AST) -> TLineNo: + """What is the right line number to use for this node? 
+ + This dispatches to _line__Node functions where needed. + + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], TLineNo]], + getattr(self, "_line__" + node_name, None), + ) + if handler is not None: + return handler(node) + else: + return node.lineno # type: ignore[attr-defined, no-any-return] + + # First lines: _line__* + # + # Dispatched by line_for_node, each method knows how to identify the first + # line number in the node, as Python will report it. + + def _line_decorated(self, node: ast.FunctionDef) -> TLineNo: + """Compute first line number for things that can be decorated (classes and functions).""" + if node.decorator_list: + lineno = node.decorator_list[0].lineno + else: + lineno = node.lineno + return lineno + + def _line__Assign(self, node: ast.Assign) -> TLineNo: + return self.line_for_node(node.value) + + _line__ClassDef = _line_decorated + + def _line__Dict(self, node: ast.Dict) -> TLineNo: + if node.keys: + if node.keys[0] is not None: + return node.keys[0].lineno + else: + # Unpacked dict literals `{**{"a":1}}` have None as the key, + # use the value in that case. + return node.values[0].lineno + else: + return node.lineno + + _line__FunctionDef = _line_decorated + _line__AsyncFunctionDef = _line_decorated + + def _line__List(self, node: ast.List) -> TLineNo: + if node.elts: + return self.line_for_node(node.elts[0]) + else: + return node.lineno + + def _line__Module(self, node: ast.Module) -> TLineNo: + if env.PYBEHAVIOR.module_firstline_1: + return 1 + elif node.body: + return self.line_for_node(node.body[0]) + else: + # Empty modules have no line number, they always start at 1. + return 1 + + # The node types that just flow to the next node with no complications. + OK_TO_DEFAULT = { + "AnnAssign", "Assign", "Assert", "AugAssign", "Delete", "Expr", "Global", + "Import", "ImportFrom", "Nonlocal", "Pass", + } + + def node_exits(self, node: ast.AST) -> set[ArcStart]: + """Find the set of arc starts that exit this node. + + Return a set of ArcStarts, exits from this node to the next. Because a + node represents an entire sub-tree (including its children), the exits + from a node can be arbitrarily complex:: + + if something(1): + if other(2): + doit(3) + else: + doit(5) + + There are three exits from line 1: they start at lines 1, 3 and 5. + There are two exits from line 2: lines 3 and 5. + + """ + node_name = node.__class__.__name__ + handler = cast( + Optional[Callable[[ast.AST], set[ArcStart]]], + getattr(self, "_handle__" + node_name, None), + ) + if handler is not None: + arc_starts = handler(node) + else: + # No handler: either it's something that's ok to default (a simple + # statement), or it's something we overlooked. + if env.TESTING: + if node_name not in self.OK_TO_DEFAULT: + raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure + + # Default for simple statements: one exit from this node. + arc_starts = {ArcStart(self.line_for_node(node))} + return arc_starts + + def process_body( + self, + body: Sequence[ast.AST], + from_start: ArcStart | None = None, + prev_starts: set[ArcStart] | None = None, + ) -> set[ArcStart]: + """Process the body of a compound statement. + + `body` is the body node to process. + + `from_start` is a single `ArcStart` that starts an arc into this body. + `prev_starts` is a set of ArcStarts that can all be the start of arcs + into this body. Only one of `from_start` and `prev_starts` should be + given. + + Records arcs within the body by calling `self.add_arc`. 
+ + Returns a set of ArcStarts, the exits from this body. + + """ + if prev_starts is None: + if from_start is None: + prev_starts = set() + else: + prev_starts = {from_start} + else: + assert from_start is None + + # Loop over the nodes in the body, making arcs from each one's exits to + # the next node. + for body_node in body: + lineno = self.line_for_node(body_node) + first_line = self.multiline.get(lineno, lineno) + if first_line not in self.statements: + maybe_body_node = self.find_non_missing_node(body_node) + if maybe_body_node is None: + continue + body_node = maybe_body_node + lineno = self.line_for_node(body_node) + for prev_start in prev_starts: + self.add_arc(prev_start.lineno, lineno, prev_start.cause) + prev_starts = self.node_exits(body_node) + return prev_starts + + def find_non_missing_node(self, node: ast.AST) -> ast.AST | None: + """Search `node` looking for a child that has not been optimized away. + + This might return the node you started with, or it will work recursively + to find a child node in self.statements. + + Returns a node, or None if none of the node remains. + + """ + # This repeats work just done in process_body, but this duplication + # means we can avoid a function call in the 99.9999% case of not + # optimizing away statements. + lineno = self.line_for_node(node) + first_line = self.multiline.get(lineno, lineno) + if first_line in self.statements: + return node + + missing_fn = cast( + Optional[Callable[[ast.AST], Optional[ast.AST]]], + getattr(self, "_missing__" + node.__class__.__name__, None), + ) + if missing_fn is not None: + ret_node = missing_fn(node) + else: + ret_node = None + return ret_node + + # Missing nodes: _missing__* + # + # Entire statements can be optimized away by Python. They will appear in + # the AST, but not the bytecode. These functions are called (by + # find_non_missing_node) to find a node to use instead of the missing + # node. They can return None if the node should truly be gone. + + def _missing__If(self, node: ast.If) -> ast.AST | None: + # If the if-node is missing, then one of its children might still be + # here, but not both. So return the first of the two that isn't missing. + # Use a NodeList to hold the clauses as a single node. + non_missing = self.find_non_missing_node(NodeList(node.body)) + if non_missing: + return non_missing + if node.orelse: + return self.find_non_missing_node(NodeList(node.orelse)) + return None + + def _missing__NodeList(self, node: NodeList) -> ast.AST | None: + # A NodeList might be a mixture of missing and present nodes. Find the + # ones that are present. + non_missing_children = [] + for child in node.body: + maybe_child = self.find_non_missing_node(child) + if maybe_child is not None: + non_missing_children.append(maybe_child) + + # Return the simplest representation of the present children. + if not non_missing_children: + return None + if len(non_missing_children) == 1: + return non_missing_children[0] + return NodeList(non_missing_children) + + def _missing__While(self, node: ast.While) -> ast.AST | None: + body_nodes = self.find_non_missing_node(NodeList(node.body)) + if not body_nodes: + return None + # Make a synthetic While-true node. 
+ new_while = ast.While() # type: ignore[call-arg] + new_while.lineno = body_nodes.lineno # type: ignore[attr-defined] + new_while.test = ast.Name() # type: ignore[call-arg] + new_while.test.lineno = body_nodes.lineno # type: ignore[attr-defined] + new_while.test.id = "True" + assert hasattr(body_nodes, "body") + new_while.body = body_nodes.body + new_while.orelse = [] + return new_while + + def is_constant_expr(self, node: ast.AST) -> str | None: + """Is this a compile-time constant?""" + node_name = node.__class__.__name__ + if node_name in ["Constant", "NameConstant", "Num"]: + return "Num" + elif isinstance(node, ast.Name): + if node.id in ["True", "False", "None", "__debug__"]: + return "Name" + return None + + # In the fullness of time, these might be good tests to write: + # while EXPR: + # while False: + # listcomps hidden deep in other expressions + # listcomps hidden in lists: x = [[i for i in range(10)]] + # nested function definitions + + # Exit processing: process_*_exits + # + # These functions process the four kinds of jump exits: break, continue, + # raise, and return. To figure out where an exit goes, we have to look at + # the block stack context. For example, a break will jump to the nearest + # enclosing loop block, or the nearest enclosing finally block, whichever + # is nearer. + + def process_break_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being breaks.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_break_exits(exits, self.add_arc): + break + + def process_continue_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being continues.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_continue_exits(exits, self.add_arc): + break + + def process_raise_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being raises.""" + for block in self.nearest_blocks(): + if block.process_raise_exits(exits, self.add_arc): + break + + def process_return_exits(self, exits: set[ArcStart]) -> None: + """Add arcs due to jumps from `exits` being returns.""" + for block in self.nearest_blocks(): # pragma: always breaks + if block.process_return_exits(exits, self.add_arc): + break + + # Node handlers: _handle__* + # + # Each handler deals with a specific AST node type, dispatched from + # node_exits. Handlers return the set of exits from that node, and can + # also call self.add_arc to record arcs they find. These functions mirror + # the Python semantics of each syntactic construct. See the docstring + # for node_exits to understand the concept of exits from a node. + # + # Every node type that represents a statement should have a handler, or it + # should be listed in OK_TO_DEFAULT. 
+ + def _handle__Break(self, node: ast.Break) -> set[ArcStart]: + here = self.line_for_node(node) + break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") + self.process_break_exits({break_start}) + return set() + + def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]: + """Add arcs for things that can be decorated (classes and functions).""" + main_line: TLineNo = node.lineno + last: TLineNo | None = node.lineno + decs = node.decorator_list + if decs: + last = None + for dec_node in decs: + dec_start = self.line_for_node(dec_node) + if last is not None and dec_start != last: # type: ignore[unreachable] + self.add_arc(last, dec_start) # type: ignore[unreachable] + last = dec_start + assert last is not None + self.add_arc(last, main_line) + last = main_line + # The definition line may have been missed, but we should have it + # in `self.statements`. For some constructs, `line_for_node` is + # not what we'd think of as the first line in the statement, so map + # it to the first one. + if node.body: + body_start = self.line_for_node(node.body[0]) + body_start = self.multiline.get(body_start, body_start) + # The body is handled in collect_arcs. + assert last is not None + return {ArcStart(last)} + + _handle__ClassDef = _handle_decorated + + def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]: + here = self.line_for_node(node) + continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") + self.process_continue_exits({continue_start}) + return set() + + def _handle__For(self, node: ast.For) -> set[ArcStart]: + start = self.line_for_node(node.iter) + self.block_stack.append(LoopBlock(start=start)) + from_start = ArcStart(start, cause="the loop on line {lineno} never started") + exits = self.process_body(node.body, from_start=from_start) + # Any exit from the body will go back to the top of the loop. + for xit in exits: + self.add_arc(xit.lineno, start, xit.cause) + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits = my_block.break_exits + from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") + if node.orelse: + else_exits = self.process_body(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No else clause: exit from the for line. + exits.add(from_start) + return exits + + _handle__AsyncFor = _handle__For + + _handle__FunctionDef = _handle_decorated + _handle__AsyncFunctionDef = _handle_decorated + + def _handle__If(self, node: ast.If) -> set[ArcStart]: + start = self.line_for_node(node.test) + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits = self.process_body(node.body, from_start=from_start) + from_start = ArcStart(start, cause="the condition on line {lineno} was always true") + exits |= self.process_body(node.orelse, from_start=from_start) + return exits + + if sys.version_info >= (3, 10): + def _handle__Match(self, node: ast.Match) -> set[ArcStart]: + start = self.line_for_node(node) + last_start = start + exits = set() + for case in node.cases: + case_start = self.line_for_node(case.pattern) + self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched") + from_start = ArcStart( + case_start, + cause="the pattern on line {lineno} never matched", + ) + exits |= self.process_body(case.body, from_start=from_start) + last_start = case_start + + # case is now the last case, check for wildcard match. 
+ pattern = case.pattern # pylint: disable=undefined-loop-variable + while isinstance(pattern, ast.MatchOr): + pattern = pattern.patterns[-1] + while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None: + pattern = pattern.pattern + had_wildcard = ( + isinstance(pattern, ast.MatchAs) + and pattern.pattern is None + and case.guard is None # pylint: disable=undefined-loop-variable + ) + + if not had_wildcard: + exits.add( + ArcStart(case_start, cause="the pattern on line {lineno} always matched"), + ) + return exits + + def _handle__NodeList(self, node: NodeList) -> set[ArcStart]: + start = self.line_for_node(node) + exits = self.process_body(node.body, from_start=ArcStart(start)) + return exits + + def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]: + here = self.line_for_node(node) + raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") + self.process_raise_exits({raise_start}) + # `raise` statement jumps away, no exits from here. + return set() + + def _handle__Return(self, node: ast.Return) -> set[ArcStart]: + here = self.line_for_node(node) + return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") + self.process_return_exits({return_start}) + # `return` statement jumps away, no exits from here. + return set() + + def _handle__Try(self, node: ast.Try) -> set[ArcStart]: + if node.handlers: + handler_start = self.line_for_node(node.handlers[0]) + else: + handler_start = None + + if node.finalbody: + final_start = self.line_for_node(node.finalbody[0]) + else: + final_start = None + + # This is true by virtue of Python syntax: have to have either except + # or finally, or both. + assert handler_start is not None or final_start is not None + try_block = TryBlock(handler_start, final_start) + self.block_stack.append(try_block) + + start = self.line_for_node(node) + exits = self.process_body(node.body, from_start=ArcStart(start)) + + # We're done with the `try` body, so this block no longer handles + # exceptions. We keep the block so the `finally` clause can pick up + # flows from the handlers and `else` clause. + if node.finalbody: + try_block.handler_start = None + else: + self.block_stack.pop() + + handler_exits: set[ArcStart] = set() + + if node.handlers: + for handler_node in node.handlers: + handler_start = self.line_for_node(handler_node) + from_cause = "the exception caught by line {lineno} didn't happen" + from_start = ArcStart(handler_start, cause=from_cause) + handler_exits |= self.process_body(handler_node.body, from_start=from_start) + + if node.orelse: + exits = self.process_body(node.orelse, prev_starts=exits) + + exits |= handler_exits + + if node.finalbody: + self.block_stack.pop() + final_from = exits + + final_exits = self.process_body(node.finalbody, prev_starts=final_from) + + if exits: + # The finally clause's exits are only exits for the try block + # as a whole if the try block had some exits to begin with. 
+ exits = final_exits + + return exits + + def _handle__While(self, node: ast.While) -> set[ArcStart]: + start = to_top = self.line_for_node(node.test) + constant_test = self.is_constant_expr(node.test) + top_is_body0 = False + if constant_test: + top_is_body0 = True + if env.PYBEHAVIOR.keep_constant_test: + top_is_body0 = False + if top_is_body0: + to_top = self.line_for_node(node.body[0]) + self.block_stack.append(LoopBlock(start=to_top)) + from_start = ArcStart(start, cause="the condition on line {lineno} was never true") + exits = self.process_body(node.body, from_start=from_start) + for xit in exits: + self.add_arc(xit.lineno, to_top, xit.cause) + exits = set() + my_block = self.block_stack.pop() + assert isinstance(my_block, LoopBlock) + exits.update(my_block.break_exits) + from_start = ArcStart(start, cause="the condition on line {lineno} was always true") + if node.orelse: + else_exits = self.process_body(node.orelse, from_start=from_start) + exits |= else_exits + else: + # No `else` clause: you can exit from the start. + if not constant_test: + exits.add(from_start) + return exits + + def _handle__With(self, node: ast.With) -> set[ArcStart]: + if env.PYBEHAVIOR.exit_with_through_ctxmgr: + starts = [self.line_for_node(item.context_expr) for item in node.items] + else: + starts = [self.line_for_node(node)] + if env.PYBEHAVIOR.exit_through_with: + for start in starts: + self.current_with_starts.add(start) + self.all_with_starts.add(start) + + exits = self.process_body(node.body, from_start=ArcStart(starts[-1])) + + if env.PYBEHAVIOR.exit_through_with: + start = starts[-1] + self.current_with_starts.remove(start) + with_exit = {ArcStart(start)} + if exits: + for xit in exits: + self.add_arc(xit.lineno, start) + self.with_exits.add((xit.lineno, start)) + exits = with_exit + + return exits + + _handle__AsyncWith = _handle__With diff --git a/.venv/Lib/site-packages/coverage/phystokens.py b/.venv/Lib/site-packages/coverage/phystokens.py new file mode 100644 index 00000000..8c29402a --- /dev/null +++ b/.venv/Lib/site-packages/coverage/phystokens.py @@ -0,0 +1,198 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Better tokenizing for coverage.py.""" + +from __future__ import annotations + +import ast +import io +import keyword +import re +import sys +import token +import tokenize + +from collections.abc import Iterable + +from coverage import env +from coverage.types import TLineNo, TSourceTokenLines + + +TokenInfos = Iterable[tokenize.TokenInfo] + + +def _phys_tokens(toks: TokenInfos) -> TokenInfos: + """Return all physical tokens, even line continuations. + + tokenize.generate_tokens() doesn't return a token for the backslash that + continues lines. This wrapper provides those tokens so that we can + re-create a faithful representation of the original source. + + Returns the same values as generate_tokens() + + """ + last_line: str | None = None + last_lineno = -1 + last_ttext: str = "" + for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: + if last_lineno != elineno: + if last_line and last_line.endswith("\\\n"): + # We are at the beginning of a new line, and the last line + # ended with a backslash. We probably have to inject a + # backslash token into the stream. Unfortunately, there's more + # to figure out. 
This code:: + # + # usage = """\ + # HEY THERE + # """ + # + # triggers this condition, but the token text is:: + # + # '"""\\\nHEY THERE\n"""' + # + # so we need to figure out if the backslash is already in the + # string token or not. + inject_backslash = True + if last_ttext.endswith("\\"): + inject_backslash = False + elif ttype == token.STRING: + if (last_line.endswith("\\\n") and # pylint: disable=simplifiable-if-statement + last_line.rstrip(" \\\n").endswith(last_ttext)): + # Deal with special cases like such code:: + # + # a = ["aaa",\ # there may be zero or more blanks between "," and "\". + # "bbb \ + # ccc"] + # + inject_backslash = True + else: + # It's a multi-line string and the first line ends with + # a backslash, so we don't need to inject another. + inject_backslash = False + elif sys.version_info >= (3, 12) and ttype == token.FSTRING_MIDDLE: + inject_backslash = False + if inject_backslash: + # Figure out what column the backslash is in. + ccol = len(last_line.split("\n")[-2]) - 1 + # Yield the token, with a fake token type. + yield tokenize.TokenInfo( + 99999, "\\\n", + (slineno, ccol), (slineno, ccol+2), + last_line, + ) + last_line = ltext + if ttype not in (tokenize.NEWLINE, tokenize.NL): + last_ttext = ttext + yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext) + last_lineno = elineno + + +def find_soft_key_lines(source: str) -> set[TLineNo]: + """Helper for finding lines with soft keywords, like match/case lines.""" + soft_key_lines: set[TLineNo] = set() + + for node in ast.walk(ast.parse(source)): + if sys.version_info >= (3, 10) and isinstance(node, ast.Match): + soft_key_lines.add(node.lineno) + for case in node.cases: + soft_key_lines.add(case.pattern.lineno) + elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias): + soft_key_lines.add(node.lineno) + + return soft_key_lines + + +def source_token_lines(source: str) -> TSourceTokenLines: + """Generate a series of lines, one for each line in `source`. + + Each line is a list of pairs, each pair is a token:: + + [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] + + Each pair has a token class, and the token text. + + If you concatenate all the token texts, and then join them with newlines, + you should have your original `source` back, with two differences: + trailing white space is not preserved, and a final line with no newline + is indistinguishable from a final line with a newline. + + """ + + ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} + line: list[tuple[str, str]] = [] + col = 0 + + source = source.expandtabs(8).replace("\r\n", "\n") + tokgen = generate_tokens(source) + + if env.PYBEHAVIOR.soft_keywords: + soft_key_lines = find_soft_key_lines(source) + else: + soft_key_lines = set() + + for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen): + mark_start = True + for part in re.split("(\n)", ttext): + if part == "\n": + yield line + line = [] + col = 0 + mark_end = False + elif part == "": + mark_end = False + elif ttype in ws_tokens: + mark_end = False + else: + if mark_start and scol > col: + line.append(("ws", " " * (scol - col))) + mark_start = False + tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3] + if ttype == token.NAME: + if keyword.iskeyword(ttext): + # Hard keywords are always keywords. + tok_class = "key" + elif sys.version_info >= (3, 10): # PYVERSIONS + # Need the version_info check to keep mypy from borking + # on issoftkeyword here. 
+ if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext): + # Soft keywords appear at the start of their line. + if len(line) == 0: + is_start_of_line = True + elif (len(line) == 1) and line[0][0] == "ws": + is_start_of_line = True + else: + is_start_of_line = False + if is_start_of_line and sline in soft_key_lines: + tok_class = "key" + line.append((tok_class, part)) + mark_end = True + scol = 0 + if mark_end: + col = ecol + + if line: + yield line + + +def generate_tokens(text: str) -> TokenInfos: + """A helper around `tokenize.generate_tokens`. + + Originally this was used to cache the results, but it didn't seem to make + reporting go faster, and caused issues with using too much memory. + + """ + readline = io.StringIO(text).readline + return tokenize.generate_tokens(readline) + + +def source_encoding(source: bytes) -> str: + """Determine the encoding for `source`, according to PEP 263. + + `source` is a byte string: the text of the program. + + Returns a string, the name of the encoding. + + """ + readline = iter(source.splitlines(True)).__next__ + return tokenize.detect_encoding(readline)[0] diff --git a/.venv/Lib/site-packages/coverage/plugin.py b/.venv/Lib/site-packages/coverage/plugin.py new file mode 100644 index 00000000..11c0679f --- /dev/null +++ b/.venv/Lib/site-packages/coverage/plugin.py @@ -0,0 +1,617 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +.. versionadded:: 4.0 + +Plug-in interfaces for coverage.py. + +Coverage.py supports a few different kinds of plug-ins that change its +behavior: + +* File tracers implement tracing of non-Python file types. + +* Configurers add custom configuration, using Python code to change the + configuration. + +* Dynamic context switchers decide when the dynamic context has changed, for + example, to record what test function produced the coverage. + +To write a coverage.py plug-in, create a module with a subclass of +:class:`~coverage.CoveragePlugin`. You will override methods in your class to +participate in various aspects of coverage.py's processing. +Different types of plug-ins have to override different methods. + +Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` +to provide debugging information about their operation. + +Your module must also contain a ``coverage_init`` function that registers an +instance of your plug-in class:: + + import coverage + + class MyPlugin(coverage.CoveragePlugin): + ... + + def coverage_init(reg, options): + reg.add_file_tracer(MyPlugin()) + +You use the `reg` parameter passed to your ``coverage_init`` function to +register your plug-in object. The registration method you call depends on +what kind of plug-in it is. + +If your plug-in takes options, the `options` parameter is a dictionary of your +plug-in's options from the coverage.py configuration file. Use them however +you want to configure your object before registering it. + +Coverage.py will store its own information on your plug-in object, using +attributes whose names start with ``_coverage_``. Don't be startled. + +.. warning:: + Plug-ins are imported by coverage.py before it begins measuring code. + If you write a plugin in your own project, it might import your product + code before coverage.py can start measuring. This can result in your + own code being reported as missing. + + One solution is to put your plugins in your project tree, but not in + your importable Python package. 
+ + +.. _file_tracer_plugins: + +File Tracers +============ + +File tracers implement measurement support for non-Python files. File tracers +implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim +files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report +on those files. + +In your ``coverage_init`` function, use the ``add_file_tracer`` method to +register your file tracer. + + +.. _configurer_plugins: + +Configurers +=========== + +.. versionadded:: 4.5 + +Configurers modify the configuration of coverage.py during start-up. +Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to +change the configuration. + +In your ``coverage_init`` function, use the ``add_configurer`` method to +register your configurer. + + +.. _dynamic_context_plugins: + +Dynamic Context Switchers +========================= + +.. versionadded:: 5.0 + +Dynamic context switcher plugins implement the +:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute +the context label for each measured frame. + +Computed context labels are useful when you want to group measured data without +modifying the source code. + +For example, you could write a plugin that checks `frame.f_code` to inspect +the currently executed method, and set the context label to a fully qualified +method name if it's an instance method of `unittest.TestCase` and the method +name starts with 'test'. Such a plugin would provide basic coverage grouping +by test and could be used with test runners that have no built-in coveragepy +support. + +In your ``coverage_init`` function, use the ``add_dynamic_context`` method to +register your dynamic context switcher. + +""" + +from __future__ import annotations + +import dataclasses +import functools + +from types import FrameType +from typing import Any +from collections.abc import Iterable + +from coverage import files +from coverage.misc import _needs_to_implement +from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines + + +class CoveragePlugin: + """Base class for coverage.py plug-ins.""" + + _coverage_plugin_name: str + _coverage_enabled: bool + + def file_tracer(self, filename: str) -> FileTracer | None: # pylint: disable=unused-argument + """Get a :class:`FileTracer` object for a file. + + Plug-in type: file tracer. + + Every Python source file is offered to your plug-in to give it a chance + to take responsibility for tracing the file. If your plug-in can + handle the file, it should return a :class:`FileTracer` object. + Otherwise return None. + + There is no way to register your plug-in for particular files. + Instead, this method is invoked for all files as they are executed, + and the plug-in decides whether it can trace the file or not. + Be prepared for `filename` to refer to all kinds of files that have + nothing to do with your plug-in. + + The file name will be a Python file being executed. There are two + broad categories of behavior for a plug-in, depending on the kind of + files your plug-in supports: + + * Static file names: each of your original source files has been + converted into a distinct Python file. Your plug-in is invoked with + the Python file name, and it maps it back to its original source + file. + + * Dynamic file names: all of your source files are executed by the same + Python file. In this case, your plug-in implements + :meth:`FileTracer.dynamic_source_filename` to provide the actual + source file for each execution frame. 
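+
+        For example, a plug-in for a template language would typically use
+        dynamic file names, since many different template files execute
+        through the same rendering code.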
+ + `filename` is a string, the path to the file being considered. This is + the absolute real path to the file. If you are comparing to other + paths, be sure to take this into account. + + Returns a :class:`FileTracer` object to use to trace `filename`, or + None if this plug-in cannot trace this file. + + """ + return None + + def file_reporter( + self, + filename: str, # pylint: disable=unused-argument + ) -> FileReporter | str: # str should be Literal["python"] + """Get the :class:`FileReporter` class to use for a file. + + Plug-in type: file tracer. + + This will only be invoked if `filename` returns non-None from + :meth:`file_tracer`. It's an error to return None from this method. + + Returns a :class:`FileReporter` object to use to report on `filename`, + or the string `"python"` to have coverage.py treat the file as Python. + + """ + _needs_to_implement(self, "file_reporter") + + def dynamic_context( + self, + frame: FrameType, # pylint: disable=unused-argument + ) -> str | None: + """Get the dynamically computed context label for `frame`. + + Plug-in type: dynamic context. + + This method is invoked for each frame when outside of a dynamic + context, to see if a new dynamic context should be started. If it + returns a string, a new context label is set for this and deeper + frames. The dynamic context ends when this frame returns. + + Returns a string to start a new dynamic context, or None if no new + context should be started. + + """ + return None + + def find_executable_files( + self, + src_dir: str, # pylint: disable=unused-argument + ) -> Iterable[str]: + """Yield all of the executable files in `src_dir`, recursively. + + Plug-in type: file tracer. + + Executability is a plug-in-specific property, but generally means files + which would have been considered for coverage analysis, had they been + included automatically. + + Returns or yields a sequence of strings, the paths to files that could + have been executed, including files that had been executed. + + """ + return [] + + def configure(self, config: TConfigurable) -> None: + """Modify the configuration of coverage.py. + + Plug-in type: configurer. + + This method is called during coverage.py start-up, to give your plug-in + a chance to change the configuration. The `config` parameter is an + object with :meth:`~coverage.Coverage.get_option` and + :meth:`~coverage.Coverage.set_option` methods. Do not call any other + methods on the `config` object. + + """ + pass + + def sys_info(self) -> Iterable[tuple[str, Any]]: + """Get a list of information useful for debugging. + + Plug-in type: any. + + This method will be invoked for ``--debug=sys``. Your + plug-in can return any information it wants to be displayed. + + Returns a list of pairs: `[(name, value), ...]`. + + """ + return [] + + +class CoveragePluginBase: + """Plugins produce specialized objects, which point back to the original plugin.""" + _coverage_plugin: CoveragePlugin + + +class FileTracer(CoveragePluginBase): + """Support needed for files during the execution phase. + + File tracer plug-ins implement subclasses of FileTracer to return from + their :meth:`~CoveragePlugin.file_tracer` method. + + You may construct this object from :meth:`CoveragePlugin.file_tracer` any + way you like. A natural choice would be to pass the file name given to + `file_tracer`. + + `FileTracer` objects should only be created in the + :meth:`CoveragePlugin.file_tracer` method. + + See :ref:`howitworks` for details of the different coverage.py phases. 
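+
+    A minimal tracer for statically named files can be very small (a sketch;
+    the attribute name is arbitrary)::
+
+        class MyFileTracer(coverage.FileTracer):
+            def __init__(self, filename):
+                self._filename = filename
+
+            def source_filename(self):
+                return self._filename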
+
+    """
+
+    def source_filename(self) -> str:
+        """The source file name for this file.
+
+        This may be any file name you like. A key responsibility of a plug-in
+        is to own the mapping from Python execution back to whatever source
+        file name was originally the source of the code.
+
+        See :meth:`CoveragePlugin.file_tracer` for details about static and
+        dynamic file names.
+
+        Returns the file name to credit with this execution.
+
+        """
+        _needs_to_implement(self, "source_filename")
+
+    def has_dynamic_source_filename(self) -> bool:
+        """Does this FileTracer have dynamic source file names?
+
+        FileTracers can provide dynamically determined file names by
+        implementing :meth:`dynamic_source_filename`. Invoking that function
+        is expensive. To determine whether to invoke it, coverage.py uses the
+        result of this function to know if it needs to bother invoking
+        :meth:`dynamic_source_filename`.
+
+        See :meth:`CoveragePlugin.file_tracer` for details about static and
+        dynamic file names.
+
+        Returns True if :meth:`dynamic_source_filename` should be called to get
+        dynamic source file names.
+
+        """
+        return False
+
+    def dynamic_source_filename(
+        self,
+        filename: str,  # pylint: disable=unused-argument
+        frame: FrameType,  # pylint: disable=unused-argument
+    ) -> str | None:
+        """Get a dynamically computed source file name.
+
+        Some plug-ins need to compute the source file name dynamically for each
+        frame.
+
+        This function will not be invoked if
+        :meth:`has_dynamic_source_filename` returns False.
+
+        Returns the source file name for this frame, or None if this frame
+        shouldn't be measured.
+
+        """
+        return None
+
+    def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
+        """Get the range of source line numbers for a given call frame.
+
+        The call frame is examined, and the source line number in the original
+        file is returned. The return value is a pair of numbers, the starting
+        line number and the ending line number, both inclusive. For example,
+        returning (5, 7) means that lines 5, 6, and 7 should be considered
+        executed.
+
+        This function might decide that the frame doesn't indicate any lines
+        from the source file were executed. Return (-1, -1) in this case to
+        tell coverage.py that no lines should be recorded for this frame.
+
+        """
+        lineno = frame.f_lineno
+        return lineno, lineno
+
+
+@dataclasses.dataclass
+class CodeRegion:
+    """Data for a region of code found by :meth:`FileReporter.code_regions`."""
+
+    #: The kind of region, like `"function"` or `"class"`. Must be one of the
+    #: singular values returned by :meth:`FileReporter.code_region_kinds`.
+    kind: str
+
+    #: The name of the region. For example, a function or class name.
+    name: str
+
+    #: The line in the source file to link to when navigating to the region.
+    #: Can be a line not mentioned in `lines`.
+    start: int
+
+    #: The lines in the region. Should be lines that could be executed in the
+    #: region. For example, a class region includes all of the lines in the
+    #: methods of the class, but not the lines defining class attributes, since
+    #: they are executed on import, not as part of exercising the class. The
+    #: set can include non-executable lines like blanks and comments.
+ lines: set[int] + + def __lt__(self, other: CodeRegion) -> bool: + """To support sorting to make test-writing easier.""" + if self.name == other.name: + return min(self.lines) < min(other.lines) + return self.name < other.name + + +@functools.total_ordering +class FileReporter(CoveragePluginBase): + """Support needed for files during the analysis and reporting phases. + + File tracer plug-ins implement a subclass of `FileReporter`, and return + instances from their :meth:`CoveragePlugin.file_reporter` method. + + There are many methods here, but only :meth:`lines` is required, to provide + the set of executable lines in the file. + + See :ref:`howitworks` for details of the different coverage.py phases. + + """ + + def __init__(self, filename: str) -> None: + """Simple initialization of a `FileReporter`. + + The `filename` argument is the path to the file being reported. This + will be available as the `.filename` attribute on the object. Other + method implementations on this base class rely on this attribute. + + """ + self.filename = filename + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} filename={self.filename!r}>" + + def relative_filename(self) -> str: + """Get the relative file name for this file. + + This file path will be displayed in reports. The default + implementation will supply the actual project-relative file path. You + only need to supply this method if you have an unusual syntax for file + paths. + + """ + return files.relative_filename(self.filename) + + def source(self) -> str: + """Get the source for the file. + + Returns a Unicode string. + + The base implementation simply reads the `self.filename` file and + decodes it as UTF-8. Override this method if your file isn't readable + as a text file, or if you need other encoding support. + + """ + with open(self.filename, encoding="utf-8") as f: + return f.read() + + def lines(self) -> set[TLineNo]: + """Get the executable lines in this file. + + Your plug-in must determine which lines in the file were possibly + executable. This method returns a set of those line numbers. + + Returns a set of line numbers. + + """ + _needs_to_implement(self, "lines") + + def excluded_lines(self) -> set[TLineNo]: + """Get the excluded executable lines in this file. + + Your plug-in can use any method it likes to allow the user to exclude + executable lines from consideration. + + Returns a set of line numbers. + + The base implementation returns the empty set. + + """ + return set() + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + """Translate recorded lines into reported lines. + + Some file formats will want to report lines slightly differently than + they are recorded. For example, Python records the last line of a + multi-line statement, but reports are nicer if they mention the first + line. + + Your plug-in can optionally define this method to perform these kinds + of adjustment. + + `lines` is a sequence of integers, the recorded line numbers. + + Returns a set of integers, the adjusted line numbers. + + The base implementation returns the numbers unchanged. + + """ + return set(lines) + + def arcs(self) -> set[TArc]: + """Get the executable arcs in this file. + + To support branch coverage, your plug-in needs to be able to indicate + possible execution paths, as a set of line number pairs. Each pair is + a `(prev, next)` pair indicating that execution can transition from the + `prev` line number to the `next` line number. + + Returns a set of pairs of line numbers. 
The default implementation
+        returns an empty set.
+
+        """
+        return set()
+
+    def no_branch_lines(self) -> set[TLineNo]:
+        """Get the lines excused from branch coverage in this file.
+
+        Your plug-in can use any method it likes to allow the user to exclude
+        lines from consideration of branch coverage.
+
+        Returns a set of line numbers.
+
+        The base implementation returns the empty set.
+
+        """
+        return set()
+
+    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
+        """Translate recorded arcs into reported arcs.
+
+        Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
+        line number pairs.
+
+        Returns a set of line number pairs.
+
+        The default implementation returns `arcs` unchanged.
+
+        """
+        return set(arcs)
+
+    def exit_counts(self) -> dict[TLineNo, int]:
+        """Get a count of exits from each line.
+
+        To determine which lines are branches, coverage.py looks for lines that
+        have more than one exit. This function creates a dict mapping each
+        executable line number to a count of how many exits it has.
+
+        To be honest, this feels wrong, and should be refactored. Let me know
+        if you attempt to implement this method in your plug-in...
+
+        """
+        return {}
+
+    def missing_arc_description(
+        self,
+        start: TLineNo,
+        end: TLineNo,
+        executed_arcs: Iterable[TArc] | None = None,  # pylint: disable=unused-argument
+    ) -> str:
+        """Provide an English sentence describing a missing arc.
+
+        The `start` and `end` arguments are the line numbers of the missing
+        arc. Negative numbers indicate entering or exiting code objects.
+
+        The `executed_arcs` argument is a set of line number pairs, the arcs
+        that were executed in this file.
+
+        By default, this simply returns the string "Line {start} didn't jump
+        to line {end}".
+
+        """
+        return f"Line {start} didn't jump to line {end}"
+
+    def arc_description(
+        self,
+        start: TLineNo,  # pylint: disable=unused-argument
+        end: TLineNo
+    ) -> str:
+        """Provide an English description of an arc's effect."""
+        return f"jump to line {end}"
+
+    def source_token_lines(self) -> TSourceTokenLines:
+        """Generate a series of tokenized lines, one for each line in `source`.
+
+        These tokens are used for syntax-colored reports.
+
+        Each line is a list of pairs, each pair is a token::
+
+            [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]
+
+        Each pair has a token class, and the token text. The token classes
+        are:
+
+        * ``"com"``: a comment
+        * ``"key"``: a keyword
+        * ``"nam"``: a name, or identifier
+        * ``"num"``: a number
+        * ``"op"``: an operator
+        * ``"str"``: a string literal
+        * ``"ws"``: some white space
+        * ``"txt"``: some other kind of text
+
+        If you concatenate all the token texts, and then join them with
+        newlines, you should have your original source back.
+
+        The default implementation simply returns each line tagged as
+        ``"txt"``.
+
+        """
+        for line in self.source().splitlines():
+            yield [("txt", line)]
+
+    def code_regions(self) -> Iterable[CodeRegion]:
+        """Identify regions in the source file for finer reporting than by file.
+
+        Returns an iterable of :class:`CodeRegion` objects. The kinds reported
+        should be in the possibilities returned by :meth:`code_region_kinds`.
+
+        """
+        return []
+
+    def code_region_kinds(self) -> Iterable[tuple[str, str]]:
+        """Return the kinds of code regions this plugin can find.
+ + The returned pairs are the singular and plural forms of the kinds:: + + [ + ("function", "functions"), + ("class", "classes"), + ] + + This will usually be hard-coded, but could also differ by the specific + source file involved. + + """ + return [] + + def __eq__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename == other.filename + + def __lt__(self, other: Any) -> bool: + return isinstance(other, FileReporter) and self.filename < other.filename + + # This object doesn't need to be hashed. + __hash__ = None # type: ignore[assignment] diff --git a/.venv/Lib/site-packages/coverage/plugin_support.py b/.venv/Lib/site-packages/coverage/plugin_support.py new file mode 100644 index 00000000..99e3bc22 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/plugin_support.py @@ -0,0 +1,298 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Support for plugins.""" + +from __future__ import annotations + +import os +import os.path +import sys + +from types import FrameType +from typing import Any +from collections.abc import Iterable, Iterator + +from coverage.exceptions import PluginError +from coverage.misc import isolate_module +from coverage.plugin import CoveragePlugin, FileTracer, FileReporter +from coverage.types import ( + TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines, +) + +os = isolate_module(os) + + +class Plugins: + """The currently loaded collection of coverage.py plugins.""" + + def __init__(self) -> None: + self.order: list[CoveragePlugin] = [] + self.names: dict[str, CoveragePlugin] = {} + self.file_tracers: list[CoveragePlugin] = [] + self.configurers: list[CoveragePlugin] = [] + self.context_switchers: list[CoveragePlugin] = [] + + self.current_module: str | None = None + self.debug: TDebugCtl | None + + @classmethod + def load_plugins( + cls, + modules: Iterable[str], + config: TPluginConfig, + debug: TDebugCtl | None = None, + ) -> Plugins: + """Load plugins from `modules`. + + Returns a Plugins object with the loaded and configured plugins. + + """ + plugins = cls() + plugins.debug = debug + + for module in modules: + plugins.current_module = module + __import__(module) + mod = sys.modules[module] + + coverage_init = getattr(mod, "coverage_init", None) + if not coverage_init: + raise PluginError( + f"Plugin module {module!r} didn't define a coverage_init function", + ) + + options = config.get_plugin_options(module) + coverage_init(plugins, options) + + plugins.current_module = None + return plugins + + def add_file_tracer(self, plugin: CoveragePlugin) -> None: + """Add a file tracer plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.file_tracer` method. + + """ + self._add_plugin(plugin, self.file_tracers) + + def add_configurer(self, plugin: CoveragePlugin) -> None: + """Add a configuring plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.configure` method. + + """ + self._add_plugin(plugin, self.configurers) + + def add_dynamic_context(self, plugin: CoveragePlugin) -> None: + """Add a dynamic context plugin. + + `plugin` is an instance of a third-party plugin class. It must + implement the :meth:`CoveragePlugin.dynamic_context` method. 
+ + """ + self._add_plugin(plugin, self.context_switchers) + + def add_noop(self, plugin: CoveragePlugin) -> None: + """Add a plugin that does nothing. + + This is only useful for testing the plugin support. + + """ + self._add_plugin(plugin, None) + + def _add_plugin( + self, + plugin: CoveragePlugin, + specialized: list[CoveragePlugin] | None, + ) -> None: + """Add a plugin object. + + `plugin` is a :class:`CoveragePlugin` instance to add. `specialized` + is a list to append the plugin to. + + """ + plugin_name = f"{self.current_module}.{plugin.__class__.__name__}" + if self.debug and self.debug.should("plugin"): + self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}") + labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug) + plugin = DebugPluginWrapper(plugin, labelled) + + plugin._coverage_plugin_name = plugin_name + plugin._coverage_enabled = True + self.order.append(plugin) + self.names[plugin_name] = plugin + if specialized is not None: + specialized.append(plugin) + + def __bool__(self) -> bool: + return bool(self.order) + + def __iter__(self) -> Iterator[CoveragePlugin]: + return iter(self.order) + + def get(self, plugin_name: str) -> CoveragePlugin: + """Return a plugin by name.""" + return self.names[plugin_name] + + +class LabelledDebug: + """A Debug writer, but with labels for prepending to the messages.""" + + def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()): + self.labels = list(prev_labels) + [label] + self.debug = debug + + def add_label(self, label: str) -> LabelledDebug: + """Add a label to the writer, and return a new `LabelledDebug`.""" + return LabelledDebug(label, self.debug, self.labels) + + def message_prefix(self) -> str: + """The prefix to use on messages, combining the labels.""" + prefixes = self.labels + [""] + return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) + + def write(self, message: str) -> None: + """Write `message`, but with the labels prepended.""" + self.debug.write(f"{self.message_prefix()}{message}") + + +class DebugPluginWrapper(CoveragePlugin): + """Wrap a plugin, and use debug to report on what it's doing.""" + + def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None: + super().__init__() + self.plugin = plugin + self.debug = debug + + def file_tracer(self, filename: str) -> FileTracer | None: + tracer = self.plugin.file_tracer(filename) + self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}") + if tracer: + debug = self.debug.add_label(f"file {filename!r}") + tracer = DebugFileTracerWrapper(tracer, debug) + return tracer + + def file_reporter(self, filename: str) -> FileReporter | str: + reporter = self.plugin.file_reporter(filename) + assert isinstance(reporter, FileReporter) + self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}") + if reporter: + debug = self.debug.add_label(f"file {filename!r}") + reporter = DebugFileReporterWrapper(filename, reporter, debug) + return reporter + + def dynamic_context(self, frame: FrameType) -> str | None: + context = self.plugin.dynamic_context(frame) + self.debug.write(f"dynamic_context({frame!r}) --> {context!r}") + return context + + def find_executable_files(self, src_dir: str) -> Iterable[str]: + executable_files = self.plugin.find_executable_files(src_dir) + self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}") + return executable_files + + def configure(self, config: TConfigurable) -> None: + self.debug.write(f"configure({config!r})") + 
self.plugin.configure(config) + + def sys_info(self) -> Iterable[tuple[str, Any]]: + return self.plugin.sys_info() + + +class DebugFileTracerWrapper(FileTracer): + """A debugging `FileTracer`.""" + + def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None: + self.tracer = tracer + self.debug = debug + + def _show_frame(self, frame: FrameType) -> str: + """A short string identifying a frame, for debug messages.""" + return "%s@%d" % ( + os.path.basename(frame.f_code.co_filename), + frame.f_lineno, + ) + + def source_filename(self) -> str: + sfilename = self.tracer.source_filename() + self.debug.write(f"source_filename() --> {sfilename!r}") + return sfilename + + def has_dynamic_source_filename(self) -> bool: + has = self.tracer.has_dynamic_source_filename() + self.debug.write(f"has_dynamic_source_filename() --> {has!r}") + return has + + def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None: + dyn = self.tracer.dynamic_source_filename(filename, frame) + self.debug.write("dynamic_source_filename({!r}, {}) --> {!r}".format( + filename, self._show_frame(frame), dyn, + )) + return dyn + + def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]: + pair = self.tracer.line_number_range(frame) + self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}") + return pair + + +class DebugFileReporterWrapper(FileReporter): + """A debugging `FileReporter`.""" + + def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None: + super().__init__(filename) + self.reporter = reporter + self.debug = debug + + def relative_filename(self) -> str: + ret = self.reporter.relative_filename() + self.debug.write(f"relative_filename() --> {ret!r}") + return ret + + def lines(self) -> set[TLineNo]: + ret = self.reporter.lines() + self.debug.write(f"lines() --> {ret!r}") + return ret + + def excluded_lines(self) -> set[TLineNo]: + ret = self.reporter.excluded_lines() + self.debug.write(f"excluded_lines() --> {ret!r}") + return ret + + def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]: + ret = self.reporter.translate_lines(lines) + self.debug.write(f"translate_lines({lines!r}) --> {ret!r}") + return ret + + def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]: + ret = self.reporter.translate_arcs(arcs) + self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}") + return ret + + def no_branch_lines(self) -> set[TLineNo]: + ret = self.reporter.no_branch_lines() + self.debug.write(f"no_branch_lines() --> {ret!r}") + return ret + + def exit_counts(self) -> dict[TLineNo, int]: + ret = self.reporter.exit_counts() + self.debug.write(f"exit_counts() --> {ret!r}") + return ret + + def arcs(self) -> set[TArc]: + ret = self.reporter.arcs() + self.debug.write(f"arcs() --> {ret!r}") + return ret + + def source(self) -> str: + ret = self.reporter.source() + self.debug.write("source() --> %d chars" % (len(ret),)) + return ret + + def source_token_lines(self) -> TSourceTokenLines: + ret = list(self.reporter.source_token_lines()) + self.debug.write("source_token_lines() --> %d tokens" % (len(ret),)) + return ret diff --git a/.venv/Lib/site-packages/coverage/py.typed b/.venv/Lib/site-packages/coverage/py.typed new file mode 100644 index 00000000..bacd23a1 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561 to indicate that this package has type hints. 
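Taken together, plugin.py and plugin_support.py are enough to write and
register a dynamic context switcher of the kind described in the plugin.py
docstring above. A minimal sketch (the class name is invented, and labeling
frames whose function name starts with "test" is just one possible rule):

    import coverage

    class TestContextSwitcher(coverage.CoveragePlugin):
        def dynamic_context(self, frame):
            # Start a new dynamic context when a test function is entered.
            if frame.f_code.co_name.startswith("test"):
                return frame.f_code.co_name
            return None

    def coverage_init(reg, options):
        reg.add_dynamic_context(TestContextSwitcher())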
diff --git a/.venv/Lib/site-packages/coverage/python.py b/.venv/Lib/site-packages/coverage/python.py new file mode 100644 index 00000000..e87ff43c --- /dev/null +++ b/.venv/Lib/site-packages/coverage/python.py @@ -0,0 +1,273 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Python source expertise for coverage.py""" + +from __future__ import annotations + +import os.path +import types +import zipimport + +from typing import TYPE_CHECKING +from collections.abc import Iterable + +from coverage import env +from coverage.exceptions import CoverageException, NoSource +from coverage.files import canonical_filename, relative_filename, zip_location +from coverage.misc import isolate_module, join_regex +from coverage.parser import PythonParser +from coverage.phystokens import source_token_lines, source_encoding +from coverage.plugin import CodeRegion, FileReporter +from coverage.regions import code_regions +from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +def read_python_source(filename: str) -> bytes: + """Read the Python source text from `filename`. + + Returns bytes. + + """ + with open(filename, "rb") as f: + source = f.read() + + return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") + + +def get_python_source(filename: str) -> str: + """Return the source code, as unicode.""" + base, ext = os.path.splitext(filename) + if ext == ".py" and env.WINDOWS: + exts = [".py", ".pyw"] + else: + exts = [ext] + + source_bytes: bytes | None + for ext in exts: + try_filename = base + ext + if os.path.exists(try_filename): + # A regular text file: open it. + source_bytes = read_python_source(try_filename) + break + + # Maybe it's in a zip file? + source_bytes = get_zip_bytes(try_filename) + if source_bytes is not None: + break + else: + # Couldn't find source. + raise NoSource(f"No source for code: '{filename}'.") + + # Replace \f because of http://bugs.python.org/issue19035 + source_bytes = source_bytes.replace(b"\f", b" ") + source = source_bytes.decode(source_encoding(source_bytes), "replace") + + # Python code should always end with a line with a newline. + if source and source[-1] != "\n": + source += "\n" + + return source + + +def get_zip_bytes(filename: str) -> bytes | None: + """Get data from `filename` if it is a zip file path. + + Returns the bytestring data read from the zip file, or None if no zip file + could be found or `filename` isn't in it. The data returned will be + an empty string if the file is empty. + + """ + zipfile_inner = zip_location(filename) + if zipfile_inner is not None: + zipfile, inner = zipfile_inner + try: + zi = zipimport.zipimporter(zipfile) + except zipimport.ZipImportError: + return None + try: + data = zi.get_data(inner) + except OSError: + return None + return data + return None + + +def source_for_file(filename: str) -> str: + """Return the source filename for `filename`. + + Given a file name being traced, return the best guess as to the source + file to attribute it to. + + """ + if filename.endswith(".py"): + # .py files are themselves source files. + return filename + + elif filename.endswith((".pyc", ".pyo")): + # Bytecode files probably have source files near them. + py_filename = filename[:-1] + if os.path.exists(py_filename): + # Found a .py file, use that. + return py_filename + if env.WINDOWS: + # On Windows, it could be a .pyw file. 
+            pyw_filename = py_filename + "w"
+            if os.path.exists(pyw_filename):
+                return pyw_filename
+        # Didn't find source, but it's probably the .py file we want.
+        return py_filename
+
+    # No idea, just use the file name as-is.
+    return filename
+
+
+def source_for_morf(morf: TMorf) -> str:
+    """Get the source filename for the module-or-file `morf`."""
+    if hasattr(morf, "__file__") and morf.__file__:
+        filename = morf.__file__
+    elif isinstance(morf, types.ModuleType):
+        # A module should have had .__file__, otherwise we can't use it.
+        # This could be a PEP-420 namespace package.
+        raise CoverageException(f"Module {morf} has no file")
+    else:
+        filename = morf
+
+    filename = source_for_file(filename)
+    return filename
+
+
+class PythonFileReporter(FileReporter):
+    """Report support for a Python file."""
+
+    def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
+        self.coverage = coverage
+
+        filename = source_for_morf(morf)
+
+        fname = filename
+        canonicalize = True
+        if self.coverage is not None:
+            if self.coverage.config.relative_files:
+                canonicalize = False
+        if canonicalize:
+            fname = canonical_filename(filename)
+        super().__init__(fname)
+
+        if hasattr(morf, "__name__"):
+            name = morf.__name__.replace(".", os.sep)
+            if os.path.basename(filename).startswith("__init__."):
+                name += os.sep + "__init__"
+            name += ".py"
+        else:
+            name = relative_filename(filename)
+        self.relname = name
+
+        self._source: str | None = None
+        self._parser: PythonParser | None = None
+        self._excluded = None
+
+    def __repr__(self) -> str:
+        return f"<PythonFileReporter {self.filename!r}>"
+
+    def relative_filename(self) -> str:
+        return self.relname
+
+    @property
+    def parser(self) -> PythonParser:
+        """Lazily create a :class:`PythonParser`."""
+        assert self.coverage is not None
+        if self._parser is None:
+            self._parser = PythonParser(
+                filename=self.filename,
+                exclude=self.coverage._exclude_regex("exclude"),
+            )
+            self._parser.parse_source()
+        return self._parser
+
+    def lines(self) -> set[TLineNo]:
+        """Return the line numbers of statements in the file."""
+        return self.parser.statements
+
+    def excluded_lines(self) -> set[TLineNo]:
+        """Return the line numbers of excluded statements in the file."""
+        return self.parser.excluded
+
+    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
+        return self.parser.translate_lines(lines)
+
+    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
+        return self.parser.translate_arcs(arcs)
+
+    def no_branch_lines(self) -> set[TLineNo]:
+        assert self.coverage is not None
+        no_branch = self.parser.lines_matching(
+            join_regex(
+                self.coverage.config.partial_list
+                + self.coverage.config.partial_always_list
+            )
+        )
+        return no_branch
+
+    def arcs(self) -> set[TArc]:
+        return self.parser.arcs()
+
+    def exit_counts(self) -> dict[TLineNo, int]:
+        return self.parser.exit_counts()
+
+    def missing_arc_description(
+        self,
+        start: TLineNo,
+        end: TLineNo,
+        executed_arcs: Iterable[TArc] | None = None,
+    ) -> str:
+        return self.parser.missing_arc_description(start, end)
+
+    def arc_description(
+        self,
+        start: TLineNo,
+        end: TLineNo
+    ) -> str:
+        return self.parser.arc_description(start, end)
+
+    def source(self) -> str:
+        if self._source is None:
+            self._source = get_python_source(self.filename)
+        return self._source
+
+    def should_be_python(self) -> bool:
+        """Does it seem like this file should contain Python?
+
+        This is used to decide if a file reported as part of the execution of
+        a program was really likely to have contained Python in the first
+        place.
+ + """ + # Get the file extension. + _, ext = os.path.splitext(self.filename) + + # Anything named *.py* should be Python. + if ext.startswith(".py"): + return True + # A file with no extension should be Python. + if not ext: + return True + # Everything else is probably not Python. + return False + + def source_token_lines(self) -> TSourceTokenLines: + return source_token_lines(self.source()) + + def code_regions(self) -> Iterable[CodeRegion]: + return code_regions(self.source()) + + def code_region_kinds(self) -> Iterable[tuple[str, str]]: + return [ + ("function", "functions"), + ("class", "classes"), + ] diff --git a/.venv/Lib/site-packages/coverage/pytracer.py b/.venv/Lib/site-packages/coverage/pytracer.py new file mode 100644 index 00000000..81401db3 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/pytracer.py @@ -0,0 +1,362 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Raw data collector for coverage.py.""" + +from __future__ import annotations + +import atexit +import dis +import itertools +import sys +import threading + +from types import FrameType, ModuleType +from typing import Any, Callable, cast + +from coverage import env +from coverage.types import ( + TArc, + TFileDisposition, + TLineNo, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFileData, + TTraceFn, + TWarnFn, + Tracer, +) + +# We need the YIELD_VALUE opcode below, in a comparison-friendly form. +# PYVERSIONS: RESUME is new in Python3.11 +RESUME = dis.opmap.get("RESUME") +RETURN_VALUE = dis.opmap["RETURN_VALUE"] +if RESUME is None: + YIELD_VALUE = dis.opmap["YIELD_VALUE"] + YIELD_FROM = dis.opmap["YIELD_FROM"] + YIELD_FROM_OFFSET = 0 if env.PYPY else 2 +else: + YIELD_VALUE = YIELD_FROM = YIELD_FROM_OFFSET = -1 + +# When running meta-coverage, this file can try to trace itself, which confuses +# everything. Don't trace ourselves. + +THIS_FILE = __file__.rstrip("co") + +class PyTracer(Tracer): + """Python implementation of the raw data tracer.""" + + # Because of poor implementations of trace-function-manipulating tools, + # the Python trace function must be kept very simple. In particular, there + # must be only one function ever set as the trace function, both through + # sys.settrace, and as the return value from the trace function. Put + # another way, the trace function must always return itself. It cannot + # swap in other functions, or return None to avoid tracing a particular + # frame. + # + # The trace manipulator that introduced this restriction is DecoratorTools, + # which sets a trace function, and then later restores the pre-existing one + # by calling sys.settrace with a function it found in the current frame. + # + # Systems that use DecoratorTools (or similar trace manipulations) must use + # PyTracer to get accurate results. The command-line --timid argument is + # used to force the use of this tracer. + + tracer_ids = itertools.count() + + def __init__(self) -> None: + # Which tracer are we? 
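+        # Each tracer instance gets its own sequential id; it only appears in
+        # the debug output written by log().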
+        self.id = next(self.tracer_ids)
+
+        # Attributes set from the collector:
+        self.data: TTraceData
+        self.trace_arcs = False
+        self.should_trace: TShouldTraceFn
+        self.should_trace_cache: dict[str, TFileDisposition | None]
+        self.should_start_context: TShouldStartContextFn | None = None
+        self.switch_context: Callable[[str | None], None] | None = None
+        self.lock_data: Callable[[], None]
+        self.unlock_data: Callable[[], None]
+        self.warn: TWarnFn
+
+        # The threading module to use, if any.
+        self.threading: ModuleType | None = None
+
+        self.cur_file_data: TTraceFileData | None = None
+        self.last_line: TLineNo = 0
+        self.cur_file_name: str | None = None
+        self.context: str | None = None
+        self.started_context = False
+
+        # The data_stack parallels the Python call stack. Each entry is
+        # information about an active frame, a four-element tuple:
+        #   [0] The TTraceData for this frame's file. Could be None if we
+        #       aren't tracing this frame.
+        #   [1] The current file name for the frame. None if we aren't tracing
+        #       this frame.
+        #   [2] The last line number executed in this frame.
+        #   [3] Boolean: did this frame start a new context?
+        self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = []
+        self.thread: threading.Thread | None = None
+        self.stopped = False
+        self._activity = False
+
+        self.in_atexit = False
+        # On exit, self.in_atexit = True
+        atexit.register(setattr, self, "in_atexit", True)
+
+        # Cache a bound method on the instance, so that we don't have to
+        # re-create a bound method object all the time.
+        self._cached_bound_method_trace: TTraceFn = self._trace
+
+    def __repr__(self) -> str:
+        points = sum(len(v) for v in self.data.values())
+        files = len(self.data)
+        return f"<PyTracer at {id(self):#x}: {points} data points in {files} files>"
+
+    def log(self, marker: str, *args: Any) -> None:
+        """For hard-core logging of what this tracer is doing."""
+        with open("/tmp/debug_trace.txt", "a") as f:
+            f.write(f"{marker} {self.id}[{len(self.data_stack)}]")
+            if 0:  # if you want thread ids..
+                f.write(".{:x}.{:x}".format(  # type: ignore[unreachable]
+                    self.thread.ident,
+                    self.threading.current_thread().ident,
+                ))
+            f.write(" {}".format(" ".join(map(str, args))))
+            if 0:  # if you want callers..
+                f.write(" | ")  # type: ignore[unreachable]
+                stack = " / ".join(
+                    (fname or "???").rpartition("/")[-1]
+                    for _, fname, _, _ in self.data_stack
+                )
+                f.write(stack)
+            f.write("\n")
+
+    def _trace(
+        self,
+        frame: FrameType,
+        event: str,
+        arg: Any,  # pylint: disable=unused-argument
+        lineno: TLineNo | None = None,  # pylint: disable=unused-argument
+    ) -> TTraceFn | None:
+        """The trace function passed to sys.settrace."""
+
+        if THIS_FILE in frame.f_code.co_filename:
+            return None
+
+        # f = frame; code = f.f_code
+        # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event)
+
+        if (self.stopped and sys.gettrace() == self._cached_bound_method_trace):  # pylint: disable=comparison-with-callable
+            # The PyTrace.stop() method has been called, possibly by another
+            # thread, let's deactivate ourselves now.
+ if 0: + f = frame # type: ignore[unreachable] + self.log("---\nX", f.f_code.co_filename, f.f_lineno) + while f: + self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) + f = f.f_back + sys.settrace(None) + try: + self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( + self.data_stack.pop() + ) + except IndexError: + self.log( + "Empty stack!", + frame.f_code.co_filename, + frame.f_lineno, + frame.f_code.co_name, + ) + return None + + # if event != "call" and frame.f_code.co_filename != self.cur_file_name: + # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno) + + if event == "call": + # Should we start a new context? + if self.should_start_context and self.context is None: + context_maybe = self.should_start_context(frame) # pylint: disable=not-callable + if context_maybe is not None: + self.context = context_maybe + started_context = True + assert self.switch_context is not None + self.switch_context(self.context) # pylint: disable=not-callable + else: + started_context = False + else: + started_context = False + self.started_context = started_context + + # Entering a new frame. Decide if we should trace in this file. + self._activity = True + self.data_stack.append( + ( + self.cur_file_data, + self.cur_file_name, + self.last_line, + started_context, + ), + ) + + # Improve tracing performance: when calling a function, both caller + # and callee are often within the same file. if that's the case, we + # don't have to re-check whether to trace the corresponding + # function (which is a little bit expensive since it involves + # dictionary lookups). This optimization is only correct if we + # didn't start a context. + filename = frame.f_code.co_filename + if filename != self.cur_file_name or started_context: + self.cur_file_name = filename + disp = self.should_trace_cache.get(filename) + if disp is None: + disp = self.should_trace(filename, frame) + self.should_trace_cache[filename] = disp + + self.cur_file_data = None + if disp.trace: + tracename = disp.source_filename + assert tracename is not None + self.lock_data() + try: + if tracename not in self.data: + self.data[tracename] = set() + finally: + self.unlock_data() + self.cur_file_data = self.data[tracename] + else: + frame.f_trace_lines = False + elif not self.cur_file_data: + frame.f_trace_lines = False + + # The call event is really a "start frame" event, and happens for + # function calls and re-entering generators. The f_lasti field is + # -1 for calls, and a real offset for generators. Use <0 as the + # line number for calls, and the real line number for generators. + if RESUME is not None: + # The current opcode is guaranteed to be RESUME. The argument + # determines what kind of resume it is. + oparg = frame.f_code.co_code[frame.f_lasti + 1] + real_call = (oparg == 0) + else: + real_call = (getattr(frame, "f_lasti", -1) < 0) + if real_call: + self.last_line = -frame.f_code.co_firstlineno + else: + self.last_line = frame.f_lineno + + elif event == "line": + # Record an executed line. + if self.cur_file_data is not None: + flineno: TLineNo = frame.f_lineno + + if self.trace_arcs: + cast(set[TArc], self.cur_file_data).add((self.last_line, flineno)) + else: + cast(set[TLineNo], self.cur_file_data).add(flineno) + self.last_line = flineno + + elif event == "return": + if self.trace_arcs and self.cur_file_data: + # Record an arc leaving the function, but beware that a + # "return" event might just mean yielding from a generator. 
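+                # Distinguishing a real return from a yield requires looking
+                # at the bytecode: on Python 3.11+ every point where a frame
+                # can resume compiles to a RESUME opcode, so the return is
+                # real only if the next opcode to execute is not RESUME (or
+                # we are at the end of the code object). On older versions we
+                # look for YIELD_VALUE / YIELD_FROM at (or near) f_lasti
+                # instead.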
+ code = frame.f_code.co_code + lasti = frame.f_lasti + if RESUME is not None: + if len(code) == lasti + 2: + # A return from the end of a code object is a real return. + real_return = True + else: + # It is a real return if we aren't going to resume next. + if env.PYBEHAVIOR.lasti_is_yield: + lasti += 2 + real_return = (code[lasti] != RESUME) + else: + if code[lasti] == RETURN_VALUE: + real_return = True + elif code[lasti] == YIELD_VALUE: + real_return = False + elif len(code) <= lasti + YIELD_FROM_OFFSET: + real_return = True + elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM: + real_return = False + else: + real_return = True + if real_return: + first = frame.f_code.co_firstlineno + cast(set[TArc], self.cur_file_data).add((self.last_line, -first)) + + # Leaving this function, pop the filename stack. + self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = ( + self.data_stack.pop() + ) + # Leaving a context? + if self.started_context: + assert self.switch_context is not None + self.context = None + self.switch_context(None) # pylint: disable=not-callable + return self._cached_bound_method_trace + + def start(self) -> TTraceFn: + """Start this Tracer. + + Return a Python function suitable for use with sys.settrace(). + + """ + self.stopped = False + if self.threading: + if self.thread is None: + self.thread = self.threading.current_thread() + + sys.settrace(self._cached_bound_method_trace) + return self._cached_bound_method_trace + + def stop(self) -> None: + """Stop this Tracer.""" + # Get the active tracer callback before setting the stop flag to be + # able to detect if the tracer was changed prior to stopping it. + tf = sys.gettrace() + + # Set the stop flag. The actual call to sys.settrace(None) will happen + # in the self._trace callback itself to make sure to call it from the + # right thread. + self.stopped = True + + if self.threading: + assert self.thread is not None + if self.thread.ident != self.threading.current_thread().ident: + # Called on a different thread than started us: we can't unhook + # ourselves, but we've set the flag that we should stop, so we + # won't do any more tracing. + #self.log("~", "stopping on different threads") + return + + # PyPy clears the trace function before running atexit functions, + # so don't warn if we are in atexit on PyPy and the trace function + # has changed to None. Metacoverage also messes this up, so don't + # warn if we are measuring ourselves. 
+ suppress_warning = ( + (env.PYPY and self.in_atexit and tf is None) + or env.METACOV + ) + if self.warn and not suppress_warning: + if tf != self._cached_bound_method_trace: # pylint: disable=comparison-with-callable + self.warn( + "Trace function changed, data is likely wrong: " + + f"{tf!r} != {self._cached_bound_method_trace!r}", + slug="trace-changed", + ) + + def activity(self) -> bool: + """Has there been any activity?""" + return self._activity + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + self._activity = False + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + return None diff --git a/.venv/Lib/site-packages/coverage/regions.py b/.venv/Lib/site-packages/coverage/regions.py new file mode 100644 index 00000000..7954be69 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/regions.py @@ -0,0 +1,126 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Find functions and classes in Python code.""" + +from __future__ import annotations + +import ast +import dataclasses + +from typing import cast + +from coverage.plugin import CodeRegion + + +@dataclasses.dataclass +class Context: + """The nested named context of a function or class.""" + name: str + kind: str + lines: set[int] + + +class RegionFinder: + """An ast visitor that will find and track regions of code. + + Functions and classes are tracked by name. Results are in the .regions + attribute. + + """ + def __init__(self) -> None: + self.regions: list[CodeRegion] = [] + self.context: list[Context] = [] + + def parse_source(self, source: str) -> None: + """Parse `source` and walk the ast to populate the .regions attribute.""" + self.handle_node(ast.parse(source)) + + def fq_node_name(self) -> str: + """Get the current fully qualified name we're processing.""" + return ".".join(c.name for c in self.context) + + def handle_node(self, node: ast.AST) -> None: + """Recursively handle any node.""" + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): + self.handle_FunctionDef(node) + elif isinstance(node, ast.ClassDef): + self.handle_ClassDef(node) + else: + self.handle_node_body(node) + + def handle_node_body(self, node: ast.AST) -> None: + """Recursively handle the nodes in this node's body, if any.""" + for body_node in getattr(node, "body", ()): + self.handle_node(body_node) + + def handle_FunctionDef(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None: + """Called for `def` or `async def`.""" + lines = set(range(node.body[0].lineno, cast(int, node.body[-1].end_lineno) + 1)) + if self.context and self.context[-1].kind == "class": + # Function bodies are part of their enclosing class. + self.context[-1].lines |= lines + # Function bodies should be excluded from the nearest enclosing function. + for ancestor in reversed(self.context): + if ancestor.kind == "function": + ancestor.lines -= lines + break + self.context.append(Context(node.name, "function", lines)) + self.regions.append( + CodeRegion( + kind="function", + name=self.fq_node_name(), + start=node.lineno, + lines=lines, + ) + ) + self.handle_node_body(node) + self.context.pop() + + def handle_ClassDef(self, node: ast.ClassDef) -> None: + """Called for `class`.""" + # The lines for a class are the lines in the methods of the class. + # We start empty, and count on visit_FunctionDef to add the lines it + # finds. 
+ lines: set[int] = set() + self.context.append(Context(node.name, "class", lines)) + self.regions.append( + CodeRegion( + kind="class", + name=self.fq_node_name(), + start=node.lineno, + lines=lines, + ) + ) + self.handle_node_body(node) + self.context.pop() + # Class bodies should be excluded from the enclosing classes. + for ancestor in reversed(self.context): + if ancestor.kind == "class": + ancestor.lines -= lines + + +def code_regions(source: str) -> list[CodeRegion]: + """Find function and class regions in source code. + + Analyzes the code in `source`, and returns a list of :class:`CodeRegion` + objects describing functions and classes as regions of the code:: + + [ + CodeRegion(kind="function", name="func1", start=8, lines={10, 11, 12}), + CodeRegion(kind="function", name="MyClass.method", start=30, lines={34, 35, 36}), + CodeRegion(kind="class", name="MyClass", start=25, lines={34, 35, 36}), + ] + + The line numbers will include comments and blank lines. Later processing + will need to ignore those lines as needed. + + Nested functions and classes are excluded from their enclosing region. No + line should be reported as being part of more than one function, or more + than one class. Lines in methods are reported as being in a function and + in a class. + + """ + rf = RegionFinder() + rf.parse_source(source) + return rf.regions diff --git a/.venv/Lib/site-packages/coverage/report.py b/.venv/Lib/site-packages/coverage/report.py new file mode 100644 index 00000000..bf04f2a8 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/report.py @@ -0,0 +1,282 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Summary reporting""" + +from __future__ import annotations + +import sys + +from typing import Any, IO, TYPE_CHECKING +from collections.abc import Iterable + +from coverage.exceptions import ConfigError, NoDataError +from coverage.misc import human_sorted_items +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis, Numbers +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class SummaryReporter: + """A reporter for writing the summary report.""" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + self.branches = coverage.get_data().has_arcs() + self.outfile: IO[str] | None = None + self.output_format = self.config.format or "text" + if self.output_format not in {"text", "markdown", "total"}: + raise ConfigError(f"Unknown report format choice: {self.output_format!r}") + self.fr_analysis: list[tuple[FileReporter, Analysis]] = [] + self.skipped_count = 0 + self.empty_count = 0 + self.total = Numbers(precision=self.config.precision) + + def write(self, line: str) -> None: + """Write a line to the output, adding a newline.""" + assert self.outfile is not None + self.outfile.write(line.rstrip()) + self.outfile.write("\n") + + def write_items(self, items: Iterable[str]) -> None: + """Write a list of strings, joined together.""" + self.write("".join(items)) + + def _report_text( + self, + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], + ) -> None: + """Internal method that prints report data in text format. + + `header` is a list with captions. + `lines_values` is list of lists of sortable values. 
+ `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. + max_name = max([len(line[0]) for line in lines_values] + [5]) + 1 + max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1 + max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values]) + formats = dict( + Name="{:{name_len}}", + Stmts="{:>7}", + Miss="{:>7}", + Branch="{:>7}", + BrPart="{:>7}", + Cover="{:>{n}}", + Missing="{:>10}", + ) + header_items = [ + formats[item].format(item, name_len=max_name, n=max_n) + for item in header + ] + header_str = "".join(header_items) + rule = "-" * len(header_str) + + # Write the header + self.write(header_str) + self.write(rule) + + formats.update(dict(Cover="{:>{n}}%"), Missing=" {:9}") + for values in lines_values: + # build string with line values + line_items = [ + formats[item].format(str(value), + name_len=max_name, n=max_n-1) for item, value in zip(header, values) + ] + self.write_items(line_items) + + # Write a TOTAL line + if lines_values: + self.write(rule) + + line_items = [ + formats[item].format(str(value), + name_len=max_name, n=max_n-1) for item, value in zip(header, total_line) + ] + self.write_items(line_items) + + for end_line in end_lines: + self.write(end_line) + + def _report_markdown( + self, + header: list[str], + lines_values: list[list[Any]], + total_line: list[Any], + end_lines: list[str], + ) -> None: + """Internal method that prints report data in markdown format. + + `header` is a list with captions. + `lines_values` is a sorted list of lists containing coverage information. + `total_line` is a list with values of the total line. + `end_lines` is a list of ending lines with information about skipped files. + + """ + # Prepare the formatting strings, header, and column sorting. 
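+        # A markdown table needs "|"-delimited cells, an alignment rule row
+        # ("---:" right-aligns a column), and escaped underscores in file
+        # names so they aren't rendered as emphasis.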
+ max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0) + max_name = max(max_name, len("**TOTAL**")) + 1 + formats = dict( + Name="| {:{name_len}}|", + Stmts="{:>9} |", + Miss="{:>9} |", + Branch="{:>9} |", + BrPart="{:>9} |", + Cover="{:>{n}} |", + Missing="{:>10} |", + ) + max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover ")) + header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header] + header_str = "".join(header_items) + rule_str = "|" + " ".join(["- |".rjust(len(header_items[0])-1, "-")] + + ["-: |".rjust(len(item)-1, "-") for item in header_items[1:]], + ) + + # Write the header + self.write(header_str) + self.write(rule_str) + + for values in lines_values: + # build string with line values + formats.update(dict(Cover="{:>{n}}% |")) + line_items = [ + formats[item].format(str(value).replace("_", "\\_"), name_len=max_name, n=max_n-1) + for item, value in zip(header, values) + ] + self.write_items(line_items) + + # Write the TOTAL line + formats.update(dict(Name="|{:>{name_len}} |", Cover="{:>{n}} |")) + total_line_items: list[str] = [] + for item, value in zip(header, total_line): + if value == "": + insert = value + elif item == "Cover": + insert = f" **{value}%**" + else: + insert = f" **{value}**" + total_line_items += formats[item].format(insert, name_len=max_name, n=max_n) + self.write_items(total_line_items) + for end_line in end_lines: + self.write(end_line) + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: + """Writes a report summarizing coverage statistics per module. + + `outfile` is a text-mode file object to write the summary to. + + """ + self.outfile = outfile or sys.stdout + + self.coverage.get_data().set_query_contexts(self.config.report_contexts) + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.report_one_file(fr, analysis) + + if not self.total.n_files and not self.skipped_count: + raise NoDataError("No data to report.") + + if self.output_format == "total": + self.write(self.total.pc_covered_str) + else: + self.tabular_report() + + return self.total.pc_covered + + def tabular_report(self) -> None: + """Writes tabular report formats.""" + # Prepare the header line and column sorting. + header = ["Name", "Stmts", "Miss"] + if self.branches: + header += ["Branch", "BrPart"] + header += ["Cover"] + if self.config.show_missing: + header += ["Missing"] + + column_order = dict(name=0, stmts=1, miss=2, cover=-1) + if self.branches: + column_order.update(dict(branch=3, brpart=4)) + + # `lines_values` is list of lists of sortable values. + lines_values = [] + + for (fr, analysis) in self.fr_analysis: + nums = analysis.numbers + + args = [fr.relative_filename(), nums.n_statements, nums.n_missing] + if self.branches: + args += [nums.n_branches, nums.n_partial_branches] + args += [nums.pc_covered_str] + if self.config.show_missing: + args += [analysis.missing_formatted(branches=True)] + args += [nums.pc_covered] + lines_values.append(args) + + # Line sorting. 
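+        # A leading "-" on the sort option means descending, a leading "+"
+        # means ascending. Names sort human-style; other columns sort by
+        # their numeric value, with the file name as a tie-breaker.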
+ sort_option = (self.config.sort or "name").lower() + reverse = False + if sort_option[0] == "-": + reverse = True + sort_option = sort_option[1:] + elif sort_option[0] == "+": + sort_option = sort_option[1:] + sort_idx = column_order.get(sort_option) + if sort_idx is None: + raise ConfigError(f"Invalid sorting option: {self.config.sort!r}") + if sort_option == "name": + lines_values = human_sorted_items(lines_values, reverse=reverse) + else: + lines_values.sort( + key=lambda line: (line[sort_idx], line[0]), + reverse=reverse, + ) + + # Calculate total if we had at least one file. + total_line = ["TOTAL", self.total.n_statements, self.total.n_missing] + if self.branches: + total_line += [self.total.n_branches, self.total.n_partial_branches] + total_line += [self.total.pc_covered_str] + if self.config.show_missing: + total_line += [""] + + # Create other final lines. + end_lines = [] + if self.config.skip_covered and self.skipped_count: + file_suffix = "s" if self.skipped_count>1 else "" + end_lines.append( + f"\n{self.skipped_count} file{file_suffix} skipped due to complete coverage.", + ) + if self.config.skip_empty and self.empty_count: + file_suffix = "s" if self.empty_count > 1 else "" + end_lines.append(f"\n{self.empty_count} empty file{file_suffix} skipped.") + + if self.output_format == "markdown": + formatter = self._report_markdown + else: + formatter = self._report_text + formatter(header, lines_values, total_line, end_lines) + + def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None: + """Report on just one file, the callback from report().""" + nums = analysis.numbers + self.total += nums + + no_missing_lines = (nums.n_missing == 0) + no_missing_branches = (nums.n_partial_branches == 0) + if self.config.skip_covered and no_missing_lines and no_missing_branches: + # Don't report on 100% files. + self.skipped_count += 1 + elif self.config.skip_empty and nums.n_statements == 0: + # Don't report on empty files. + self.empty_count += 1 + else: + self.fr_analysis.append((fr, analysis)) diff --git a/.venv/Lib/site-packages/coverage/report_core.py b/.venv/Lib/site-packages/coverage/report_core.py new file mode 100644 index 00000000..477034ba --- /dev/null +++ b/.venv/Lib/site-packages/coverage/report_core.py @@ -0,0 +1,120 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Reporter foundation for coverage.py.""" + +from __future__ import annotations + +import sys + +from typing import ( + Callable, IO, Protocol, TYPE_CHECKING, +) +from collections.abc import Iterable, Iterator + +from coverage.exceptions import NoDataError, NotPython +from coverage.files import prep_patterns, GlobMatcher +from coverage.misc import ensure_dir_for_file, file_be_gone +from coverage.plugin import FileReporter +from coverage.results import Analysis +from coverage.types import TMorf + +if TYPE_CHECKING: + from coverage import Coverage + + +class Reporter(Protocol): + """What we expect of reporters.""" + + report_type: str + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float: + """Generate a report of `morfs`, written to `outfile`.""" + + +def render_report( + output_path: str, + reporter: Reporter, + morfs: Iterable[TMorf] | None, + msgfn: Callable[[str], None], +) -> float: + """Run a one-file report generator, managing the output file. + + This function ensures the output file is ready to be written to. Then writes + the report to it. 
Then closes the file and cleans up. + + """ + file_to_close = None + delete_file = False + + if output_path == "-": + outfile = sys.stdout + else: + # Ensure that the output directory is created; done here because this + # report pre-opens the output file. HtmlReporter does this on its own + # because its task is more complex, being multiple files. + ensure_dir_for_file(output_path) + outfile = open(output_path, "w", encoding="utf-8") + file_to_close = outfile + delete_file = True + + try: + ret = reporter.report(morfs, outfile=outfile) + if file_to_close is not None: + msgfn(f"Wrote {reporter.report_type} to {output_path}") + delete_file = False + return ret + finally: + if file_to_close is not None: + file_to_close.close() + if delete_file: + file_be_gone(output_path) # pragma: part covered (doesn't return) + + +def get_analysis_to_report( + coverage: Coverage, + morfs: Iterable[TMorf] | None, +) -> Iterator[tuple[FileReporter, Analysis]]: + """Get the files to report on. + + For each morf in `morfs`, if it should be reported on (based on the omit + and include configuration options), yield a pair, the `FileReporter` and + `Analysis` for the morf. + + """ + fr_morfs = coverage._get_file_reporters(morfs) + config = coverage.config + + if config.report_include: + matcher = GlobMatcher(prep_patterns(config.report_include), "report_include") + fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if matcher.match(fr.filename)] + + if config.report_omit: + matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit") + fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if not matcher.match(fr.filename)] + + if not fr_morfs: + raise NoDataError("No data to report.") + + for fr, morf in sorted(fr_morfs): + try: + analysis = coverage._analyze(morf) + except NotPython: + # Only report errors for .py files, and only if we didn't + # explicitly suppress those errors. + # NotPython is only raised by PythonFileReporter, which has a + # should_be_python() method. 
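+            # (Hence the attr-defined ignore below: the FileReporter base
+            # class doesn't declare should_be_python().)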
+ if fr.should_be_python(): # type: ignore[attr-defined] + if config.ignore_errors: + msg = f"Couldn't parse Python file '{fr.filename}'" + coverage._warn(msg, slug="couldnt-parse") + else: + raise + except Exception as exc: + if config.ignore_errors: + msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip() + coverage._warn(msg, slug="couldnt-parse") + else: + raise + else: + yield (fr, analysis) diff --git a/.venv/Lib/site-packages/coverage/results.py b/.venv/Lib/site-packages/coverage/results.py new file mode 100644 index 00000000..dd14c8a2 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/results.py @@ -0,0 +1,419 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Results of coverage measurement.""" + +from __future__ import annotations + +import collections +import dataclasses + +from collections.abc import Container, Iterable +from typing import TYPE_CHECKING + +from coverage.exceptions import ConfigError +from coverage.misc import nice_pair +from coverage.types import TArc, TLineNo + +if TYPE_CHECKING: + from coverage.data import CoverageData + from coverage.plugin import FileReporter + + +def analysis_from_file_reporter( + data: CoverageData, + precision: int, + file_reporter: FileReporter, + filename: str, +) -> Analysis: + """Create an Analysis from a FileReporter.""" + has_arcs = data.has_arcs() + statements = file_reporter.lines() + excluded = file_reporter.excluded_lines() + executed = file_reporter.translate_lines(data.lines(filename) or []) + + if has_arcs: + arc_possibilities_set = file_reporter.arcs() + arcs = data.arcs(filename) or [] + + # Reduce the set of arcs to the ones that could be branches. + dests = collections.defaultdict(set) + for fromno, tono in arc_possibilities_set: + dests[fromno].add(tono) + single_dests = { + fromno: list(tonos)[0] + for fromno, tonos in dests.items() + if len(tonos) == 1 + } + new_arcs = set() + for fromno, tono in arcs: + if fromno != tono: + new_arcs.add((fromno, tono)) + else: + if fromno in single_dests: + new_arcs.add((fromno, single_dests[fromno])) + + arcs_executed_set = file_reporter.translate_arcs(new_arcs) + exit_counts = file_reporter.exit_counts() + no_branch = file_reporter.no_branch_lines() + else: + arc_possibilities_set = set() + arcs_executed_set = set() + exit_counts = {} + no_branch = set() + + return Analysis( + precision=precision, + filename=filename, + has_arcs=has_arcs, + statements=statements, + excluded=excluded, + executed=executed, + arc_possibilities_set=arc_possibilities_set, + arcs_executed_set=arcs_executed_set, + exit_counts=exit_counts, + no_branch=no_branch, + ) + + +@dataclasses.dataclass +class Analysis: + """The results of analyzing a FileReporter.""" + + precision: int + filename: str + has_arcs: bool + statements: set[TLineNo] + excluded: set[TLineNo] + executed: set[TLineNo] + arc_possibilities_set: set[TArc] + arcs_executed_set: set[TArc] + exit_counts: dict[TLineNo, int] + no_branch: set[TLineNo] + + def __post_init__(self) -> None: + self.arc_possibilities = sorted(self.arc_possibilities_set) + self.arcs_executed = sorted(self.arcs_executed_set) + self.missing = self.statements - self.executed + + if self.has_arcs: + n_branches = self._total_branches() + mba = self.missing_branch_arcs() + n_partial_branches = sum(len(v) for k,v in mba.items() if k not in self.missing) + n_missing_branches = sum(len(v) for k,v in mba.items()) + else: + n_branches = n_partial_branches = 
n_missing_branches = 0 + + self.numbers = Numbers( + precision=self.precision, + n_files=1, + n_statements=len(self.statements), + n_excluded=len(self.excluded), + n_missing=len(self.missing), + n_branches=n_branches, + n_partial_branches=n_partial_branches, + n_missing_branches=n_missing_branches, + ) + + def narrow(self, lines: Container[TLineNo]) -> Analysis: + """Create a narrowed Analysis. + + The current analysis is copied to make a new one that only considers + the lines in `lines`. + """ + + statements = {lno for lno in self.statements if lno in lines} + excluded = {lno for lno in self.excluded if lno in lines} + executed = {lno for lno in self.executed if lno in lines} + + if self.has_arcs: + arc_possibilities_set = { + (a, b) for a, b in self.arc_possibilities_set + if a in lines or b in lines + } + arcs_executed_set = { + (a, b) for a, b in self.arcs_executed_set + if a in lines or b in lines + } + exit_counts = { + lno: num for lno, num in self.exit_counts.items() + if lno in lines + } + no_branch = {lno for lno in self.no_branch if lno in lines} + else: + arc_possibilities_set = set() + arcs_executed_set = set() + exit_counts = {} + no_branch = set() + + return Analysis( + precision=self.precision, + filename=self.filename, + has_arcs=self.has_arcs, + statements=statements, + excluded=excluded, + executed=executed, + arc_possibilities_set=arc_possibilities_set, + arcs_executed_set=arcs_executed_set, + exit_counts=exit_counts, + no_branch=no_branch, + ) + + def missing_formatted(self, branches: bool = False) -> str: + """The missing line numbers, formatted nicely. + + Returns a string like "1-2, 5-11, 13-14". + + If `branches` is true, includes the missing branch arcs also. + + """ + if branches and self.has_arcs: + arcs = self.missing_branch_arcs().items() + else: + arcs = None + + return format_lines(self.statements, self.missing, arcs=arcs) + + def arcs_missing(self) -> list[TArc]: + """Returns a sorted list of the un-executed arcs in the code.""" + missing = ( + p for p in self.arc_possibilities + if p not in self.arcs_executed + and p[0] not in self.no_branch + and p[1] not in self.excluded + ) + return sorted(missing) + + def _branch_lines(self) -> list[TLineNo]: + """Returns a list of line numbers that have more than one exit.""" + return [l1 for l1,count in self.exit_counts.items() if count > 1] + + def _total_branches(self) -> int: + """How many total branches are there?""" + return sum(count for count in self.exit_counts.values() if count > 1) + + def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: + """Return arcs that weren't executed from branch lines. + + Returns {l1:[l2a,l2b,...], ...} + + """ + missing = self.arcs_missing() + branch_lines = set(self._branch_lines()) + mba = collections.defaultdict(list) + for l1, l2 in missing: + if l1 == l2: + continue + if l1 in branch_lines: + mba[l1].append(l2) + return mba + + def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]: + """Return arcs that were executed from branch lines. + + Only include ones that we considered possible. + + Returns {l1:[l2a,l2b,...], ...} + + """ + branch_lines = set(self._branch_lines()) + eba = collections.defaultdict(list) + for l1, l2 in self.arcs_executed: + if l1 == l2: + continue + if (l1, l2) not in self.arc_possibilities_set: + continue + if l1 in branch_lines: + eba[l1].append(l2) + return eba + + def branch_stats(self) -> dict[TLineNo, tuple[int, int]]: + """Get stats about branches. 
+ + Returns a dict mapping line numbers to a tuple: + (total_exits, taken_exits). + """ + + missing_arcs = self.missing_branch_arcs() + stats = {} + for lnum in self._branch_lines(): + exits = self.exit_counts[lnum] + missing = len(missing_arcs[lnum]) + stats[lnum] = (exits, exits - missing) + return stats + + +@dataclasses.dataclass +class Numbers: + """The numerical results of measuring coverage. + + This holds the basic statistics from `Analysis`, and is used to roll + up statistics across files. + + """ + + precision: int = 0 + n_files: int = 0 + n_statements: int = 0 + n_excluded: int = 0 + n_missing: int = 0 + n_branches: int = 0 + n_partial_branches: int = 0 + n_missing_branches: int = 0 + + @property + def n_executed(self) -> int: + """Returns the number of executed statements.""" + return self.n_statements - self.n_missing + + @property + def n_executed_branches(self) -> int: + """Returns the number of executed branches.""" + return self.n_branches - self.n_missing_branches + + @property + def pc_covered(self) -> float: + """Returns a single percentage value for coverage.""" + if self.n_statements > 0: + numerator, denominator = self.ratio_covered + pc_cov = (100.0 * numerator) / denominator + else: + pc_cov = 100.0 + return pc_cov + + @property + def pc_covered_str(self) -> str: + """Returns the percent covered, as a string, without a percent sign. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". + + """ + return display_covered(self.pc_covered, self.precision) + + @property + def ratio_covered(self) -> tuple[int, int]: + """Return a numerator and denominator for the coverage ratio.""" + numerator = self.n_executed + self.n_executed_branches + denominator = self.n_statements + self.n_branches + return numerator, denominator + + def __add__(self, other: Numbers) -> Numbers: + return Numbers( + self.precision, + self.n_files + other.n_files, + self.n_statements + other.n_statements, + self.n_excluded + other.n_excluded, + self.n_missing + other.n_missing, + self.n_branches + other.n_branches, + self.n_partial_branches + other.n_partial_branches, + self.n_missing_branches + other.n_missing_branches, + ) + + def __radd__(self, other: int) -> Numbers: + # Implementing 0+Numbers allows us to sum() a list of Numbers. + assert other == 0 # we only ever call it this way. + return self + + +def display_covered(pc: float, precision: int) -> str: + """Return a displayable total percentage, as a string. + + Note that "0" is only returned when the value is truly zero, and "100" + is only returned when the value is truly 100. Rounding can never + result in either "0" or "100". 
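+
+    For example, with precision=2, 99.999 displays as "99.99" and 0.001 as
+    "0.01"; only a true 100.0 or 0.0 produces "100.00" or "0.00".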
+ + """ + near0 = 1.0 / 10 ** precision + if 0 < pc < near0: + pc = near0 + elif (100.0 - near0) < pc < 100: + pc = 100.0 - near0 + else: + pc = round(pc, precision) + return "%.*f" % (precision, pc) + + +def _line_ranges( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], +) -> list[tuple[TLineNo, TLineNo]]: + """Produce a list of ranges for `format_lines`.""" + statements = sorted(statements) + lines = sorted(lines) + + pairs = [] + start = None + lidx = 0 + for stmt in statements: + if lidx >= len(lines): + break + if stmt == lines[lidx]: + lidx += 1 + if not start: + start = stmt + end = stmt + elif start: + pairs.append((start, end)) + start = None + if start: + pairs.append((start, end)) + return pairs + + +def format_lines( + statements: Iterable[TLineNo], + lines: Iterable[TLineNo], + arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None, +) -> str: + """Nicely format a list of line numbers. + + Format a list of line numbers for printing by coalescing groups of lines as + long as the lines represent consecutive statements. This will coalesce + even if there are gaps between statements. + + For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and + `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". + + Both `lines` and `statements` can be any iterable. All of the elements of + `lines` must be in `statements`, and all of the values must be positive + integers. + + If `arcs` is provided, they are (start,[end,end,end]) pairs that will be + included in the output as long as start isn't in `lines`. + + """ + line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] + if arcs is not None: + line_exits = sorted(arcs) + for line, exits in line_exits: + for ex in sorted(exits): + if line not in lines and ex not in lines: + dest = (ex if ex > 0 else "exit") + line_items.append((line, f"{line}->{dest}")) + + ret = ", ".join(t[-1] for t in sorted(line_items)) + return ret + + +def should_fail_under(total: float, fail_under: float, precision: int) -> bool: + """Determine if a total should fail due to fail-under. + + `total` is a float, the coverage measurement total. `fail_under` is the + fail_under setting to compare with. `precision` is the number of digits + to consider after the decimal point. + + Returns True if the total should fail. + + """ + # We can never achieve higher than 100% coverage, or less than zero. + if not (0 <= fail_under <= 100.0): + msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100." + raise ConfigError(msg) + + # Special case for fail_under=100, it must really be 100. 
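+    # For instance (worked examples): should_fail_under(99.99, 100, 2) is
+    # True, while should_fail_under(80.004, 80, 2) is False since the total
+    # rounds to 80.0.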
+ if fail_under == 100.0 and total != 100.0: + return True + + return round(total, precision) < fail_under diff --git a/.venv/Lib/site-packages/coverage/sqldata.py b/.venv/Lib/site-packages/coverage/sqldata.py new file mode 100644 index 00000000..76b56928 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/sqldata.py @@ -0,0 +1,1103 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""SQLite coverage data.""" + +from __future__ import annotations + +import collections +import datetime +import functools +import glob +import itertools +import os +import random +import socket +import sqlite3 +import string +import sys +import textwrap +import threading +import zlib + +from typing import ( + cast, Any, Callable, +) +from collections.abc import Collection, Mapping, Sequence + +from coverage.debug import NoDebugging, auto_repr +from coverage.exceptions import CoverageException, DataError +from coverage.misc import file_be_gone, isolate_module +from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits +from coverage.sqlitedb import SqliteDb +from coverage.types import AnyCallable, FilePath, TArc, TDebugCtl, TLineNo, TWarnFn +from coverage.version import __version__ + +os = isolate_module(os) + +# If you change the schema: increment the SCHEMA_VERSION and update the +# docs in docs/dbschema.rst by running "make cogdoc". + +SCHEMA_VERSION = 7 + +# Schema versions: +# 1: Released in 5.0a2 +# 2: Added contexts in 5.0a3. +# 3: Replaced line table with line_map table. +# 4: Changed line_map.bitmap to line_map.numbits. +# 5: Added foreign key declarations. +# 6: Key-value in meta. +# 7: line_map -> line_bits + +SCHEMA = """\ +CREATE TABLE coverage_schema ( + -- One row, to record the version of the schema in this db. + version integer +); + +CREATE TABLE meta ( + -- Key-value pairs, to record metadata about the data + key text, + value text, + unique (key) + -- Possible keys: + -- 'has_arcs' boolean -- Is this data recording branches? + -- 'sys_argv' text -- The coverage command line that recorded the data. + -- 'version' text -- The version of coverage.py that made the file. + -- 'when' text -- Datetime when the file was created. +); + +CREATE TABLE file ( + -- A row per file measured. + id integer primary key, + path text, + unique (path) +); + +CREATE TABLE context ( + -- A row per context measured. + id integer primary key, + context text, + unique (context) +); + +CREATE TABLE line_bits ( + -- If recording lines, a row per context per file executed. + -- All of the line numbers for that file/context are in one numbits. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + numbits blob, -- see the numbits functions in coverage.numbits + foreign key (file_id) references file (id), + foreign key (context_id) references context (id), + unique (file_id, context_id) +); + +CREATE TABLE arc ( + -- If recording branches, a row per context per from/to line transition executed. + file_id integer, -- foreign key to `file`. + context_id integer, -- foreign key to `context`. + fromno integer, -- line number jumped from. + tono integer, -- line number jumped to. + foreign key (file_id) references file (id), + foreign key (context_id) references context (id), + unique (file_id, context_id, fromno, tono) +); + +CREATE TABLE tracer ( + -- A row per file indicating the tracer used for that file. 
+    file_id integer primary key,
+    tracer text,
+    foreign key (file_id) references file (id)
+);
+"""
+
+def _locked(method: AnyCallable) -> AnyCallable:
+    """A decorator for methods that should hold self._lock."""
+    @functools.wraps(method)
+    def _wrapped(self: CoverageData, *args: Any, **kwargs: Any) -> Any:
+        if self._debug.should("lock"):
+            self._debug.write(f"Locking {self._lock!r} for {method.__name__}")
+        with self._lock:
+            if self._debug.should("lock"):
+                self._debug.write(f"Locked {self._lock!r} for {method.__name__}")
+            return method(self, *args, **kwargs)
+    return _wrapped
+
+
+class CoverageData:
+    """Manages collected coverage data, including file storage.
+
+    This class is the public supported API to the data that coverage.py
+    collects during program execution. It includes information about what code
+    was executed. It does not include information from the analysis phase, to
+    determine what lines could have been executed, or what lines were not
+    executed.
+
+    .. note::
+
+        The data file is currently a SQLite database file, with a
+        :ref:`documented schema <dbschema>`. The schema is subject to change
+        though, so be careful about querying it directly. Use this API if you
+        can to isolate yourself from changes.
+
+    There are a number of kinds of data that can be collected:
+
+    * **lines**: the line numbers of source lines that were executed.
+      These are always available.
+
+    * **arcs**: pairs of source and destination line numbers for transitions
+      between source lines. These are only available if branch coverage was
+      used.
+
+    * **file tracer names**: the module names of the file tracer plugins that
+      handled each file in the data.
+
+    Lines, arcs, and file tracer names are stored for each source file. File
+    names in this API are case-sensitive, even on platforms with
+    case-insensitive file systems.
+
+    A data file either stores lines, or arcs, but not both.
+
+    A data file is associated with the data when the :class:`CoverageData`
+    is created, using the parameters `basename`, `suffix`, and `no_disk`. The
+    base name can be queried with :meth:`base_filename`, and the actual file
+    name being used is available from :meth:`data_filename`.
+
+    To read an existing coverage.py data file, use :meth:`read`. You can then
+    access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
+    or :meth:`file_tracer`.
+
+    The :meth:`has_arcs` method indicates whether arc data is available. You
+    can get a set of the files in the data with :meth:`measured_files`. As
+    with most Python containers, you can determine if there is any data at all
+    by using this object as a boolean value.
+
+    The contexts for each line in a file can be read with
+    :meth:`contexts_by_lineno`.
+
+    To limit querying to certain contexts, use :meth:`set_query_context` or
+    :meth:`set_query_contexts`. These will narrow the focus of subsequent
+    :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
+    of all measured context names can be retrieved with
+    :meth:`measured_contexts`.
+
+    Most data files will be created by coverage.py itself, but you can use
+    methods here to create data files if you like. The :meth:`add_lines`,
+    :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
+    that are convenient for coverage.py.
+
+    To record data for contexts, use :meth:`set_context` to set a context to
+    be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls.
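+
+    A minimal usage sketch (illustrative only; the file name and line numbers
+    here are invented)::
+
+        data = CoverageData(basename="example.coverage")
+        data.set_context("test_one")
+        data.add_lines({"src/mod.py": {1, 2, 3}})
+        data.write()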
+ + To add a source file without any measured data, use :meth:`touch_file`, + or :meth:`touch_files` for a list of such files. + + Write the data to its file with :meth:`write`. + + You can clear the data in memory with :meth:`erase`. Data for specific + files can be removed from the database with :meth:`purge_files`. + + Two data collections can be combined by using :meth:`update` on one + :class:`CoverageData`, passing it the other. + + Data in a :class:`CoverageData` can be serialized and deserialized with + :meth:`dumps` and :meth:`loads`. + + The methods used during the coverage.py collection phase + (:meth:`add_lines`, :meth:`add_arcs`, :meth:`set_context`, and + :meth:`add_file_tracers`) are thread-safe. Other methods may not be. + + """ + + def __init__( + self, + basename: FilePath | None = None, + suffix: str | bool | None = None, + no_disk: bool = False, + warn: TWarnFn | None = None, + debug: TDebugCtl | None = None, + ) -> None: + """Create a :class:`CoverageData` object to hold coverage-measured data. + + Arguments: + basename (str): the base name of the data file, defaulting to + ".coverage". This can be a path to a file in another directory. + suffix (str or bool): has the same meaning as the `data_suffix` + argument to :class:`coverage.Coverage`. + no_disk (bool): if True, keep all data in memory, and don't + write any disk file. + warn: a warning callback function, accepting a warning message + argument. + debug: a `DebugControl` object (optional) + + """ + self._no_disk = no_disk + self._basename = os.path.abspath(basename or ".coverage") + self._suffix = suffix + self._warn = warn + self._debug = debug or NoDebugging() + + self._choose_filename() + # Maps filenames to row ids. + self._file_map: dict[str, int] = {} + # Maps thread ids to SqliteDb objects. + self._dbs: dict[int, SqliteDb] = {} + self._pid = os.getpid() + # Synchronize the operations used during collection. + self._lock = threading.RLock() + + # Are we in sync with the data file? + self._have_used = False + + self._has_lines = False + self._has_arcs = False + + self._current_context: str | None = None + self._current_context_id: int | None = None + self._query_context_ids: list[int] | None = None + + __repr__ = auto_repr + + def _choose_filename(self) -> None: + """Set self._filename based on inited attributes.""" + if self._no_disk: + self._filename = ":memory:" + else: + self._filename = self._basename + suffix = filename_suffix(self._suffix) + if suffix: + self._filename += "." 
+ suffix + + def _reset(self) -> None: + """Reset our attributes.""" + if not self._no_disk: + for db in self._dbs.values(): + db.close() + self._dbs = {} + self._file_map = {} + self._have_used = False + self._current_context_id = None + + def _open_db(self) -> None: + """Open an existing db file, and read its metadata.""" + if self._debug.should("dataio"): + self._debug.write(f"Opening data file {self._filename!r}") + self._dbs[threading.get_ident()] = SqliteDb(self._filename, self._debug) + self._read_db() + + def _read_db(self) -> None: + """Read the metadata from a database so that we are ready to use it.""" + with self._dbs[threading.get_ident()] as db: + try: + row = db.execute_one("select version from coverage_schema") + assert row is not None + except Exception as exc: + if "no such table: coverage_schema" in str(exc): + self._init_db(db) + else: + raise DataError( + "Data file {!r} doesn't seem to be a coverage data file: {}".format( + self._filename, exc, + ), + ) from exc + else: + schema_version = row[0] + if schema_version != SCHEMA_VERSION: + raise DataError( + "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( + self._filename, schema_version, SCHEMA_VERSION, + ), + ) + + row = db.execute_one("select value from meta where key = 'has_arcs'") + if row is not None: + self._has_arcs = bool(int(row[0])) + self._has_lines = not self._has_arcs + + with db.execute("select id, path from file") as cur: + for file_id, path in cur: + self._file_map[path] = file_id + + def _init_db(self, db: SqliteDb) -> None: + """Write the initial contents of the database.""" + if self._debug.should("dataio"): + self._debug.write(f"Initing data file {self._filename!r}") + db.executescript(SCHEMA) + db.execute_void("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) + + # When writing metadata, avoid information that will needlessly change + # the hash of the data file, unless we're debugging processes. + meta_data = [ + ("version", __version__), + ] + if self._debug.should("process"): + meta_data.extend([ + ("sys_argv", str(getattr(sys, "argv", None))), + ("when", datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), + ]) + db.executemany_void("insert or ignore into meta (key, value) values (?, ?)", meta_data) + + def _connect(self) -> SqliteDb: + """Get the SqliteDb object to use.""" + if threading.get_ident() not in self._dbs: + self._open_db() + return self._dbs[threading.get_ident()] + + def __bool__(self) -> bool: + if (threading.get_ident() not in self._dbs and not os.path.exists(self._filename)): + return False + try: + with self._connect() as con: + with con.execute("select * from file limit 1") as cur: + return bool(list(cur)) + except CoverageException: + return False + + def dumps(self) -> bytes: + """Serialize the current data to a byte string. + + The format of the serialized data is not documented. It is only + suitable for use with :meth:`loads` in the same version of + coverage.py. + + Note that this serialization is not what gets stored in coverage data + files. This method is meant to produce bytes that can be transmitted + elsewhere and then deserialized with :meth:`loads`. + + Returns: + A byte string of serialized data. + + .. 
versionadded:: 5.0 + + """ + if self._debug.should("dataio"): + self._debug.write(f"Dumping data from data file {self._filename!r}") + with self._connect() as con: + script = con.dump() + return b"z" + zlib.compress(script.encode("utf-8")) + + def loads(self, data: bytes) -> None: + """Deserialize data from :meth:`dumps`. + + Use with a newly-created empty :class:`CoverageData` object. It's + undefined what happens if the object already has data in it. + + Note that this is not for reading data from a coverage data file. It + is only for use on data you produced with :meth:`dumps`. + + Arguments: + data: A byte string of serialized data produced by :meth:`dumps`. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataio"): + self._debug.write(f"Loading data into data file {self._filename!r}") + if data[:1] != b"z": + raise DataError( + f"Unrecognized serialization: {data[:40]!r} (head of {len(data)} bytes)", + ) + script = zlib.decompress(data[1:]).decode("utf-8") + self._dbs[threading.get_ident()] = db = SqliteDb(self._filename, self._debug) + with db: + db.executescript(script) + self._read_db() + self._have_used = True + + def _file_id(self, filename: str, add: bool = False) -> int | None: + """Get the file id for `filename`. + + If filename is not in the database yet, add it if `add` is True. + If `add` is not True, return None. + """ + if filename not in self._file_map: + if add: + with self._connect() as con: + self._file_map[filename] = con.execute_for_rowid( + "insert or replace into file (path) values (?)", + (filename,), + ) + return self._file_map.get(filename) + + def _context_id(self, context: str) -> int | None: + """Get the id for a context.""" + assert context is not None + self._start_using() + with self._connect() as con: + row = con.execute_one("select id from context where context = ?", (context,)) + if row is not None: + return cast(int, row[0]) + else: + return None + + @_locked + def set_context(self, context: str | None) -> None: + """Set the current context for future :meth:`add_lines` etc. + + `context` is a str, the name of the context to use for the next data + additions. The context persists until the next :meth:`set_context`. + + .. versionadded:: 5.0 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Setting coverage context: {context!r}") + self._current_context = context + self._current_context_id = None + + def _set_context_id(self) -> None: + """Use the _current_context to set _current_context_id.""" + context = self._current_context or "" + context_id = self._context_id(context) + if context_id is not None: + self._current_context_id = context_id + else: + with self._connect() as con: + self._current_context_id = con.execute_for_rowid( + "insert into context (context) values (?)", + (context,), + ) + + def base_filename(self) -> str: + """The base filename for storing data. + + .. versionadded:: 5.0 + + """ + return self._basename + + def data_filename(self) -> str: + """Where is the data stored? + + .. versionadded:: 5.0 + + """ + return self._filename + + @_locked + def add_lines(self, line_data: Mapping[str, Collection[TLineNo]]) -> None: + """Add measured line data. + + `line_data` is a dictionary mapping file names to iterables of ints:: + + { filename: { line1, line2, ... 
}, ...} + + """ + if self._debug.should("dataop"): + self._debug.write("Adding lines: %d files, %d lines total" % ( + len(line_data), sum(len(lines) for lines in line_data.values()), + )) + if self._debug.should("dataop2"): + for filename, linenos in sorted(line_data.items()): + self._debug.write(f" {filename}: {linenos}") + self._start_using() + self._choose_lines_or_arcs(lines=True) + if not line_data: + return + with self._connect() as con: + self._set_context_id() + for filename, linenos in line_data.items(): + line_bits = nums_to_numbits(linenos) + file_id = self._file_id(filename, add=True) + query = "select numbits from line_bits where file_id = ? and context_id = ?" + with con.execute(query, (file_id, self._current_context_id)) as cur: + existing = list(cur) + if existing: + line_bits = numbits_union(line_bits, existing[0][0]) + + con.execute_void( + "insert or replace into line_bits " + + " (file_id, context_id, numbits) values (?, ?, ?)", + (file_id, self._current_context_id, line_bits), + ) + + @_locked + def add_arcs(self, arc_data: Mapping[str, Collection[TArc]]) -> None: + """Add measured arc data. + + `arc_data` is a dictionary mapping file names to iterables of pairs of + ints:: + + { filename: { (l1,l2), (l1,l2), ... }, ...} + + """ + if self._debug.should("dataop"): + self._debug.write("Adding arcs: %d files, %d arcs total" % ( + len(arc_data), sum(len(arcs) for arcs in arc_data.values()), + )) + if self._debug.should("dataop2"): + for filename, arcs in sorted(arc_data.items()): + self._debug.write(f" {filename}: {arcs}") + self._start_using() + self._choose_lines_or_arcs(arcs=True) + if not arc_data: + return + with self._connect() as con: + self._set_context_id() + for filename, arcs in arc_data.items(): + if not arcs: + continue + file_id = self._file_id(filename, add=True) + data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] + con.executemany_void( + "insert or ignore into arc " + + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", + data, + ) + + def _choose_lines_or_arcs(self, lines: bool = False, arcs: bool = False) -> None: + """Force the data file to choose between lines and arcs.""" + assert lines or arcs + assert not (lines and arcs) + if lines and self._has_arcs: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add line measurements to existing branch data") + raise DataError("Can't add line measurements to existing branch data") + if arcs and self._has_lines: + if self._debug.should("dataop"): + self._debug.write("Error: Can't add branch measurements to existing line data") + raise DataError("Can't add branch measurements to existing line data") + if not self._has_arcs and not self._has_lines: + self._has_lines = lines + self._has_arcs = arcs + with self._connect() as con: + con.execute_void( + "insert or ignore into meta (key, value) values (?, ?)", + ("has_arcs", str(int(arcs))), + ) + + @_locked + def add_file_tracers(self, file_tracers: Mapping[str, str]) -> None: + """Add per-file plugin information. + + `file_tracers` is { filename: plugin_name, ... 
} + + """ + if self._debug.should("dataop"): + self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) + if not file_tracers: + return + self._start_using() + with self._connect() as con: + for filename, plugin_name in file_tracers.items(): + file_id = self._file_id(filename, add=True) + existing_plugin = self.file_tracer(filename) + if existing_plugin: + if existing_plugin != plugin_name: + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + filename, existing_plugin, plugin_name, + ), + ) + elif plugin_name: + con.execute_void( + "insert into tracer (file_id, tracer) values (?, ?)", + (file_id, plugin_name), + ) + + def touch_file(self, filename: str, plugin_name: str = "") -> None: + """Ensure that `filename` appears in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for this file. + It is used to associate the right filereporter, etc. + """ + self.touch_files([filename], plugin_name) + + def touch_files(self, filenames: Collection[str], plugin_name: str | None = None) -> None: + """Ensure that `filenames` appear in the data, empty if needed. + + `plugin_name` is the name of the plugin responsible for these files. + It is used to associate the right filereporter, etc. + """ + if self._debug.should("dataop"): + self._debug.write(f"Touching {filenames!r}") + self._start_using() + with self._connect(): # Use this to get one transaction. + if not self._has_arcs and not self._has_lines: + raise DataError("Can't touch files in an empty CoverageData") + + for filename in filenames: + self._file_id(filename, add=True) + if plugin_name: + # Set the tracer for this file + self.add_file_tracers({filename: plugin_name}) + + def purge_files(self, filenames: Collection[str]) -> None: + """Purge any existing coverage data for the given `filenames`. + + .. versionadded:: 7.2 + + """ + if self._debug.should("dataop"): + self._debug.write(f"Purging data for {filenames!r}") + self._start_using() + with self._connect() as con: + + if self._has_lines: + sql = "delete from line_bits where file_id=?" + elif self._has_arcs: + sql = "delete from arc where file_id=?" + else: + raise DataError("Can't purge files in an empty CoverageData") + + for filename in filenames: + file_id = self._file_id(filename, add=False) + if file_id is None: + continue + con.execute_void(sql, (file_id,)) + + def update( + self, + other_data: CoverageData, + map_path: Callable[[str], str] | None = None, + ) -> None: + """Update this data with data from another :class:`CoverageData`. + + If `map_path` is provided, it's a function that re-map paths to match + the local machine's. Note: `map_path` is None only when called + directly from the test suite. + + """ + if self._debug.should("dataop"): + self._debug.write("Updating with data from {!r}".format( + getattr(other_data, "_filename", "???"), + )) + if self._has_lines and other_data._has_arcs: + raise DataError("Can't combine branch coverage data with statement data") + if self._has_arcs and other_data._has_lines: + raise DataError("Can't combine statement coverage data with branch data") + + map_path = map_path or (lambda p: p) + + # Force the database we're writing to to exist before we start nesting contexts. + self._start_using() + + # Collector for all arcs, lines and tracers + other_data.read() + with other_data._connect() as con: + # Get files data. + with con.execute("select path from file") as cur: + files = {path: map_path(path) for (path,) in cur} + + # Get contexts data. 
+ with con.execute("select context from context") as cur: + contexts = [context for (context,) in cur] + + # Get arc data. + with con.execute( + "select file.path, context.context, arc.fromno, arc.tono " + + "from arc " + + "inner join file on file.id = arc.file_id " + + "inner join context on context.id = arc.context_id", + ) as cur: + arcs = [ + (files[path], context, fromno, tono) + for (path, context, fromno, tono) in cur + ] + + # Get line data. + with con.execute( + "select file.path, context.context, line_bits.numbits " + + "from line_bits " + + "inner join file on file.id = line_bits.file_id " + + "inner join context on context.id = line_bits.context_id", + ) as cur: + lines: dict[tuple[str, str], bytes] = {} + for path, context, numbits in cur: + key = (files[path], context) + if key in lines: + numbits = numbits_union(lines[key], numbits) + lines[key] = numbits + + # Get tracer data. + with con.execute( + "select file.path, tracer " + + "from tracer " + + "inner join file on file.id = tracer.file_id", + ) as cur: + tracers = {files[path]: tracer for (path, tracer) in cur} + + with self._connect() as con: + assert con.con is not None + con.con.isolation_level = "IMMEDIATE" + + # Get all tracers in the DB. Files not in the tracers are assumed + # to have an empty string tracer. Since Sqlite does not support + # full outer joins, we have to make two queries to fill the + # dictionary. + with con.execute("select path from file") as cur: + this_tracers = {path: "" for path, in cur} + with con.execute( + "select file.path, tracer from tracer " + + "inner join file on file.id = tracer.file_id", + ) as cur: + this_tracers.update({ + map_path(path): tracer + for path, tracer in cur + }) + + # Create all file and context rows in the DB. + con.executemany_void( + "insert or ignore into file (path) values (?)", + ((file,) for file in files.values()), + ) + with con.execute("select id, path from file") as cur: + file_ids = {path: id for id, path in cur} + self._file_map.update(file_ids) + con.executemany_void( + "insert or ignore into context (context) values (?)", + ((context,) for context in contexts), + ) + with con.execute("select id, context from context") as cur: + context_ids = {context: id for id, context in cur} + + # Prepare tracers and fail, if a conflict is found. + # tracer_paths is used to ensure consistency over the tracer data + # and tracer_map tracks the tracers to be inserted. + tracer_map = {} + for path in files.values(): + this_tracer = this_tracers.get(path) + other_tracer = tracers.get(path, "") + # If there is no tracer, there is always the None tracer. + if this_tracer is not None and this_tracer != other_tracer: + raise DataError( + "Conflicting file tracer name for '{}': {!r} vs {!r}".format( + path, this_tracer, other_tracer, + ), + ) + tracer_map[path] = other_tracer + + # Prepare arc and line rows to be inserted by converting the file + # and context strings with integer ids. Then use the efficient + # `executemany()` to insert all rows at once. + + if arcs: + self._choose_lines_or_arcs(arcs=True) + + arc_rows = ( + (file_ids[file], context_ids[context], fromno, tono) + for file, context, fromno, tono in arcs + ) + + # Write the combined data. + con.executemany_void( + "insert or ignore into arc " + + "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", + arc_rows, + ) + + if lines: + self._choose_lines_or_arcs(lines=True) + + for (file, context), numbits in lines.items(): + with con.execute( + "select numbits from line_bits where file_id = ? 
and context_id = ?", + (file_ids[file], context_ids[context]), + ) as cur: + existing = list(cur) + if existing: + lines[(file, context)] = numbits_union(numbits, existing[0][0]) + + con.executemany_void( + "insert or replace into line_bits " + + "(file_id, context_id, numbits) values (?, ?, ?)", + [ + (file_ids[file], context_ids[context], numbits) + for (file, context), numbits in lines.items() + ], + ) + + con.executemany_void( + "insert or ignore into tracer (file_id, tracer) values (?, ?)", + ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()), + ) + + if not self._no_disk: + # Update all internal cache data. + self._reset() + self.read() + + def erase(self, parallel: bool = False) -> None: + """Erase the data in this object. + + If `parallel` is true, then also deletes data files created from the + basename by parallel-mode. + + """ + self._reset() + if self._no_disk: + return + if self._debug.should("dataio"): + self._debug.write(f"Erasing data file {self._filename!r}") + file_be_gone(self._filename) + if parallel: + data_dir, local = os.path.split(self._filename) + local_abs_path = os.path.join(os.path.abspath(data_dir), local) + pattern = glob.escape(local_abs_path) + ".*" + for filename in glob.glob(pattern): + if self._debug.should("dataio"): + self._debug.write(f"Erasing parallel data file {filename!r}") + file_be_gone(filename) + + def read(self) -> None: + """Start using an existing data file.""" + if os.path.exists(self._filename): + with self._connect(): + self._have_used = True + + def write(self) -> None: + """Ensure the data is written to the data file.""" + pass + + def _start_using(self) -> None: + """Call this before using the database at all.""" + if self._pid != os.getpid(): + # Looks like we forked! Have to start a new data file. + self._reset() + self._choose_filename() + self._pid = os.getpid() + if not self._have_used: + self.erase() + self._have_used = True + + def has_arcs(self) -> bool: + """Does the database have arcs (True) or lines (False).""" + return bool(self._has_arcs) + + def measured_files(self) -> set[str]: + """A set of all files that have been measured. + + Note that a file may be mentioned as measured even though no lines or + arcs for that file are present in the data. + + """ + return set(self._file_map) + + def measured_contexts(self) -> set[str]: + """A set of all contexts that have been measured. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + with con.execute("select distinct(context) from context") as cur: + contexts = {row[0] for row in cur} + return contexts + + def file_tracer(self, filename: str) -> str | None: + """Get the plugin name of the file tracer for a file. + + Returns the name of the plugin that handles this file. If the file was + measured, but didn't use a plugin, then "" is returned. If the file + was not measured, then None is returned. + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return None + row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,)) + if row is not None: + return row[0] or "" + return "" # File was measured, but no tracer associated. + + def set_query_context(self, context: str) -> None: + """Set a context for subsequent querying. + + The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` + calls will be limited to only one context. `context` is a string which + must match a context exactly. 
If it does not, no exception is raised, but queries will return no data.
+
+        .. versionadded:: 5.0
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            with con.execute("select id from context where context = ?", (context,)) as cur:
+                self._query_context_ids = [row[0] for row in cur.fetchall()]
+
+    def set_query_contexts(self, contexts: Sequence[str] | None) -> None:
+        """Set a number of contexts for subsequent querying.
+
+        The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
+        calls will be limited to the specified contexts. `contexts` is a list
+        of Python regular expressions. Contexts will be matched using
+        :func:`re.search <python:re.search>`. Data will be included in query
+        results if they are part of any of the contexts matched.
+
+        .. versionadded:: 5.0
+
+        """
+        self._start_using()
+        if contexts:
+            with self._connect() as con:
+                context_clause = " or ".join(["context regexp ?"] * len(contexts))
+                with con.execute("select id from context where " + context_clause, contexts) as cur:
+                    self._query_context_ids = [row[0] for row in cur.fetchall()]
+        else:
+            self._query_context_ids = None
+
+    def lines(self, filename: str) -> list[TLineNo] | None:
+        """Get the list of lines executed for a source file.
+
+        If the file was not measured, returns None. A file might be measured,
+        and have no lines executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of integers, the line numbers
+        executed in the file. The list is in no particular order.
+
+        """
+        self._start_using()
+        if self.has_arcs():
+            arcs = self.arcs(filename)
+            if arcs is not None:
+                all_lines = itertools.chain.from_iterable(arcs)
+                return list({l for l in all_lines if l > 0})
+
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                return None
+            else:
+                query = "select numbits from line_bits where file_id = ?"
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    ids_array = ", ".join("?" * len(self._query_context_ids))
+                    query += " and context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                with con.execute(query, data) as cur:
+                    bitmaps = list(cur)
+                nums = set()
+                for row in bitmaps:
+                    nums.update(numbits_to_nums(row[0]))
+                return list(nums)
+
+    def arcs(self, filename: str) -> list[TArc] | None:
+        """Get the list of arcs executed for a file.
+
+        If the file was not measured, returns None. A file might be measured,
+        and have no arcs executed, in which case an empty list is returned.
+
+        If the file was executed, returns a list of 2-tuples of integers. Each
+        pair is a starting line number and an ending line number for a
+        transition from one line to another. The list is in no particular
+        order.
+
+        Negative numbers have special meaning. If the starting line number is
+        -N, it represents an entry to the code object that starts at line N.
+        If the ending line number is -N, it's an exit from the code object
+        that starts at line N.
+
+        """
+        self._start_using()
+        with self._connect() as con:
+            file_id = self._file_id(filename)
+            if file_id is None:
+                return None
+            else:
+                query = "select distinct fromno, tono from arc where file_id = ?"
+                data = [file_id]
+                if self._query_context_ids is not None:
+                    ids_array = ", ".join("?" * len(self._query_context_ids))
+                    query += " and context_id in (" + ids_array + ")"
+                    data += self._query_context_ids
+                with con.execute(query, data) as cur:
+                    return list(cur)
+
+    def contexts_by_lineno(self, filename: str) -> dict[TLineNo, list[str]]:
+        """Get the contexts for each line in a file.
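+
+        Arc data is consulted when branch measurement was used; otherwise the
+        line_bits table is queried.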
+ + Returns: + A dict mapping line numbers to a list of context names. + + .. versionadded:: 5.0 + + """ + self._start_using() + with self._connect() as con: + file_id = self._file_id(filename) + if file_id is None: + return {} + + lineno_contexts_map = collections.defaultdict(set) + if self.has_arcs(): + query = ( + "select arc.fromno, arc.tono, context.context " + + "from arc, context " + + "where arc.file_id = ? and arc.context_id = context.id" + ) + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " and arc.context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for fromno, tono, context in cur: + if fromno > 0: + lineno_contexts_map[fromno].add(context) + if tono > 0: + lineno_contexts_map[tono].add(context) + else: + query = ( + "select l.numbits, c.context from line_bits l, context c " + + "where l.context_id = c.id " + + "and file_id = ?" + ) + data = [file_id] + if self._query_context_ids is not None: + ids_array = ", ".join("?" * len(self._query_context_ids)) + query += " and l.context_id in (" + ids_array + ")" + data += self._query_context_ids + with con.execute(query, data) as cur: + for numbits, context in cur: + for lineno in numbits_to_nums(numbits): + lineno_contexts_map[lineno].add(context) + + return {lineno: list(contexts) for lineno, contexts in lineno_contexts_map.items()} + + @classmethod + def sys_info(cls) -> list[tuple[str, Any]]: + """Our information for `Coverage.sys_info`. + + Returns a list of (key, value) pairs. + + """ + with SqliteDb(":memory:", debug=NoDebugging()) as db: + with db.execute("pragma temp_store") as cur: + temp_store = [row[0] for row in cur] + with db.execute("pragma compile_options") as cur: + copts = [row[0] for row in cur] + copts = textwrap.wrap(", ".join(copts), width=75) + + return [ + ("sqlite3_sqlite_version", sqlite3.sqlite_version), + ("sqlite3_temp_store", temp_store), + ("sqlite3_compile_options", copts), + ] + + +def filename_suffix(suffix: str | bool | None) -> str | None: + """Compute a filename suffix for a data file. + + If `suffix` is a string or None, simply return it. If `suffix` is True, + then build a suffix incorporating the hostname, process id, and a random + number. + + Returns a string or None. + + """ + if suffix is True: + # If data_suffix was a simple true value, then make a suffix with + # plenty of distinguishing information. We do this here in + # `save()` at the last minute so that the pid will be correct even + # if the process forks. 
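+        # The result looks like "myhost.12345.XbGafQkx" -- hostname, pid,
+        # and six random letters (illustrative values).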
+ die = random.Random(os.urandom(8)) + letters = string.ascii_uppercase + string.ascii_lowercase + rolls = "".join(die.choice(letters) for _ in range(6)) + suffix = f"{socket.gethostname()}.{os.getpid()}.X{rolls}x" + elif suffix is False: + suffix = None + return suffix diff --git a/.venv/Lib/site-packages/coverage/sqlitedb.py b/.venv/Lib/site-packages/coverage/sqlitedb.py new file mode 100644 index 00000000..1bbe7e1b --- /dev/null +++ b/.venv/Lib/site-packages/coverage/sqlitedb.py @@ -0,0 +1,231 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""SQLite abstraction for coverage.py""" + +from __future__ import annotations + +import contextlib +import re +import sqlite3 + +from typing import cast, Any +from collections.abc import Iterable, Iterator + +from coverage.debug import auto_repr, clipped_repr, exc_one_line +from coverage.exceptions import DataError +from coverage.types import TDebugCtl + + +class SqliteDb: + """A simple abstraction over a SQLite database. + + Use as a context manager, then you can use it like a + :class:`python:sqlite3.Connection` object:: + + with SqliteDb(filename, debug_control) as db: + with db.execute("select a, b from some_table") as cur: + for a, b in cur: + etc(a, b) + + """ + def __init__(self, filename: str, debug: TDebugCtl) -> None: + self.debug = debug + self.filename = filename + self.nest = 0 + self.con: sqlite3.Connection | None = None + + __repr__ = auto_repr + + def _connect(self) -> None: + """Connect to the db and do universal initialization.""" + if self.con is not None: + return + + # It can happen that Python switches threads while the tracer writes + # data. The second thread will also try to write to the data, + # effectively causing a nested context. However, given the idempotent + # nature of the tracer operations, sharing a connection among threads + # is not a problem. + if self.debug.should("sql"): + self.debug.write(f"Connecting to {self.filename!r}") + try: + self.con = sqlite3.connect(self.filename, check_same_thread=False) + except sqlite3.Error as exc: + raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc + + if self.debug.should("sql"): + self.debug.write(f"Connected to {self.filename!r} as {self.con!r}") + + self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None) + + # Turning off journal_mode can speed up writing. It can't always be + # disabled, so we have to be prepared for *-journal files elsewhere. + # In Python 3.12+, we can change the config to allow journal_mode=off. + if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"): + # Turn off defensive mode, so that journal_mode=off can succeed. + self.con.setconfig( # type: ignore[attr-defined, unused-ignore] + sqlite3.SQLITE_DBCONFIG_DEFENSIVE, False, + ) + + # This pragma makes writing faster. It disables rollbacks, but we never need them. + self.execute_void("pragma journal_mode=off") + + # This pragma makes writing faster. It can fail in unusual situations + # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True + # to keep things going. 
+ self.execute_void("pragma synchronous=off", fail_ok=True) + + def close(self) -> None: + """If needed, close the connection.""" + if self.con is not None and self.filename != ":memory:": + if self.debug.should("sql"): + self.debug.write(f"Closing {self.con!r} on {self.filename!r}") + self.con.close() + self.con = None + + def __enter__(self) -> SqliteDb: + if self.nest == 0: + self._connect() + assert self.con is not None + self.con.__enter__() + self.nest += 1 + return self + + def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore[no-untyped-def] + self.nest -= 1 + if self.nest == 0: + try: + assert self.con is not None + self.con.__exit__(exc_type, exc_value, traceback) + self.close() + except Exception as exc: + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}") + raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc + + def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.execute`.""" + if self.debug.should("sql"): + tail = f" with {parameters!r}" if parameters else "" + self.debug.write(f"Executing {sql!r}{tail}") + try: + assert self.con is not None + try: + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. + # https://github.com/nedbat/coveragepy/issues/1010 + return self.con.execute(sql, parameters) # type: ignore[arg-type] + except sqlite3.Error as exc: + msg = str(exc) + if self.filename != ":memory:": + try: + # `execute` is the first thing we do with the database, so try + # hard to provide useful hints if something goes wrong now. + with open(self.filename, "rb") as bad_file: + cov4_sig = b"!coverage.py: This is a private format" + if bad_file.read(len(cov4_sig)) == cov4_sig: + msg = ( + "Looks like a coverage 4.x data file. " + + "Are you mixing versions of coverage?" + ) + except Exception: + pass + if self.debug.should("sql"): + self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}") + raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc + + @contextlib.contextmanager + def execute( + self, + sql: str, + parameters: Iterable[Any] = (), + ) -> Iterator[sqlite3.Cursor]: + """Context managed :meth:`python:sqlite3.Connection.execute`. + + Use with a ``with`` statement to auto-close the returned cursor. + """ + cur = self._execute(sql, parameters) + try: + yield cur + finally: + cur.close() + + def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None: + """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor. + + If `fail_ok` is True, then SQLite errors are ignored. + """ + try: + # PyPy needs the .close() calls here, or sqlite gets twisted up: + # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on + self._execute(sql, parameters).close() + except DataError: + if not fail_ok: + raise + + def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int: + """Like execute, but returns the lastrowid.""" + with self.execute(sql, parameters) as cur: + assert cur.lastrowid is not None + rowid: int = cur.lastrowid + if self.debug.should("sqldata"): + self.debug.write(f"Row id result: {rowid!r}") + return rowid + + def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] 
| None: + """Execute a statement and return the one row that results. + + This is like execute(sql, parameters).fetchone(), except it is + correct in reading the entire result set. This will raise an + exception if more than one row results. + + Returns a row, or None if there were no rows. + """ + with self.execute(sql, parameters) as cur: + rows = list(cur) + if len(rows) == 0: + return None + elif len(rows) == 1: + return cast(tuple[Any, ...], rows[0]) + else: + raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows") + + def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor: + """Same as :meth:`python:sqlite3.Connection.executemany`.""" + if self.debug.should("sql"): + final = ":" if self.debug.should("sqldata") else "" + self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}") + if self.debug.should("sqldata"): + for i, row in enumerate(data): + self.debug.write(f"{i:4d}: {row!r}") + assert self.con is not None + try: + return self.con.executemany(sql, data) + except Exception: + # In some cases, an error might happen that isn't really an + # error. Try again immediately. + # https://github.com/nedbat/coveragepy/issues/1010 + return self.con.executemany(sql, data) + + def executemany_void(self, sql: str, data: Iterable[Any]) -> None: + """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor.""" + data = list(data) + if data: + self._executemany(sql, data).close() + + def executescript(self, script: str) -> None: + """Same as :meth:`python:sqlite3.Connection.executescript`.""" + if self.debug.should("sql"): + self.debug.write("Executing script with {} chars: {}".format( + len(script), clipped_repr(script, 100), + )) + assert self.con is not None + self.con.executescript(script).close() + + def dump(self) -> str: + """Return a multi-line string, the SQL dump of the database.""" + assert self.con is not None + return "\n".join(self.con.iterdump()) diff --git a/.venv/Lib/site-packages/coverage/sysmon.py b/.venv/Lib/site-packages/coverage/sysmon.py new file mode 100644 index 00000000..2252ae11 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/sysmon.py @@ -0,0 +1,433 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""Callback functions and support for sys.monitoring data collection.""" + +from __future__ import annotations + +import functools +import inspect +import os +import os.path +import sys +import threading +import traceback + +from dataclasses import dataclass +from types import CodeType, FrameType +from typing import ( + Any, + Callable, + TYPE_CHECKING, + cast, +) + +from coverage.debug import short_filename, short_stack +from coverage.types import ( + AnyCallable, + TArc, + TFileDisposition, + TLineNo, + TShouldStartContextFn, + TShouldTraceFn, + TTraceData, + TTraceFileData, + Tracer, + TWarnFn, +) + +# pylint: disable=unused-argument + +LOG = False + +# This module will be imported in all versions of Python, but only used in 3.12+ +# It will be type-checked for 3.12, but not for earlier versions. 
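+# A minimal probe of that rule (illustrative, not part of upstream
+# coverage.py): on 3.11 and earlier the attribute is absent, so the getattr
+# below leaves sys_monitoring as None and this tracer goes unused.
+#
+#     import sys
+#     print("sys.monitoring available:", getattr(sys, "monitoring", None) is not None)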
+sys_monitoring = getattr(sys, "monitoring", None)
+
+if TYPE_CHECKING:
+    assert sys_monitoring is not None
+    # I want to say this but it's not allowed:
+    # MonitorReturn = Literal[sys.monitoring.DISABLE] | None
+    MonitorReturn = Any
+
+
+if LOG:  # pragma: debugging
+
+    class LoggingWrapper:
+        """Wrap a namespace to log all its functions."""
+
+        def __init__(self, wrapped: Any, namespace: str) -> None:
+            self.wrapped = wrapped
+            self.namespace = namespace
+
+        def __getattr__(self, name: str) -> Callable[..., Any]:
+            def _wrapped(*args: Any, **kwargs: Any) -> Any:
+                log(f"{self.namespace}.{name}{args}{kwargs}")
+                return getattr(self.wrapped, name)(*args, **kwargs)
+
+            return _wrapped
+
+    sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring")
+    assert sys_monitoring is not None
+
+    short_stack = functools.partial(
+        short_stack, full=True, short_filenames=True, frame_ids=True,
+    )
+    seen_threads: set[int] = set()
+
+    def log(msg: str) -> None:
+        """Write a message to our detailed debugging log(s)."""
+        # Thread ids are reused across processes?
+        # Make a shorter number more likely to be unique.
+        pid = os.getpid()
+        tid = cast(int, threading.current_thread().ident)
+        tslug = f"{(pid * tid) % 9_999_991:07d}"
+        if tid not in seen_threads:
+            seen_threads.add(tid)
+            log(f"New thread {tid} {tslug}:\n{short_stack()}")
+        # log_seq = int(os.getenv("PANSEQ", "0"))
+        # root = f"/tmp/pan.{log_seq:03d}"
+        for filename in [
+            "/tmp/foo.out",
+            # f"{root}.out",
+            # f"{root}-{pid}.out",
+            # f"{root}-{pid}-{tslug}.out",
+        ]:
+            with open(filename, "a") as f:
+                print(f"{pid}:{tslug}: {msg}", file=f, flush=True)
+
+    def arg_repr(arg: Any) -> str:
+        """Make a customized repr for logged values."""
+        if isinstance(arg, CodeType):
+            return (
+                f"<code @{id(arg):#x} name={arg.co_name},"
+                f" file={short_filename(arg.co_filename)!r}@{arg.co_firstlineno}>"
+            )
+        return repr(arg)
+
+    def panopticon(*names: str | None) -> AnyCallable:
+        """Decorate a function to log its calls."""
+
+        def _decorator(method: AnyCallable) -> AnyCallable:
+            @functools.wraps(method)
+            def _wrapped(self: Any, *args: Any) -> Any:
+                try:
+                    # log(f"{method.__name__}() stack:\n{short_stack()}")
+                    args_reprs = []
+                    for name, arg in zip(names, args):
+                        if name is None:
+                            continue
+                        args_reprs.append(f"{name}={arg_repr(arg)}")
+                    log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
+                    ret = method(self, *args)
+                    # log(f" end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
+                    return ret
+                except Exception as exc:
+                    log(f"!!{exc.__class__.__name__}: {exc}")
+                    log("".join(traceback.format_exception(exc)))  # pylint: disable=[no-value-for-parameter]
+                    try:
+                        assert sys_monitoring is not None
+                        sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0)
+                    except ValueError:
+                        # We might have already shut off monitoring.
+                        log("oops, shutting off events with disabled tool id")
+                    raise
+
+            return _wrapped
+
+        return _decorator
+
+else:
+
+    def log(msg: str) -> None:
+        """Write a message to our detailed debugging log(s), but not really."""
+
+    def panopticon(*names: str | None) -> AnyCallable:
+        """Decorate a function to log its calls, but not really."""
+
+        def _decorator(meth: AnyCallable) -> AnyCallable:
+            return meth
+
+        return _decorator
+
+
+@dataclass
+class CodeInfo:
+    """The information we want about each code object."""
+
+    tracing: bool
+    file_data: TTraceFileData | None
+    # TODO: what is byte_to_line for?
+    byte_to_line: dict[int, int] | None
+
+
+def bytes_to_lines(code: CodeType) -> dict[int, int]:
+    """Make a dict mapping byte code offsets to line numbers."""
+    b2l = {}
+    for bstart, bend, lineno in code.co_lines():
+        if lineno is not None:
+            for boffset in range(bstart, bend, 2):
+                b2l[boffset] = lineno
+    return b2l
+
+
+class SysMonitor(Tracer):
+    """Python implementation of the raw data tracer for PEP669 implementations."""
+
+    # One of these will be used across threads. Be careful.
+
+    def __init__(self, tool_id: int) -> None:
+        # Attributes set from the collector:
+        self.data: TTraceData
+        self.trace_arcs = False
+        self.should_trace: TShouldTraceFn
+        self.should_trace_cache: dict[str, TFileDisposition | None]
+        # TODO: should_start_context and switch_context are unused!
+        # Change tests/testenv.py:DYN_CONTEXTS when this is updated.
+        self.should_start_context: TShouldStartContextFn | None = None
+        self.switch_context: Callable[[str | None], None] | None = None
+        self.lock_data: Callable[[], None]
+        self.unlock_data: Callable[[], None]
+        # TODO: warn is unused.
+        self.warn: TWarnFn
+
+        self.myid = tool_id
+
+        # Map id(code_object) -> CodeInfo
+        self.code_infos: dict[int, CodeInfo] = {}
+        # A list of code_objects, just to keep them alive so that id's are
+        # useful as identity.
+        self.code_objects: list[CodeType] = []
+        self.last_lines: dict[FrameType, int] = {}
+        # Map id(code_object) -> code_object
+        self.local_event_codes: dict[int, CodeType] = {}
+        self.sysmon_on = False
+        self.lock = threading.Lock()
+
+        self.stats = {
+            "starts": 0,
+        }
+
+        self.stopped = False
+        self._activity = False
+
+    def __repr__(self) -> str:
+        points = sum(len(v) for v in self.data.values())
+        files = len(self.data)
+        return f"<SysMonitor at {id(self):#x}: {points} data points in {files} files>"
+
+    @panopticon()
+    def start(self) -> None:
+        """Start this Tracer."""
+        self.stopped = False
+
+        assert sys_monitoring is not None
+        sys_monitoring.use_tool_id(self.myid, "coverage.py")
+        register = functools.partial(sys_monitoring.register_callback, self.myid)
+        events = sys_monitoring.events
+        if self.trace_arcs:
+            sys_monitoring.set_events(
+                self.myid,
+                events.PY_START | events.PY_UNWIND,
+            )
+            register(events.PY_START, self.sysmon_py_start)
+            register(events.PY_RESUME, self.sysmon_py_resume_arcs)
+            register(events.PY_RETURN, self.sysmon_py_return_arcs)
+            register(events.PY_UNWIND, self.sysmon_py_unwind_arcs)
+            register(events.LINE, self.sysmon_line_arcs)
+        else:
+            sys_monitoring.set_events(self.myid, events.PY_START)
+            register(events.PY_START, self.sysmon_py_start)
+            register(events.LINE, self.sysmon_line_lines)
+        sys_monitoring.restart_events()
+        self.sysmon_on = True
+
+    @panopticon()
+    def stop(self) -> None:
+        """Stop this Tracer."""
+        if not self.sysmon_on:
+            # In forking situations, we might try to stop when we are not
+            # started. Do nothing in that case.
+ return + assert sys_monitoring is not None + sys_monitoring.set_events(self.myid, 0) + with self.lock: + self.sysmon_on = False + for code in self.local_event_codes.values(): + sys_monitoring.set_local_events(self.myid, code, 0) + self.local_event_codes = {} + sys_monitoring.free_tool_id(self.myid) + + @panopticon() + def post_fork(self) -> None: + """The process has forked, clean up as needed.""" + self.stop() + + def activity(self) -> bool: + """Has there been any activity?""" + return self._activity + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + self._activity = False + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + return None + + # The number of frames in callers_frame takes @panopticon into account. + if LOG: + + def callers_frame(self) -> FrameType: + """Get the frame of the Python code we're monitoring.""" + return ( + inspect.currentframe().f_back.f_back.f_back # type: ignore[union-attr,return-value] + ) + + else: + + def callers_frame(self) -> FrameType: + """Get the frame of the Python code we're monitoring.""" + return inspect.currentframe().f_back.f_back # type: ignore[union-attr,return-value] + + @panopticon("code", "@") + def sysmon_py_start(self, code: CodeType, instruction_offset: int) -> MonitorReturn: + """Handle sys.monitoring.events.PY_START events.""" + # Entering a new frame. Decide if we should trace in this file. + self._activity = True + self.stats["starts"] += 1 + + code_info = self.code_infos.get(id(code)) + tracing_code: bool | None = None + file_data: TTraceFileData | None = None + if code_info is not None: + tracing_code = code_info.tracing + file_data = code_info.file_data + + if tracing_code is None: + filename = code.co_filename + disp = self.should_trace_cache.get(filename) + if disp is None: + frame = inspect.currentframe().f_back # type: ignore[union-attr] + if LOG: + # @panopticon adds a frame. 
+ frame = frame.f_back # type: ignore[union-attr] + disp = self.should_trace(filename, frame) # type: ignore[arg-type] + self.should_trace_cache[filename] = disp + + tracing_code = disp.trace + if tracing_code: + tracename = disp.source_filename + assert tracename is not None + self.lock_data() + try: + if tracename not in self.data: + self.data[tracename] = set() + finally: + self.unlock_data() + file_data = self.data[tracename] + b2l = bytes_to_lines(code) + else: + file_data = None + b2l = None + + self.code_infos[id(code)] = CodeInfo( + tracing=tracing_code, + file_data=file_data, + byte_to_line=b2l, + ) + self.code_objects.append(code) + + if tracing_code: + events = sys.monitoring.events + with self.lock: + if self.sysmon_on: + assert sys_monitoring is not None + sys_monitoring.set_local_events( + self.myid, + code, + events.PY_RETURN + # + | events.PY_RESUME + # | events.PY_YIELD + | events.LINE, + # | events.BRANCH + # | events.JUMP + ) + self.local_event_codes[id(code)] = code + + if tracing_code and self.trace_arcs: + frame = self.callers_frame() + self.last_lines[frame] = -code.co_firstlineno + return None + else: + return sys.monitoring.DISABLE + + @panopticon("code", "@") + def sysmon_py_resume_arcs( + self, code: CodeType, instruction_offset: int, + ) -> MonitorReturn: + """Handle sys.monitoring.events.PY_RESUME events for branch coverage.""" + frame = self.callers_frame() + self.last_lines[frame] = frame.f_lineno + + @panopticon("code", "@", None) + def sysmon_py_return_arcs( + self, code: CodeType, instruction_offset: int, retval: object, + ) -> MonitorReturn: + """Handle sys.monitoring.events.PY_RETURN events for branch coverage.""" + frame = self.callers_frame() + code_info = self.code_infos.get(id(code)) + if code_info is not None and code_info.file_data is not None: + last_line = self.last_lines.get(frame) + if last_line is not None: + arc = (last_line, -code.co_firstlineno) + # log(f"adding {arc=}") + cast(set[TArc], code_info.file_data).add(arc) + + # Leaving this function, no need for the frame any more. + self.last_lines.pop(frame, None) + + @panopticon("code", "@", "exc") + def sysmon_py_unwind_arcs( + self, code: CodeType, instruction_offset: int, exception: BaseException, + ) -> MonitorReturn: + """Handle sys.monitoring.events.PY_UNWIND events for branch coverage.""" + frame = self.callers_frame() + # Leaving this function. + last_line = self.last_lines.pop(frame, None) + if isinstance(exception, GeneratorExit): + # We don't want to count generator exits as arcs. 
+ return + code_info = self.code_infos.get(id(code)) + if code_info is not None and code_info.file_data is not None: + if last_line is not None: + arc = (last_line, -code.co_firstlineno) + # log(f"adding {arc=}") + cast(set[TArc], code_info.file_data).add(arc) + + + @panopticon("code", "line") + def sysmon_line_lines(self, code: CodeType, line_number: int) -> MonitorReturn: + """Handle sys.monitoring.events.LINE events for line coverage.""" + code_info = self.code_infos[id(code)] + if code_info.file_data is not None: + cast(set[TLineNo], code_info.file_data).add(line_number) + # log(f"adding {line_number=}") + return sys.monitoring.DISABLE + + @panopticon("code", "line") + def sysmon_line_arcs(self, code: CodeType, line_number: int) -> MonitorReturn: + """Handle sys.monitoring.events.LINE events for branch coverage.""" + code_info = self.code_infos[id(code)] + ret = None + if code_info.file_data is not None: + frame = self.callers_frame() + last_line = self.last_lines.get(frame) + if last_line is not None: + arc = (last_line, line_number) + cast(set[TArc], code_info.file_data).add(arc) + # log(f"adding {arc=}") + self.last_lines[frame] = line_number + return ret diff --git a/.venv/Lib/site-packages/coverage/templite.py b/.venv/Lib/site-packages/coverage/templite.py new file mode 100644 index 00000000..4689f957 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/templite.py @@ -0,0 +1,306 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""A simple Python template renderer, for a nano-subset of Django syntax. + +For a detailed discussion of this code, see this chapter from 500 Lines: +http://aosabook.org/en/500L/a-template-engine.html + +""" + +# Coincidentally named the same as http://code.activestate.com/recipes/496702/ + +from __future__ import annotations + +import re + +from typing import ( + Any, Callable, NoReturn, cast, +) + + +class TempliteSyntaxError(ValueError): + """Raised when a template has a syntax error.""" + pass + + +class TempliteValueError(ValueError): + """Raised when an expression won't evaluate in a template.""" + pass + + +class CodeBuilder: + """Build source code conveniently.""" + + def __init__(self, indent: int = 0) -> None: + self.code: list[str | CodeBuilder] = [] + self.indent_level = indent + + def __str__(self) -> str: + return "".join(str(c) for c in self.code) + + def add_line(self, line: str) -> None: + """Add a line of source to the code. + + Indentation and newline will be added for you, don't provide them. + + """ + self.code.extend([" " * self.indent_level, line, "\n"]) + + def add_section(self) -> CodeBuilder: + """Add a section, a sub-CodeBuilder.""" + section = CodeBuilder(self.indent_level) + self.code.append(section) + return section + + INDENT_STEP = 4 # PEP8 says so! + + def indent(self) -> None: + """Increase the current indent for following lines.""" + self.indent_level += self.INDENT_STEP + + def dedent(self) -> None: + """Decrease the current indent for following lines.""" + self.indent_level -= self.INDENT_STEP + + def get_globals(self) -> dict[str, Any]: + """Execute the code, and return a dict of globals it defines.""" + # A check that the caller really finished all the blocks they started. + assert self.indent_level == 0 + # Get the Python source as a single string. + python_source = str(self) + # Execute the source, defining globals, and return them. 
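+        # The same mechanism in miniature (illustrative, not part of
+        # templite.py): exec'ing a def statement into a fresh dict leaves
+        # the compiled function behind as an entry of that dict.
+        #
+        #     ns = {}
+        #     exec("def double(x):\n    return x * 2", ns)
+        #     assert ns["double"](21) == 42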
+ global_namespace: dict[str, Any] = {} + exec(python_source, global_namespace) + return global_namespace + + +class Templite: + """A simple template renderer, for a nano-subset of Django syntax. + + Supported constructs are extended variable access:: + + {{var.modifier.modifier|filter|filter}} + + loops:: + + {% for var in list %}...{% endfor %} + + and ifs:: + + {% if var %}...{% endif %} + + Comments are within curly-hash markers:: + + {# This will be ignored #} + + Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped + and joined. Be careful, this could join words together! + + Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), + which will collapse the white space following the tag. + + Construct a Templite with the template text, then use `render` against a + dictionary context to create a finished string:: + + templite = Templite(''' +
+            <h1>Hello {{name|upper}}!</h1>
+            {% for topic in topics %}
+                <p>You are interested in {{topic}}.</p>
+            {% endfor %}
+            ''',
+            {"upper": str.upper},
+        )
+        text = templite.render({
+            "name": "Ned",
+            "topics": ["Python", "Geometry", "Juggling"],
+        })
+
+    """
+    def __init__(self, text: str, *contexts: dict[str, Any]) -> None:
+        """Construct a Templite with the given `text`.
+
+        `contexts` are dictionaries of values to use for future renderings.
+        These are good for filters and global values.
+
+        """
+        self.context = {}
+        for context in contexts:
+            self.context.update(context)
+
+        self.all_vars: set[str] = set()
+        self.loop_vars: set[str] = set()
+
+        # We construct a function in source form, then compile it and hold onto
+        # it, and execute it to render the template.
+        code = CodeBuilder()
+
+        code.add_line("def render_function(context, do_dots):")
+        code.indent()
+        vars_code = code.add_section()
+        code.add_line("result = []")
+        code.add_line("append_result = result.append")
+        code.add_line("extend_result = result.extend")
+        code.add_line("to_str = str")
+
+        buffered: list[str] = []
+
+        def flush_output() -> None:
+            """Force `buffered` to the code builder."""
+            if len(buffered) == 1:
+                code.add_line("append_result(%s)" % buffered[0])
+            elif len(buffered) > 1:
+                code.add_line("extend_result([%s])" % ", ".join(buffered))
+            del buffered[:]
+
+        ops_stack = []
+
+        # Split the text to form a list of tokens.
+        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
+
+        squash = in_joined = False
+
+        for token in tokens:
+            if token.startswith("{"):
+                start, end = 2, -2
+                squash = (token[-3] == "-")
+                if squash:
+                    end = -3
+
+                if token.startswith("{#"):
+                    # Comment: ignore it and move on.
+                    continue
+                elif token.startswith("{{"):
+                    # An expression to evaluate.
+                    expr = self._expr_code(token[start:end].strip())
+                    buffered.append("to_str(%s)" % expr)
+                else:
+                    # token.startswith("{%")
+                    # Action tag: split into words and parse further.
+                    flush_output()
+
+                    words = token[start:end].strip().split()
+                    if words[0] == "if":
+                        # An if statement: evaluate the expression to determine if.
+                        if len(words) != 2:
+                            self._syntax_error("Don't understand if", token)
+                        ops_stack.append("if")
+                        code.add_line("if %s:" % self._expr_code(words[1]))
+                        code.indent()
+                    elif words[0] == "for":
+                        # A loop: iterate over expression result.
+                        if len(words) != 4 or words[2] != "in":
+                            self._syntax_error("Don't understand for", token)
+                        ops_stack.append("for")
+                        self._variable(words[1], self.loop_vars)
+                        code.add_line(
+                            f"for c_{words[1]} in {self._expr_code(words[3])}:",
+                        )
+                        code.indent()
+                    elif words[0] == "joined":
+                        ops_stack.append("joined")
+                        in_joined = True
+                    elif words[0].startswith("end"):
+                        # Endsomething. Pop the ops stack.
+                        if len(words) != 1:
+                            self._syntax_error("Don't understand end", token)
+                        end_what = words[0][3:]
+                        if not ops_stack:
+                            self._syntax_error("Too many ends", token)
+                        start_what = ops_stack.pop()
+                        if start_what != end_what:
+                            self._syntax_error("Mismatched end tag", end_what)
+                        if end_what == "joined":
+                            in_joined = False
+                        else:
+                            code.dedent()
+                    else:
+                        self._syntax_error("Don't understand tag", words[0])
+            else:
+                # Literal content. If it isn't empty, output it.
+ if in_joined: + token = re.sub(r"\s*\n\s*", "", token.strip()) + elif squash: + token = token.lstrip() + if token: + buffered.append(repr(token)) + + if ops_stack: + self._syntax_error("Unmatched action tag", ops_stack[-1]) + + flush_output() + + for var_name in self.all_vars - self.loop_vars: + vars_code.add_line(f"c_{var_name} = context[{var_name!r}]") + + code.add_line("return ''.join(result)") + code.dedent() + self._render_function = cast( + Callable[ + [dict[str, Any], Callable[..., Any]], + str, + ], + code.get_globals()["render_function"], + ) + + def _expr_code(self, expr: str) -> str: + """Generate a Python expression for `expr`.""" + if "|" in expr: + pipes = expr.split("|") + code = self._expr_code(pipes[0]) + for func in pipes[1:]: + self._variable(func, self.all_vars) + code = f"c_{func}({code})" + elif "." in expr: + dots = expr.split(".") + code = self._expr_code(dots[0]) + args = ", ".join(repr(d) for d in dots[1:]) + code = f"do_dots({code}, {args})" + else: + self._variable(expr, self.all_vars) + code = "c_%s" % expr + return code + + def _syntax_error(self, msg: str, thing: Any) -> NoReturn: + """Raise a syntax error using `msg`, and showing `thing`.""" + raise TempliteSyntaxError(f"{msg}: {thing!r}") + + def _variable(self, name: str, vars_set: set[str]) -> None: + """Track that `name` is used as a variable. + + Adds the name to `vars_set`, a set of variable names. + + Raises an syntax error if `name` is not a valid name. + + """ + if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name): + self._syntax_error("Not a valid name", name) + vars_set.add(name) + + def render(self, context: dict[str, Any] | None = None) -> str: + """Render this template by applying it to `context`. + + `context` is a dictionary of values to use in this rendering. + + """ + # Make the complete context we'll use. + render_context = dict(self.context) + if context: + render_context.update(context) + return self._render_function(render_context, self._do_dots) + + def _do_dots(self, value: Any, *dots: str) -> Any: + """Evaluate dotted expressions at run-time.""" + for dot in dots: + try: + value = getattr(value, dot) + except AttributeError: + try: + value = value[dot] + except (TypeError, KeyError) as exc: + raise TempliteValueError( + f"Couldn't evaluate {value!r}.{dot}", + ) from exc + if callable(value): + value = value() + return value diff --git a/.venv/Lib/site-packages/coverage/tomlconfig.py b/.venv/Lib/site-packages/coverage/tomlconfig.py new file mode 100644 index 00000000..1784f3f3 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/tomlconfig.py @@ -0,0 +1,209 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""TOML configuration support for coverage.py""" + +from __future__ import annotations + +import os +import re + +from typing import Any, Callable, TypeVar +from collections.abc import Iterable + +from coverage import env +from coverage.exceptions import ConfigError +from coverage.misc import import_third_party, substitute_variables +from coverage.types import TConfigSectionOut, TConfigValueOut + + +if env.PYVERSION >= (3, 11, 0, "alpha", 7): + import tomllib # pylint: disable=import-error + has_tomllib = True +else: + # TOML support on Python 3.10 and below is an install-time extra option. 
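+# The effective import, written out (illustrative sketch only; the real code
+# goes through import_third_party below):
+#
+#     try:
+#         import tomllib                # stdlib on Python 3.11+
+#     except ImportError:
+#         import tomli as tomllib       # `pip install coverage[toml]` on 3.10 and below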
+ tomllib, has_tomllib = import_third_party("tomli") + + +class TomlDecodeError(Exception): + """An exception class that exists even when toml isn't installed.""" + pass + + +TWant = TypeVar("TWant") + +class TomlConfigParser: + """TOML file reading with the interface of HandyConfigParser.""" + + # This class has the same interface as config.HandyConfigParser, no + # need for docstrings. + # pylint: disable=missing-function-docstring + + def __init__(self, our_file: bool) -> None: + self.our_file = our_file + self.data: dict[str, Any] = {} + + def read(self, filenames: Iterable[str]) -> list[str]: + # RawConfigParser takes a filename or list of filenames, but we only + # ever call this with a single filename. + assert isinstance(filenames, (bytes, str, os.PathLike)) + filename = os.fspath(filenames) + + try: + with open(filename, encoding='utf-8') as fp: + toml_text = fp.read() + except OSError: + return [] + if has_tomllib: + try: + self.data = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as err: + raise TomlDecodeError(str(err)) from err + return [filename] + else: + has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE) + if self.our_file or has_toml: + # Looks like they meant to read TOML, but we can't read it. + msg = "Can't read {!r} without TOML support. Install with [toml] extra" + raise ConfigError(msg.format(filename)) + return [] + + def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]: + """Get a section from the data. + + Arguments: + section (str): A section name, which can be dotted. + + Returns: + name (str): the actual name of the section that was found, if any, + or None. + data (str): the dict of data in the section, or None if not found. + + """ + prefixes = ["tool.coverage."] + for prefix in prefixes: + real_section = prefix + section + parts = real_section.split(".") + try: + data = self.data[parts[0]] + for part in parts[1:]: + data = data[part] + except KeyError: + continue + break + else: + return None, None + return real_section, data + + def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]: + """Like .get, but returns the real section name and the value.""" + name, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + assert name is not None + try: + value = data[option] + except KeyError: + raise ConfigError(f"No option {option!r} in section: {name!r}") from None + return name, value + + def _get_single(self, section: str, option: str) -> Any: + """Get a single-valued option. + + Performs environment substitution if the value is a string. Other types + will be converted later as needed. 
+ """ + name, value = self._get(section, option) + if isinstance(value, str): + value = substitute_variables(value, os.environ) + return name, value + + def has_option(self, section: str, option: str) -> bool: + _, data = self._get_section(section) + if data is None: + return False + return option in data + + def real_section(self, section: str) -> str | None: + name, _ = self._get_section(section) + return name + + def has_section(self, section: str) -> bool: + name, _ = self._get_section(section) + return bool(name) + + def options(self, section: str) -> list[str]: + _, data = self._get_section(section) + if data is None: + raise ConfigError(f"No section: {section!r}") + return list(data.keys()) + + def get_section(self, section: str) -> TConfigSectionOut: + _, data = self._get_section(section) + return data or {} + + def get(self, section: str, option: str) -> Any: + _, value = self._get_single(section, option) + return value + + def _check_type( + self, + section: str, + option: str, + value: Any, + type_: type[TWant], + converter: Callable[[Any], TWant] | None, + type_desc: str, + ) -> TWant: + """Check that `value` has the type we want, converting if needed. + + Returns the resulting value of the desired type. + """ + if isinstance(value, type_): + return value + if isinstance(value, str) and converter is not None: + try: + return converter(value) + except Exception as e: + raise ValueError( + f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}", + ) from e + raise ValueError( + f"Option [{section}]{option} is not {type_desc}: {value!r}", + ) + + def getboolean(self, section: str, option: str) -> bool: + name, value = self._get_single(section, option) + bool_strings = {"true": True, "false": False} + return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean") + + def _get_list(self, section: str, option: str) -> tuple[str, list[str]]: + """Get a list of strings, substituting environment variables in the elements.""" + name, values = self._get(section, option) + values = self._check_type(name, option, values, list, None, "a list") + values = [substitute_variables(value, os.environ) for value in values] + return name, values + + def getlist(self, section: str, option: str) -> list[str]: + _, values = self._get_list(section, option) + return values + + def getregexlist(self, section: str, option: str) -> list[str]: + name, values = self._get_list(section, option) + for value in values: + value = value.strip() + try: + re.compile(value) + except re.error as e: + raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e + return values + + def getint(self, section: str, option: str) -> int: + name, value = self._get_single(section, option) + return self._check_type(name, option, value, int, int, "an integer") + + def getfloat(self, section: str, option: str) -> float: + name, value = self._get_single(section, option) + if isinstance(value, int): + value = float(value) + return self._check_type(name, option, value, float, float, "a float") diff --git a/.venv/Lib/site-packages/coverage/tracer.cp312-win_amd64.pyd b/.venv/Lib/site-packages/coverage/tracer.cp312-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7878535ab731ac627bc547902e6f691deb6396f1 GIT binary patch literal 22528 zcmeHv4Rlo1wf~tUlT09CB4i*c)IozpkQf-$#1NU01nN3aTL)%*YLbI(mC1H88X z)vou}YH!xP`<%V^*=L`9_Sxs--bv-Hn^-DiEDg7+GS&@9mz_WV=ZDF_*tkpfjbl$_ zzBIMl;Cg9lb)z?A4Fvsbf_2SScb(7Ym#wQk)?nCY_4=&kOO{!i{q>&0?Cdf5LiFbz 
zz>&`g9Q8Uo104OPO5n(i7_ED&a7ZF>)V=)oPi@qG5x3hK;cLXRIuzpcfc6_N{kKr8 zP5a3d|IJf=(QGaPxXnli9M_5Pf9*2hq$zz)N|9k=8k@LueBgrGoFa}}no=}%Od1>W z2O~4eV_9GVV|U~J1h$!KOku|QF+BfPF1`p_3>@)(kj9MFnT&9YE%@md;h{isK2er| z%FRt@W9Fm>40&T%Uj0~>R}DBfljY6H9U4E76EIB3U=zM<9?BjVdpI+eQJY=_xw&a9 zw>k&9nb=_%KI=lp9u)N@<~8{lntW3lGc8REq}J+fBYlmm?GlVvD`UUl<n{HU@FIwi?7pv(mp6SK&f%u=7hEYQO;=YoL&cpkV}H)757 z=~|3ImOX$peauTShc40YRTpsko7kA8=>aO62HGp&dlz@7;5~qJN&#crMSd*O6LITw z%1=t58`2Cc?FPIkyeOSrG}p{73Qioz4NRyd`J{IxcxNDv?t?sn!r!V4=5%I8Tg+&S zdCu5?0bhe=t_Gd@EvWUIAv=v_&mF_Em!?-`QazVq>JP2I z&}WJLWii5L``S1D6i9oc5&sAf=XKoD*HYCFEVW zt8gFAVXPSt-<9$WRiQgzn>}aNI8-L z6uriY9b3@15WCD}a**#Ec0o61bHI=NoCdT?*mFY5CE_fCv0sDJ<@c>YGyKhzV82hq zEcb=HYkZ!1qHwAL-84PX4%s2gL9824Vj-O$5XyyZ-Mc{K5JVE2DO_xNArseon<69g*S;RR_Z! z)|6;luFw}pc&H{#?_n$6#(Rf~>ixcJgn!r?2cZ{@m@2re%mVAtbRVXh!g?HPeVDdX zZdyL89AsVhS4fmE8Hy>(6S!euU;El3c@wm{o*&+71+ zHJ+fZ<)VtEiz{3<&E(j1)zmDg2?acEZvzf;HRHir(|`}jIPRm^<@GiAYwA5AIp|+s z(*)s+UC)|5%^?p`D;6wWzPQ?1SuuNV5${HXRG}kUdef#wt0be?jz|49p;?-JV+=1(;#h_aCHm4wA|nS zw66YN(*N&Qm6x!J_Trd=U)bi1y5J(e%2KlEAKdOF@#oD3#CiMq{JTxi_ayNp`*%#xhm!cLNj0Al z^t^?do%DK>{kuTWS0?c#`**FNZ%pD#_V3Mt9!ui0C+T-g&`mdv)-O2@=Lxz!iLWY2 z-bz7lN#a|X#J5qh=&!XqxQcHGp)oXhc^2KN@i1GeFwSEa#k z;H>>|;5P&2A^69UCP@EZupB%D@5U_?Kj0I%NoRuho?=C5w1y~`{5@4%H6a1P;KMeScNE4)wRH^JQ0O!ua*PuuftjA4wg7p8D zC{6IcMEVK9`GuO!cEDq}9|sS?PXwOew5v3pV!*Z{^doo(K7*TVND$wO@$@Hv2W@Cm z75Zi_&f{^LaORG0$N2k>9Hi;J2feQtfISJ)y9`Pbr1ub%CP?oNnjnXu>1xIfApP&l zz~60RIJS+p1>Ua!0rQ`Q&rHKO2dx&AuK^}s^X9iSH(BwmLrSTx;=Z-xBz zIBPANzPx(jtm5hPA&0NNt_jBmWz*MtLeuA8pFL(?T`1&fUfs0b3IV=Q*>rrrbzKPG z_cYgqX5nkOpg-hqkZ0i}0(=JcQx(`>}uK7wrk_AO}ozQ%GsT_d&}&g0VKRgZ6c zoNcpiE7&G&tJ)UW*0OE$wk_LY+xoY~w+(H}*`Bw(c)NXj?e@m)8@F%TzGr*)_G8-z zwzC~YJBoKmJF0f9+|jrru%l(i#vPk>Y~HbR$DSSCJNkFz?X>P(xwCd> None: ... + +class CTracer(Tracer): + """CTracer is in ctracer/tracer.c""" + check_include: Any + concur_id_func: Any + data: TTraceData + disable_plugin: Any + file_tracers: Any + should_start_context: Any + should_trace: Any + should_trace_cache: Any + switch_context: Any + lock_data: Any + unlock_data: Any + trace_arcs: Any + warn: Any + def __init__(self) -> None: ... + def activity(self) -> bool: ... + def get_stats(self) -> Dict[str, int]: ... + def reset_activity(self) -> Any: ... + def start(self) -> TTraceFn: ... + def stop(self) -> None: ... diff --git a/.venv/Lib/site-packages/coverage/types.py b/.venv/Lib/site-packages/coverage/types.py new file mode 100644 index 00000000..bcf8396d --- /dev/null +++ b/.venv/Lib/site-packages/coverage/types.py @@ -0,0 +1,203 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +""" +Types for use throughout coverage.py. 
+""" + +from __future__ import annotations + +import os +import pathlib + +from collections.abc import Iterable, Mapping +from types import FrameType, ModuleType +from typing import ( + Any, Callable, Optional, Protocol, + Union, TYPE_CHECKING, +) + +if TYPE_CHECKING: + from coverage.plugin import FileTracer + + +AnyCallable = Callable[..., Any] + +## File paths + +# For arguments that are file paths: +if TYPE_CHECKING: + FilePath = Union[str, os.PathLike[str]] +else: + # PathLike < python3.9 doesn't support subscription + FilePath = Union[str, os.PathLike] +# For testing FilePath arguments +FilePathClasses = [str, pathlib.Path] +FilePathType = Union[type[str], type[pathlib.Path]] + +## Python tracing + +class TTraceFn(Protocol): + """A Python trace function.""" + def __call__( + self, + frame: FrameType, + event: str, + arg: Any, + lineno: TLineNo | None = None, # Our own twist, see collector.py + ) -> TTraceFn | None: + ... + +## Coverage.py tracing + +# Line numbers are pervasive enough that they deserve their own type. +TLineNo = int + +TArc = tuple[TLineNo, TLineNo] + +class TFileDisposition(Protocol): + """A simple value type for recording what to do with a file.""" + + original_filename: str + canonical_filename: str + source_filename: str | None + trace: bool + reason: str + file_tracer: FileTracer | None + has_dynamic_filename: bool + + +# When collecting data, we use a dictionary with a few possible shapes. The +# keys are always file names. +# - If measuring line coverage, the values are sets of line numbers. +# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs +# of line numbers). +# - If measuring arcs in the C tracer, the values are sets of packed arcs (two +# line numbers combined into one integer). + +TTraceFileData = Union[set[TLineNo], set[TArc], set[int]] + +TTraceData = dict[str, TTraceFileData] + +# Functions passed into collectors. +TShouldTraceFn = Callable[[str, FrameType], TFileDisposition] +TCheckIncludeFn = Callable[[str, FrameType], bool] +TShouldStartContextFn = Callable[[FrameType], Union[str, None]] + +class Tracer(Protocol): + """Anything that can report on Python execution.""" + + data: TTraceData + trace_arcs: bool + should_trace: TShouldTraceFn + should_trace_cache: Mapping[str, TFileDisposition | None] + should_start_context: TShouldStartContextFn | None + switch_context: Callable[[str | None], None] | None + lock_data: Callable[[], None] + unlock_data: Callable[[], None] + warn: TWarnFn + + def __init__(self) -> None: + ... + + def start(self) -> TTraceFn | None: + """Start this tracer, return a trace function if based on sys.settrace.""" + + def stop(self) -> None: + """Stop this tracer.""" + + def activity(self) -> bool: + """Has there been any activity?""" + + def reset_activity(self) -> None: + """Reset the activity() flag.""" + + def get_stats(self) -> dict[str, int] | None: + """Return a dictionary of statistics, or None.""" + + +## Coverage + +# Many places use kwargs as Coverage kwargs. +TCovKwargs = Any + + +## Configuration + +# One value read from a config file. +TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str]]] +TConfigValueOut = Optional[Union[bool, int, float, str, list[str]]] +# An entire config section, mapping option names to values. 
+TConfigSectionIn = Mapping[str, TConfigValueIn] +TConfigSectionOut = Mapping[str, TConfigValueOut] + +class TConfigurable(Protocol): + """Something that can proxy to the coverage configuration settings.""" + + def get_option(self, option_name: str) -> TConfigValueOut | None: + """Get an option from the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + Returns the value of the option. + + """ + + def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None: + """Set an option in the configuration. + + `option_name` is a colon-separated string indicating the section and + option name. For example, the ``branch`` option in the ``[run]`` + section of the config file would be indicated with `"run:branch"`. + + `value` is the new value for the option. + + """ + +class TPluginConfig(Protocol): + """Something that can provide options to a plugin.""" + + def get_plugin_options(self, plugin: str) -> TConfigSectionOut: + """Get the options for a plugin.""" + + +## Parsing + +TMorf = Union[ModuleType, str] + +TSourceTokenLines = Iterable[list[tuple[str, str]]] + + +## Plugins + +class TPlugin(Protocol): + """What all plugins have in common.""" + _coverage_plugin_name: str + _coverage_enabled: bool + + +## Debugging + +class TWarnFn(Protocol): + """A callable warn() function.""" + def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: + ... + + +class TDebugCtl(Protocol): + """A DebugControl object, or something like it.""" + + def should(self, option: str) -> bool: + """Decide whether to output debug information in category `option`.""" + + def write(self, msg: str) -> None: + """Write a line of debug output.""" + + +class TWritable(Protocol): + """Anything that can be written to.""" + + def write(self, msg: str) -> None: + """Write a message.""" diff --git a/.venv/Lib/site-packages/coverage/version.py b/.venv/Lib/site-packages/coverage/version.py new file mode 100644 index 00000000..43e6f8c3 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/version.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""The version and URL for coverage.py""" +# This file is exec'ed in setup.py, don't import anything! + +from __future__ import annotations + +# version_info: same semantics as sys.version_info. +# _dev: the .devN suffix if any. 
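+# A doctest-style sketch of _make_version below (illustrative, not upstream):
+#
+#     >>> _make_version(7, 6, 4, "final", 0, 0)
+#     '7.6.4'
+#     >>> _make_version(7, 7, 0, "beta", 2, 3)
+#     '7.7.0b2.dev3'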
+version_info = (7, 6, 4, "final", 0) +_dev = 0 + + +def _make_version( + major: int, + minor: int, + micro: int, + releaselevel: str = "final", + serial: int = 0, + dev: int = 0, +) -> str: + """Create a readable version string from version_info tuple components.""" + assert releaselevel in ["alpha", "beta", "candidate", "final"] + version = "%d.%d.%d" % (major, minor, micro) + if releaselevel != "final": + short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel] + version += f"{short}{serial}" + if dev != 0: + version += f".dev{dev}" + return version + + +def _make_url( + major: int, + minor: int, + micro: int, + releaselevel: str, + serial: int = 0, + dev: int = 0, +) -> str: + """Make the URL people should start at for this version of coverage.py.""" + return ( + "https://coverage.readthedocs.io/en/" + + _make_version(major, minor, micro, releaselevel, serial, dev) + ) + + +__version__ = _make_version(*version_info, _dev) +__url__ = _make_url(*version_info, _dev) diff --git a/.venv/Lib/site-packages/coverage/xmlreport.py b/.venv/Lib/site-packages/coverage/xmlreport.py new file mode 100644 index 00000000..487c2659 --- /dev/null +++ b/.venv/Lib/site-packages/coverage/xmlreport.py @@ -0,0 +1,261 @@ +# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +"""XML reporting for coverage.py""" + +from __future__ import annotations + +import os +import os.path +import sys +import time +import xml.dom.minidom + +from dataclasses import dataclass +from typing import Any, IO, TYPE_CHECKING +from collections.abc import Iterable + +from coverage import __version__, files +from coverage.misc import isolate_module, human_sorted, human_sorted_items +from coverage.plugin import FileReporter +from coverage.report_core import get_analysis_to_report +from coverage.results import Analysis +from coverage.types import TMorf +from coverage.version import __url__ + +if TYPE_CHECKING: + from coverage import Coverage + +os = isolate_module(os) + + +DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd" + + +def rate(hit: int, num: int) -> str: + """Return the fraction of `hit`/`num`, as a string.""" + if num == 0: + return "1" + else: + return "%.4g" % (hit / num) + + +@dataclass +class PackageData: + """Data we keep about each "package" (in Java terms).""" + elements: dict[str, xml.dom.minidom.Element] + hits: int + lines: int + br_hits: int + branches: int + + +def appendChild(parent: Any, child: Any) -> None: + """Append a child to a parent, in a way mypy will shut up about.""" + parent.appendChild(child) + + +class XmlReporter: + """A reporter for writing Cobertura-style XML coverage results.""" + + report_type = "XML report" + + def __init__(self, coverage: Coverage) -> None: + self.coverage = coverage + self.config = self.coverage.config + + self.source_paths = set() + if self.config.source: + for src in self.config.source: + if os.path.exists(src): + if self.config.relative_files: + src = src.rstrip(r"\/") + else: + src = files.canonical_filename(src) + self.source_paths.add(src) + self.packages: dict[str, PackageData] = {} + self.xml_out: xml.dom.minidom.Document + + def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float: + """Generate a Cobertura-compatible XML report for `morfs`. + + `morfs` is a list of modules or file names. + + `outfile` is a file object to write the XML to. + + """ + # Initial setup. 
+ outfile = outfile or sys.stdout + has_arcs = self.coverage.get_data().has_arcs() + + # Create the DOM that will store the data. + impl = xml.dom.minidom.getDOMImplementation() + assert impl is not None + self.xml_out = impl.createDocument(None, "coverage", None) + + # Write header stuff. + xcoverage = self.xml_out.documentElement + xcoverage.setAttribute("version", __version__) + xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) + xcoverage.appendChild(self.xml_out.createComment( + f" Generated by coverage.py: {__url__} ", + )) + xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} ")) + + # Call xml_file for each file in the data. + for fr, analysis in get_analysis_to_report(self.coverage, morfs): + self.xml_file(fr, analysis, has_arcs) + + xsources = self.xml_out.createElement("sources") + xcoverage.appendChild(xsources) + + # Populate the XML DOM with the source info. + for path in human_sorted(self.source_paths): + xsource = self.xml_out.createElement("source") + appendChild(xsources, xsource) + txt = self.xml_out.createTextNode(path) + appendChild(xsource, txt) + + lnum_tot, lhits_tot = 0, 0 + bnum_tot, bhits_tot = 0, 0 + + xpackages = self.xml_out.createElement("packages") + xcoverage.appendChild(xpackages) + + # Populate the XML DOM with the package info. + for pkg_name, pkg_data in human_sorted_items(self.packages.items()): + xpackage = self.xml_out.createElement("package") + appendChild(xpackages, xpackage) + xclasses = self.xml_out.createElement("classes") + appendChild(xpackage, xclasses) + for _, class_elt in human_sorted_items(pkg_data.elements.items()): + appendChild(xclasses, class_elt) + xpackage.setAttribute("name", pkg_name.replace(os.sep, ".")) + xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines)) + if has_arcs: + branch_rate = rate(pkg_data.br_hits, pkg_data.branches) + else: + branch_rate = "0" + xpackage.setAttribute("branch-rate", branch_rate) + xpackage.setAttribute("complexity", "0") + + lhits_tot += pkg_data.hits + lnum_tot += pkg_data.lines + bhits_tot += pkg_data.br_hits + bnum_tot += pkg_data.branches + + xcoverage.setAttribute("lines-valid", str(lnum_tot)) + xcoverage.setAttribute("lines-covered", str(lhits_tot)) + xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) + if has_arcs: + xcoverage.setAttribute("branches-valid", str(bnum_tot)) + xcoverage.setAttribute("branches-covered", str(bhits_tot)) + xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) + else: + xcoverage.setAttribute("branches-covered", "0") + xcoverage.setAttribute("branches-valid", "0") + xcoverage.setAttribute("branch-rate", "0") + xcoverage.setAttribute("complexity", "0") + + # Write the output file. + outfile.write(serialize_xml(self.xml_out)) + + # Return the total percentage. + denom = lnum_tot + bnum_tot + if denom == 0: + pct = 0.0 + else: + pct = 100.0 * (lhits_tot + bhits_tot) / denom + return pct + + def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None: + """Add to the XML report for a single file.""" + + if self.config.skip_empty: + if analysis.numbers.n_statements == 0: + return + + # Create the "lines" and "package" XML elements, which + # are populated later. Note that a package == a directory. 
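+        # Worked example (illustrative, not upstream code): with
+        # xml_package_depth = 2, a file reported as "src/pkg/sub/mod.py"
+        # gets package "src.pkg", class name "sub/mod.py", and keeps
+        # "src/pkg/sub/mod.py" as its filename attribute.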
+ filename = fr.filename.replace("\\", "/") + for source_path in self.source_paths: + if not self.config.relative_files: + source_path = files.canonical_filename(source_path) + if filename.startswith(source_path.replace("\\", "/") + "/"): + rel_name = filename[len(source_path)+1:] + break + else: + rel_name = fr.relative_filename().replace("\\", "/") + self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) + + dirname = os.path.dirname(rel_name) or "." + dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) + package_name = dirname.replace("/", ".") + + package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0)) + + xclass: xml.dom.minidom.Element = self.xml_out.createElement("class") + + appendChild(xclass, self.xml_out.createElement("methods")) + + xlines = self.xml_out.createElement("lines") + appendChild(xclass, xlines) + + xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) + xclass.setAttribute("filename", rel_name.replace("\\", "/")) + xclass.setAttribute("complexity", "0") + + branch_stats = analysis.branch_stats() + missing_branch_arcs = analysis.missing_branch_arcs() + + # For each statement, create an XML "line" element. + for line in sorted(analysis.statements): + xline = self.xml_out.createElement("line") + xline.setAttribute("number", str(line)) + + # Q: can we get info about the number of times a statement is + # executed? If so, that should be recorded here. + xline.setAttribute("hits", str(int(line not in analysis.missing))) + + if has_arcs: + if line in branch_stats: + total, taken = branch_stats[line] + xline.setAttribute("branch", "true") + xline.setAttribute( + "condition-coverage", + "%d%% (%d/%d)" % (100*taken//total, taken, total), + ) + if line in missing_branch_arcs: + annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] + xline.setAttribute("missing-branches", ",".join(annlines)) + appendChild(xlines, xline) + + class_lines = len(analysis.statements) + class_hits = class_lines - len(analysis.missing) + + if has_arcs: + class_branches = sum(t for t, k in branch_stats.values()) + missing_branches = sum(t - k for t, k in branch_stats.values()) + class_br_hits = class_branches - missing_branches + else: + class_branches = 0 + class_br_hits = 0 + + # Finalize the statistics that are collected in the XML DOM. 
+ xclass.setAttribute("line-rate", rate(class_hits, class_lines)) + if has_arcs: + branch_rate = rate(class_br_hits, class_branches) + else: + branch_rate = "0" + xclass.setAttribute("branch-rate", branch_rate) + + package.elements[rel_name] = xclass + package.hits += class_hits + package.lines += class_lines + package.br_hits += class_br_hits + package.branches += class_branches + + +def serialize_xml(dom: xml.dom.minidom.Document) -> str: + """Serialize a minidom node to XML.""" + return dom.toprettyxml() diff --git a/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/INSTALLER b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/METADATA b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/METADATA new file mode 100644 index 00000000..3ea1e01c --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/METADATA @@ -0,0 +1,80 @@ +Metadata-Version: 2.1 +Name: iniconfig +Version: 2.0.0 +Summary: brain-dead simple config-ini parsing +Project-URL: Homepage, https://github.com/pytest-dev/iniconfig +Author-email: Ronny Pfannschmidt , Holger Krekel +License-Expression: MIT +License-File: LICENSE +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst + +iniconfig: brain-dead simple parsing of ini files +======================================================= + +iniconfig is a small and simple INI-file parser module +having a unique set of features: + +* maintains order of sections and entries +* supports multi-line values with or without line-continuations +* supports "#" comments everywhere +* raises errors with proper line-numbers +* no bells and whistles like automatic substitutions +* iniconfig raises an Error if two sections have the same name. + +If you encounter issues or have feature wishes please report them to: + + https://github.com/RonnyPfannschmidt/iniconfig/issues + +Basic Example +=================================== + +If you have an ini file like this: + +.. code-block:: ini + + # content of example.ini + [section1] # comment + name1=value1 # comment + name1b=value1,value2 # comment + + [section2] + name2= + line1 + line2 + +then you can do: + +.. 
code-block:: pycon + + >>> import iniconfig + >>> ini = iniconfig.IniConfig("example.ini") + >>> ini['section1']['name1'] # raises KeyError if not exists + 'value1' + >>> ini.get('section1', 'name1b', [], lambda x: x.split(",")) + ['value1', 'value2'] + >>> ini.get('section1', 'notexist', [], lambda x: x.split(",")) + [] + >>> [x.name for x in list(ini)] + ['section1', 'section2'] + >>> list(list(ini)[0].items()) + [('name1', 'value1'), ('name1b', 'value1,value2')] + >>> 'section1' in ini + True + >>> 'inexistendsection' in ini + False diff --git a/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/RECORD b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/RECORD new file mode 100644 index 00000000..cfdc5b37 --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/RECORD @@ -0,0 +1,14 @@ +iniconfig-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +iniconfig-2.0.0.dist-info/METADATA,sha256=2KcBd5DEFiZclO-ruP_qzN71qcTL0hNsCw5MCDIPN6I,2599 +iniconfig-2.0.0.dist-info/RECORD,, +iniconfig-2.0.0.dist-info/WHEEL,sha256=hKi7AIIx6qfnsRbr087vpeJnrVUuDokDHZacPPMW7-Y,87 +iniconfig-2.0.0.dist-info/licenses/LICENSE,sha256=KvaAw570k_uCgwNW0dPfGstaBgM8ui3sehniHKp3qGY,1061 +iniconfig/__init__.py,sha256=ALJSNenAgTD7RNj820NggEQuyaZp2QseTCThGJPavk0,5473 +iniconfig/__pycache__/__init__.cpython-312.pyc,, +iniconfig/__pycache__/_parse.cpython-312.pyc,, +iniconfig/__pycache__/_version.cpython-312.pyc,, +iniconfig/__pycache__/exceptions.cpython-312.pyc,, +iniconfig/_parse.py,sha256=OWGLbmE8GjxcoMWTvnGbck1RoNsTm5bt5ficIRZqWJ8,2436 +iniconfig/_version.py,sha256=WM8rOXoL5t25aMQJp4qbU2XP09nrDtmDnrAGhHSk0Wk,160 +iniconfig/exceptions.py,sha256=3V2JS5rndwiYUh84PNYS_1zd8H8IB-Rar81ARAA7E9s,501 +iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/WHEEL b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/WHEEL new file mode 100644 index 00000000..8d5c0ceb --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.12.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE new file mode 100644 index 00000000..31ecdfb1 --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE @@ -0,0 +1,19 @@ + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
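The README example above relies on an ``example.ini`` file on disk; the vendored implementation that follows (``iniconfig/__init__.py``) also accepts raw text through the ``data`` parameter of ``IniConfig``, in which case the path argument is only used as a label in error messages. A minimal sketch against that API (not part of the patch, and assuming the vendored package is importable):

.. code-block:: python

    import iniconfig

    # Parse from a string instead of a file; "example.ini" is only a label.
    text = "[section1]\nname1 = value1\nname1b = value1,value2\n"
    ini = iniconfig.IniConfig("example.ini", data=text)

    assert ini["section1"]["name1"] == "value1"
    # get() applies the converter only when the key exists;
    # otherwise it returns the default unchanged.
    assert ini.get("section1", "name1b", [], lambda x: x.split(",")) == ["value1", "value2"]
    assert ini.get("section1", "missing", [], lambda x: x.split(",")) == []
    # lineof() reports 1-based line numbers from the parsed source.
    assert ini.lineof("section1") == 1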
+ diff --git a/.venv/Lib/site-packages/iniconfig/__init__.py b/.venv/Lib/site-packages/iniconfig/__init__.py new file mode 100644 index 00000000..c18a8e4b --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig/__init__.py @@ -0,0 +1,216 @@ +""" brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +from __future__ import annotations +from typing import ( + Callable, + Iterator, + Mapping, + Optional, + Tuple, + TypeVar, + Union, + TYPE_CHECKING, + NoReturn, + NamedTuple, + overload, + cast, +) + +import os + +if TYPE_CHECKING: + from typing_extensions import Final + +__all__ = ["IniConfig", "ParseError", "COMMENTCHARS", "iscommentline"] + +from .exceptions import ParseError +from . import _parse +from ._parse import COMMENTCHARS, iscommentline + +_D = TypeVar("_D") +_T = TypeVar("_T") + + +class SectionWrapper: + config: Final[IniConfig] + name: Final[str] + + def __init__(self, config: IniConfig, name: str) -> None: + self.config = config + self.name = name + + def lineof(self, name: str) -> int | None: + return self.config.lineof(self.name, name) + + @overload + def get(self, key: str) -> str | None: + ... + + @overload + def get( + self, + key: str, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, + key: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get(self, key: str, default: _D, convert: None = None) -> str | _D: + ... + + @overload + def get( + self, + key: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: + ... + + # TODO: investigate possible mypy bug wrt matching the passed over data + def get( # type: ignore [misc] + self, + key: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key: str) -> str: + return self.config.sections[self.name][key] + + def __iter__(self) -> Iterator[str]: + section: Mapping[str, str] = self.config.sections.get(self.name, {}) + + def lineof(key: str) -> int: + return self.config.lineof(self.name, key) # type: ignore[return-value] + + yield from sorted(section, key=lineof) + + def items(self) -> Iterator[tuple[str, str]]: + for name in self: + yield name, self[name] + + +class IniConfig: + path: Final[str] + sections: Final[Mapping[str, Mapping[str, str]]] + + def __init__( + self, + path: str | os.PathLike[str], + data: str | None = None, + encoding: str = "utf-8", + ) -> None: + self.path = os.fspath(path) + if data is None: + with open(self.path, encoding=encoding) as fp: + data = fp.read() + + tokens = _parse.parse_lines(self.path, data.splitlines(True)) + + self._sources = {} + sections_data: dict[str, dict[str, str]] + self.sections = sections_data = {} + + for lineno, section, name, value in tokens: + if section is None: + raise ParseError(self.path, lineno, "no section header defined") + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + raise ParseError( + self.path, lineno, f"duplicate section {section!r}" + ) + sections_data[section] = {} + else: + if name in self.sections[section]: + raise ParseError(self.path, lineno, f"duplicate name {name!r}") + assert value is not None + sections_data[section][name] = value + + def lineof(self, section: str, name: str | None = None) -> int | None: + lineno = self._sources.get((section, name)) + return None if lineno is None else lineno + 1 + + 
@overload + def get( + self, + section: str, + name: str, + ) -> str | None: + ... + + @overload + def get( + self, + section: str, + name: str, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, + section: str, + name: str, + default: None, + convert: Callable[[str], _T], + ) -> _T | None: + ... + + @overload + def get( + self, section: str, name: str, default: _D, convert: None = None + ) -> str | _D: + ... + + @overload + def get( + self, + section: str, + name: str, + default: _D, + convert: Callable[[str], _T], + ) -> _T | _D: + ... + + def get( # type: ignore + self, + section: str, + name: str, + default: _D | None = None, + convert: Callable[[str], _T] | None = None, + ) -> _D | _T | str | None: + try: + value: str = self.sections[section][name] + except KeyError: + return default + else: + if convert is not None: + return convert(value) + else: + return value + + def __getitem__(self, name: str) -> SectionWrapper: + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self) -> Iterator[SectionWrapper]: + for name in sorted(self.sections, key=self.lineof): # type: ignore + yield SectionWrapper(self, name) + + def __contains__(self, arg: str) -> bool: + return arg in self.sections diff --git a/.venv/Lib/site-packages/iniconfig/_parse.py b/.venv/Lib/site-packages/iniconfig/_parse.py new file mode 100644 index 00000000..2d03437b --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig/_parse.py @@ -0,0 +1,82 @@ +from __future__ import annotations +from .exceptions import ParseError + +from typing import NamedTuple + + +COMMENTCHARS = "#;" + + +class _ParsedLine(NamedTuple): + lineno: int + section: str | None + name: str | None + value: str | None + + +def parse_lines(path: str, line_iter: list[str]) -> list[_ParsedLine]: + result: list[_ParsedLine] = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = _parseline(path, line, lineno) + # new value + if name is not None and data is not None: + result.append(_ParsedLine(lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + raise ParseError(path, lineno, "empty section name") + section = name + result.append(_ParsedLine(lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + raise ParseError(path, lineno, "unexpected value continuation") + last = result.pop() + if last.name is None: + raise ParseError(path, lineno, "unexpected value continuation") + + if last.value: + last = last._replace(value=f"{last.value}\n{data}") + else: + last = last._replace(value=data) + result.append(last) + return result + + +def _parseline(path: str, line: str, lineno: int) -> tuple[str | None, str | None]: + # blank lines + if iscommentline(line): + line = "" + else: + line = line.rstrip() + if not line: + return None, None + # section + if line[0] == "[": + realline = line + for c in COMMENTCHARS: + line = line.split(c)[0].rstrip() + if line[-1] == "]": + return line[1:-1], None + return None, realline.strip() + # value + elif not line[0].isspace(): + try: + name, value = line.split("=", 1) + if ":" in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(":", 1) + except ValueError: + raise ParseError(path, lineno, "unexpected line: %r" % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + +def iscommentline(line: str) -> bool: + c = line.lstrip()[:1] + return c in 
COMMENTCHARS diff --git a/.venv/Lib/site-packages/iniconfig/_version.py b/.venv/Lib/site-packages/iniconfig/_version.py new file mode 100644 index 00000000..dd1883d7 --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.0.0' +__version_tuple__ = version_tuple = (2, 0, 0) diff --git a/.venv/Lib/site-packages/iniconfig/exceptions.py b/.venv/Lib/site-packages/iniconfig/exceptions.py new file mode 100644 index 00000000..bc898e68 --- /dev/null +++ b/.venv/Lib/site-packages/iniconfig/exceptions.py @@ -0,0 +1,20 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import Final + + +class ParseError(Exception): + path: Final[str] + lineno: Final[int] + msg: Final[str] + + def __init__(self, path: str, lineno: int, msg: str) -> None: + super().__init__(path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self) -> str: + return f"{self.path}:{self.lineno + 1}: {self.msg}" diff --git a/.venv/Lib/site-packages/iniconfig/py.typed b/.venv/Lib/site-packages/iniconfig/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/INSTALLER b/.venv/Lib/site-packages/packaging-24.2.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE new file mode 100644 index 00000000..6f62d44e --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.APACHE b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.APACHE new file mode 100644 index 00000000..f433b1a5 --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.BSD b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.BSD new file mode 100644 index 00000000..42ce7b75 --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
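The ``packaging`` distribution vendored next ships a hand-written dependency-specifier parser (``packaging/_parser.py``, further below). As a hedged illustration of what that grammar accepts, here is a sketch using the public ``Requirement`` wrapper from ``packaging/requirements.py`` (listed in the RECORD but not shown in this patch; the attribute names are assumed from the upstream API):

.. code-block:: python

    from packaging.requirements import Requirement

    # Name, extras, version specifiers, and an environment marker,
    # all handled by the vendored recursive-descent parser.
    req = Requirement('requests[security]>=2.8.1,==2.8.* ; python_version < "3.13"')
    print(req.name)            # requests
    print(sorted(req.extras))  # ['security']
    print(str(req.specifier))  # e.g. ==2.8.*,>=2.8.1 (order may vary)
    print(str(req.marker))     # python_version < "3.13"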
diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/METADATA b/.venv/Lib/site-packages/packaging-24.2.dist-info/METADATA
new file mode 100644
index 00000000..1479c869
--- /dev/null
+++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/METADATA
@@ -0,0 +1,102 @@
+Metadata-Version: 2.3
+Name: packaging
+Version: 24.2
+Summary: Core utilities for Python packages
+Author-email: Donald Stufft <donald@stufft.io>
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Typing :: Typed
+Project-URL: Documentation, https://packaging.pypa.io/
+Project-URL: Source, https://github.com/pypa/packaging
+
+packaging
+=========
+
+.. start-intro
+
+Reusable core utilities for various Python Packaging
+`interoperability specifications <https://packaging.python.org/specifications/>`_.
+
+This library provides utilities that implement the interoperability
+specifications which have clearly one correct behaviour (eg: :pep:`440`)
+or benefit greatly from having a single shared implementation (eg: :pep:`425`).
+
+.. end-intro
+
+The ``packaging`` project includes the following: version handling, specifiers,
+markers, requirements, tags, utilities.
+
+Documentation
+-------------
+
+The `documentation`_ provides information and the API for the following:
+
+- Version Handling
+- Specifiers
+- Markers
+- Requirements
+- Tags
+- Utilities
+
+Installation
+------------
+
+Use ``pip`` to install these utilities::
+
+    pip install packaging
+
+The ``packaging`` library uses calendar-based versioning (``YY.N``).
+
+Discussion
+----------
+
+If you run into bugs, you can file them in our `issue tracker`_.
+
+You can also join ``#pypa`` on Freenode to ask questions or get involved.
+
+
+.. _`documentation`: https://packaging.pypa.io/
+.. _`issue tracker`: https://github.com/pypa/packaging/issues
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the packaging project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+Contributing
+------------
+
+The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as
+well as how to report a potential security issue. The documentation for this
+project also covers information about `project development`_ and `security`_.
+
+.. _`project development`: https://packaging.pypa.io/en/latest/development/
+.. _`security`: https://packaging.pypa.io/en/latest/security/
+
+Project History
+---------------
+
+Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for
+recent changes and project history.
+
+.. 
_`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/ + diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/RECORD b/.venv/Lib/site-packages/packaging-24.2.dist-info/RECORD new file mode 100644 index 00000000..e01c1a5a --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/RECORD @@ -0,0 +1,40 @@ +packaging-24.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +packaging-24.2.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +packaging-24.2.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +packaging-24.2.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +packaging-24.2.dist-info/METADATA,sha256=ohH86s6k5mIfQxY2TS0LcSfADeOFa4BiCC-bxZV-pNs,3204 +packaging-24.2.dist-info/RECORD,, +packaging-24.2.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82 +packaging/__init__.py,sha256=dk4Ta_vmdVJxYHDcfyhvQNw8V3PgSBomKNXqg-D2JDY,494 +packaging/__pycache__/__init__.cpython-312.pyc,, +packaging/__pycache__/_elffile.cpython-312.pyc,, +packaging/__pycache__/_manylinux.cpython-312.pyc,, +packaging/__pycache__/_musllinux.cpython-312.pyc,, +packaging/__pycache__/_parser.cpython-312.pyc,, +packaging/__pycache__/_structures.cpython-312.pyc,, +packaging/__pycache__/_tokenizer.cpython-312.pyc,, +packaging/__pycache__/markers.cpython-312.pyc,, +packaging/__pycache__/metadata.cpython-312.pyc,, +packaging/__pycache__/requirements.cpython-312.pyc,, +packaging/__pycache__/specifiers.cpython-312.pyc,, +packaging/__pycache__/tags.cpython-312.pyc,, +packaging/__pycache__/utils.cpython-312.pyc,, +packaging/__pycache__/version.cpython-312.pyc,, +packaging/_elffile.py,sha256=cflAQAkE25tzhYmq_aCi72QfbT_tn891tPzfpbeHOwE,3306 +packaging/_manylinux.py,sha256=vl5OCoz4kx80H5rwXKeXWjl9WNISGmr4ZgTpTP9lU9c,9612 +packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694 +packaging/_parser.py,sha256=s_TvTvDNK0NrM2QB3VKThdWFM4Nc0P6JnkObkl3MjpM,10236 +packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +packaging/_tokenizer.py,sha256=J6v5H7Jzvb-g81xp_2QACKwO7LxHQA6ikryMU7zXwN8,5273 +packaging/licenses/__init__.py,sha256=1x5M1nEYjcgwEbLt0dXwz2ukjr18DiCzC0sraQqJ-Ww,5715 +packaging/licenses/__pycache__/__init__.cpython-312.pyc,, +packaging/licenses/__pycache__/_spdx.cpython-312.pyc,, +packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398 +packaging/markers.py,sha256=c89TNzB7ZdGYhkovm6PYmqGyHxXlYVaLW591PHUNKD8,10561 +packaging/metadata.py,sha256=YJibM7GYe4re8-0a3OlXmGS-XDgTEoO4tlBt2q25Bng,34762 +packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947 +packaging/specifiers.py,sha256=GG1wPNMcL0fMJO68vF53wKMdwnfehDcaI-r9NpTfilA,40074 +packaging/tags.py,sha256=CFqrJzAzc2XNGexerH__T-Y5Iwq7WbsYXsiLERLWxY0,21014 +packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050 +packaging/version.py,sha256=olfyuk_DPbflNkJ4wBWetXQ17c74x3DB501degUv7DY,16676 diff --git a/.venv/Lib/site-packages/packaging-24.2.dist-info/WHEEL b/.venv/Lib/site-packages/packaging-24.2.dist-info/WHEEL new file mode 100644 index 00000000..e3c6feef --- /dev/null +++ b/.venv/Lib/site-packages/packaging-24.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.10.1 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/Lib/site-packages/packaging/__init__.py 
b/.venv/Lib/site-packages/packaging/__init__.py
new file mode 100644
index 00000000..d79f73c5
--- /dev/null
+++ b/.venv/Lib/site-packages/packaging/__init__.py
@@ -0,0 +1,15 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "24.2"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = f"2014 {__author__}"
diff --git a/.venv/Lib/site-packages/packaging/_elffile.py b/.venv/Lib/site-packages/packaging/_elffile.py
new file mode 100644
index 00000000..25f4282c
--- /dev/null
+++ b/.venv/Lib/site-packages/packaging/_elffile.py
@@ -0,0 +1,110 @@
+"""
+ELF file parser.
+
+This provides a class ``ELFFile`` that parses an ELF executable in a similar
+interface to ``ZipFile``. Only the read interface is implemented.
+
+Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+"""
+
+from __future__ import annotations
+
+import enum
+import os
+import struct
+from typing import IO
+
+
+class ELFInvalid(ValueError):
+    pass
+
+
+class EIClass(enum.IntEnum):
+    C32 = 1
+    C64 = 2
+
+
+class EIData(enum.IntEnum):
+    Lsb = 1
+    Msb = 2
+
+
+class EMachine(enum.IntEnum):
+    I386 = 3
+    S390 = 22
+    Arm = 40
+    X8664 = 62
+    AArc64 = 183
+
+
+class ELFFile:
+    """
+    Representation of an ELF executable.
+    """
+
+    def __init__(self, f: IO[bytes]) -> None:
+        self._f = f
+
+        try:
+            ident = self._read("16B")
+        except struct.error as e:
+            raise ELFInvalid("unable to parse identification") from e
+        magic = bytes(ident[:4])
+        if magic != b"\x7fELF":
+            raise ELFInvalid(f"invalid magic: {magic!r}")
+
+        self.capacity = ident[4]  # Format for program header (bitness).
+        self.encoding = ident[5]  # Data structure encoding (endianness).
+
+        try:
+            # e_fmt: Format for program header.
+            # p_fmt: Format for section header.
+            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError as e:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or "
+                f"encoding ({self.encoding})"
+            ) from e
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of section.
+                self._e_phnum,  # Number of sections.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> str | None:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+ continue + self._f.seek(data[self._p_idx[1]]) + return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0") + return None diff --git a/.venv/Lib/site-packages/packaging/_manylinux.py b/.venv/Lib/site-packages/packaging/_manylinux.py new file mode 100644 index 00000000..61339a6f --- /dev/null +++ b/.venv/Lib/site-packages/packaging/_manylinux.py @@ -0,0 +1,263 @@ +from __future__ import annotations + +import collections +import contextlib +import functools +import os +import re +import sys +import warnings +from typing import Generator, Iterator, NamedTuple, Sequence + +from ._elffile import EIClass, EIData, ELFFile, EMachine + +EF_ARM_ABIMASK = 0xFF000000 +EF_ARM_ABI_VER5 = 0x05000000 +EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + +# `os.PathLike` not a generic type until Python 3.9, so sticking with `str` +# as the type for `path` until then. +@contextlib.contextmanager +def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]: + try: + with open(path, "rb") as f: + yield ELFFile(f) + except (OSError, TypeError, ValueError): + yield None + + +def _is_linux_armhf(executable: str) -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.Arm + and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5 + and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD + ) + + +def _is_linux_i686(executable: str) -> bool: + with _parse_elf(executable) as f: + return ( + f is not None + and f.capacity == EIClass.C32 + and f.encoding == EIData.Lsb + and f.machine == EMachine.I386 + ) + + +def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool: + if "armv7l" in archs: + return _is_linux_armhf(executable) + if "i686" in archs: + return _is_linux_i686(executable) + allowed_archs = { + "x86_64", + "aarch64", + "ppc64", + "ppc64le", + "s390x", + "loongarch64", + "riscv64", + } + return any(arch in allowed_archs for arch in archs) + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> str | None: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> str | None: + """ + Fallback implementation of glibc_version_string using ctypes. 
+    """
+    try:
+        import ctypes
+    except ImportError:
+        return None
+
+    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+    # manpage says, "If filename is NULL, then the returned handle is for the
+    # main program". This way we can let the linker do the work to figure out
+    # which libc our process is actually using.
+    #
+    # We must also handle the special case where the executable is not a
+    # dynamically linked executable. This can occur when using musl libc,
+    # for example. In this situation, dlopen() will error, leading to an
+    # OSError. Interestingly, at least in the case of musl, there is no
+    # errno set on the OSError. The single string argument used to construct
+    # OSError comes from libc itself and is therefore not portable to
+    # hard code here. In any case, failure to call dlopen() means we
+    # can proceed, so we bail on our attempt.
+    try:
+        process_namespace = ctypes.CDLL(None)
+    except OSError:
+        return None
+
+    try:
+        gnu_get_libc_version = process_namespace.gnu_get_libc_version
+    except AttributeError:
+        # Symbol doesn't exist -> therefore, we are not linked to
+        # glibc.
+        return None
+
+    # Call gnu_get_libc_version, which returns a string like "2.5"
+    gnu_get_libc_version.restype = ctypes.c_char_p
+    version_str: str = gnu_get_libc_version()
+    # py2 / py3 compatibility:
+    if not isinstance(version_str, str):
+        version_str = version_str.decode("ascii")
+
+    return version_str
+
+
+def _glibc_version_string() -> str | None:
+    """Returns glibc version string, or None if not using glibc."""
+    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> tuple[int, int]:
+    """Parse glibc version.
+
+    We use a regexp instead of str.split because we want to discard any
+    random junk that might come after the minor version -- this might happen
+    in patched/forked versions of glibc (e.g. Linaro's version of glibc
+    uses version strings like "2.20-2014.11"). See gh-3588.
+    """
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn(
+            f"Expected glibc version with 2 components major.minor,"
+            f" got: {version_str}",
+            RuntimeWarning,
+            stacklevel=2,
+        )
+        return -1, -1
+    return int(m.group("major")), int(m.group("minor"))
+
+
+@functools.lru_cache
+def _get_glibc_version() -> tuple[int, int]:
+    version_str = _glibc_version_string()
+    if version_str is None:
+        return (-1, -1)
+    return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
+    sys_glibc = _get_glibc_version()
+    if sys_glibc < version:
+        return False
+    # Check for presence of _manylinux module.
+ try: + import _manylinux + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate manylinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be manylinux-compatible. + + :returns: An iterator of compatible manylinux tags. + """ + if not _have_compatible_abi(sys.executable, archs): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if set(archs) & {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for arch in archs: + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(arch, glibc_version): + yield f"{tag}_{arch}" + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(arch, glibc_version): + yield f"{legacy_tag}_{arch}" diff --git a/.venv/Lib/site-packages/packaging/_musllinux.py b/.venv/Lib/site-packages/packaging/_musllinux.py new file mode 100644 index 00000000..d2bf30b5 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/_musllinux.py @@ -0,0 +1,85 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. 
+""" + +from __future__ import annotations + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> _MuslVersion | None: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache +def _get_musl_version(executable: str) -> _MuslVersion | None: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/.venv/Lib/site-packages/packaging/_parser.py b/.venv/Lib/site-packages/packaging/_parser.py new file mode 100644 index 00000000..c1238c06 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/_parser.py @@ -0,0 +1,354 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains EBNF-inspired grammar representing +the implementation. 
+""" + +from __future__ import annotations + +import ast +from typing import NamedTuple, Sequence, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]] +MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: list[str] + specifier: str + marker: MarkerList | None + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> tuple[str, str, MarkerList | None]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? 
+ """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> list[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> list[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: list[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? + """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? 
LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if env_var in ("platform_python_implementation", "python_implementation"): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/.venv/Lib/site-packages/packaging/_structures.py b/.venv/Lib/site-packages/packaging/_structures.py new file mode 100644 index 00000000..90a6465f --- /dev/null +++ b/.venv/Lib/site-packages/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/.venv/Lib/site-packages/packaging/_tokenizer.py b/.venv/Lib/site-packages/packaging/_tokenizer.py new file mode 100644 index 00000000..89d04160 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/_tokenizer.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +import contextlib +import re +from dataclasses import dataclass +from typing import Iterator, NoReturn + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: dict[str, str | re.Pattern[str]] = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. 
+ """ + + def __init__( + self, + source: str, + *, + rules: dict[str, str | re.Pattern[str]], + ) -> None: + self.source = source + self.rules: dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Token | None = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. + """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: int | None = None, + span_end: int | None = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/.venv/Lib/site-packages/packaging/licenses/__init__.py b/.venv/Lib/site-packages/packaging/licenses/__init__.py new file mode 100644 index 00000000..569156d6 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/licenses/__init__.py @@ -0,0 +1,145 @@ +####################################################################################### +# +# Adapted from: +# https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py +# +# MIT License +# +# Copyright (c) 2017-present Ofek Lev +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this +# software and associated documentation files (the "Software"), to deal in the Software +# without restriction, including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be included in all copies +# or 
substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# +# With additional allowance of arbitrary `LicenseRef-` identifiers, not just +# `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`. +# +####################################################################################### +from __future__ import annotations + +import re +from typing import NewType, cast + +from packaging.licenses._spdx import EXCEPTIONS, LICENSES + +__all__ = [ + "NormalizedLicenseExpression", + "InvalidLicenseExpression", + "canonicalize_license_expression", +] + +license_ref_allowed = re.compile("^[A-Za-z0-9.-]*$") + +NormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str) + + +class InvalidLicenseExpression(ValueError): + """Raised when a license-expression string is invalid + + >>> canonicalize_license_expression("invalid") + Traceback (most recent call last): + ... + packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' + """ + + +def canonicalize_license_expression( + raw_license_expression: str, +) -> NormalizedLicenseExpression: + if not raw_license_expression: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + + # Pad any parentheses so tokenization can be achieved by merely splitting on + # whitespace. + license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") + licenseref_prefix = "LicenseRef-" + license_refs = { + ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] + for ref in license_expression.split() + if ref.lower().startswith(licenseref_prefix.lower()) + } + + # Normalize to lower case so we can look up licenses/exceptions + # and so boolean operators are Python-compatible. + license_expression = license_expression.lower() + + tokens = license_expression.split() + + # Rather than implementing boolean logic, we create an expression that Python can + # parse. Everything that is not involved with the grammar itself is treated as + # `False` and the expression should evaluate as such. + python_tokens = [] + for token in tokens: + if token not in {"or", "and", "with", "(", ")"}: + python_tokens.append("False") + elif token == "with": + python_tokens.append("or") + elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + else: + python_tokens.append(token) + + python_expression = " ".join(python_tokens) + try: + invalid = eval(python_expression, globals(), locals()) + except Exception: + invalid = True + + if invalid is not False: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) from None + + # Take a final pass to check for unknown licenses/exceptions. 
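+    # Illustratively (a sketch, relying on the tables in ``_spdx.py``), this
+    # pass is what maps the lower-cased input back to canonical SPDX ids:
+    #
+    #     canonicalize_license_expression("mit or apache-2.0")
+    #     -> 'MIT OR Apache-2.0'
+    #     canonicalize_license_expression("gpl-3.0-only with classpath-exception-2.0")
+    #     -> 'GPL-3.0-only WITH Classpath-exception-2.0'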
+ normalized_tokens = [] + for token in tokens: + if token in {"or", "and", "with", "(", ")"}: + normalized_tokens.append(token.upper()) + continue + + if normalized_tokens and normalized_tokens[-1] == "WITH": + if token not in EXCEPTIONS: + message = f"Unknown license exception: {token!r}" + raise InvalidLicenseExpression(message) + + normalized_tokens.append(EXCEPTIONS[token]["id"]) + else: + if token.endswith("+"): + final_token = token[:-1] + suffix = "+" + else: + final_token = token + suffix = "" + + if final_token.startswith("licenseref-"): + if not license_ref_allowed.match(final_token): + message = f"Invalid licenseref: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(license_refs[final_token] + suffix) + else: + if final_token not in LICENSES: + message = f"Unknown license: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(LICENSES[final_token]["id"] + suffix) + + normalized_expression = " ".join(normalized_tokens) + + return cast( + NormalizedLicenseExpression, + normalized_expression.replace("( ", "(").replace(" )", ")"), + ) diff --git a/.venv/Lib/site-packages/packaging/licenses/_spdx.py b/.venv/Lib/site-packages/packaging/licenses/_spdx.py new file mode 100644 index 00000000..eac22276 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/licenses/_spdx.py @@ -0,0 +1,759 @@ + +from __future__ import annotations + +from typing import TypedDict + +class SPDXLicense(TypedDict): + id: str + deprecated: bool + +class SPDXException(TypedDict): + id: str + deprecated: bool + + +VERSION = '3.25.0' + +LICENSES: dict[str, SPDXLicense] = { + '0bsd': {'id': '0BSD', 'deprecated': False}, + '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False}, + 'aal': {'id': 'AAL', 'deprecated': False}, + 'abstyles': {'id': 'Abstyles', 'deprecated': False}, + 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False}, + 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False}, + 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False}, + 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False}, + 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False}, + 'adsl': {'id': 'ADSL', 'deprecated': False}, + 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False}, + 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False}, + 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False}, + 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False}, + 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False}, + 'afmparse': {'id': 'Afmparse', 'deprecated': False}, + 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True}, + 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False}, + 'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False}, + 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True}, + 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False}, + 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False}, + 'aladdin': {'id': 'Aladdin', 'deprecated': False}, + 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False}, + 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False}, + 'aml': {'id': 'AML', 'deprecated': False}, + 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False}, + 'ampas': {'id': 'AMPAS', 'deprecated': False}, + 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False}, + 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False}, + 'any-osi': {'id': 'any-OSI', 'deprecated': False}, + 'apache-1.0': {'id': 'Apache-1.0', 'deprecated': False}, + 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False}, 
+ 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False}, + 'apafml': {'id': 'APAFML', 'deprecated': False}, + 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False}, + 'app-s2p': {'id': 'App-s2p', 'deprecated': False}, + 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False}, + 'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False}, + 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False}, + 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False}, + 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False}, + 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False}, + 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False}, + 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False}, + 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False}, + 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False}, + 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False}, + 'baekmuk': {'id': 'Baekmuk', 'deprecated': False}, + 'bahyph': {'id': 'Bahyph', 'deprecated': False}, + 'barr': {'id': 'Barr', 'deprecated': False}, + 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False}, + 'beerware': {'id': 'Beerware', 'deprecated': False}, + 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False}, + 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False}, + 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False}, + 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False}, + 'blessing': {'id': 'blessing', 'deprecated': False}, + 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False}, + 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False}, + 'borceux': {'id': 'Borceux', 'deprecated': False}, + 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False}, + 'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False}, + 'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False}, + 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False}, + 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False}, + 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False}, + 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True}, + 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True}, + 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False}, + 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False}, + 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False}, + 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False}, + 'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False}, + 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False}, + 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False}, + 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False}, + 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False}, + 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False}, + 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False}, + 'bsd-3-clause-open-mpi': {'id': 
'BSD-3-Clause-Open-MPI', 'deprecated': False}, + 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False}, + 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False}, + 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False}, + 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False}, + 'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False}, + 'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False}, + 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False}, + 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False}, + 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False}, + 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False}, + 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False}, + 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False}, + 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False}, + 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False}, + 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False}, + 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False}, + 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True}, + 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False}, + 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False}, + 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False}, + 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False}, + 'caldera': {'id': 'Caldera', 'deprecated': False}, + 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False}, + 'catharon': {'id': 'Catharon', 'deprecated': False}, + 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False}, + 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False}, + 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False}, + 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False}, + 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False}, + 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False}, + 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False}, + 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False}, + 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False}, + 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': False}, + 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False}, + 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False}, + 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False}, + 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False}, + 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False}, + 'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False}, + 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False}, + 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False}, + 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False}, + 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False}, + 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False}, + 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False}, + 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False}, + 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False}, + 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False}, + 
'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False}, + 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False}, + 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False}, + 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False}, + 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False}, + 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False}, + 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False}, + 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False}, + 'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False}, + 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False}, + 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False}, + 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False}, + 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False}, + 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False}, + 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False}, + 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False}, + 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False}, + 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False}, + 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False}, + 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False}, + 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False}, + 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False}, + 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False}, + 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False}, + 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False}, + 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False}, + 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False}, + 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False}, + 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False}, + 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False}, + 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False}, + 'cecill-b': {'id': 'CECILL-B', 'deprecated': False}, + 'cecill-c': {'id': 'CECILL-C', 'deprecated': False}, + 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False}, + 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False}, + 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False}, + 'cern-ohl-s-2.0': {'id': 'CERN-OHL-S-2.0', 'deprecated': False}, + 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False}, + 'cfitsio': {'id': 'CFITSIO', 'deprecated': False}, + 'check-cvs': {'id': 'check-cvs', 'deprecated': False}, + 'checkmk': {'id': 'checkmk', 'deprecated': False}, + 'clartistic': {'id': 'ClArtistic', 'deprecated': False}, + 'clips': {'id': 'Clips', 'deprecated': False}, + 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False}, + 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False}, + 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False}, + 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False}, + 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False}, + 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False}, + 'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False}, + 'condor-1.1': {'id': 
'Condor-1.1', 'deprecated': False}, + 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False}, + 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False}, + 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False}, + 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False}, + 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False}, + 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False}, + 'cronyx': {'id': 'Cronyx', 'deprecated': False}, + 'crossword': {'id': 'Crossword', 'deprecated': False}, + 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False}, + 'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False}, + 'cube': {'id': 'Cube', 'deprecated': False}, + 'curl': {'id': 'curl', 'deprecated': False}, + 'cve-tou': {'id': 'cve-tou', 'deprecated': False}, + 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False}, + 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False}, + 'diffmark': {'id': 'diffmark', 'deprecated': False}, + 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False}, + 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False}, + 'doc': {'id': 'DOC', 'deprecated': False}, + 'docbook-schema': {'id': 'DocBook-Schema', 'deprecated': False}, + 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False}, + 'dotseqn': {'id': 'Dotseqn', 'deprecated': False}, + 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False}, + 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False}, + 'dsdp': {'id': 'DSDP', 'deprecated': False}, + 'dtoa': {'id': 'dtoa', 'deprecated': False}, + 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False}, + 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False}, + 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False}, + 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True}, + 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False}, + 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False}, + 'egenix': {'id': 'eGenix', 'deprecated': False}, + 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False}, + 'entessa': {'id': 'Entessa', 'deprecated': False}, + 'epics': {'id': 'EPICS', 'deprecated': False}, + 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False}, + 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False}, + 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False}, + 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False}, + 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False}, + 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False}, + 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False}, + 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False}, + 'eurosym': {'id': 'Eurosym', 'deprecated': False}, + 'fair': {'id': 'Fair', 'deprecated': False}, + 'fbm': {'id': 'FBM', 'deprecated': False}, + 'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False}, + 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False}, + 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False}, + 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False}, + 'freeimage': {'id': 'FreeImage', 'deprecated': False}, + 'fsfap': {'id': 'FSFAP', 'deprecated': False}, + 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False}, + 'fsful': {'id': 'FSFUL', 'deprecated': False}, + 'fsfullr': {'id': 'FSFULLR', 'deprecated': False}, + 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False}, + 'ftl': {'id': 'FTL', 'deprecated': False}, + 'furuseth': {'id': 'Furuseth', 'deprecated': False}, + 'fwlw': {'id': 'fwlw', 'deprecated': False}, + 'gcr-docs': {'id': 'GCR-docs', 'deprecated': False}, + 'gd': {'id': 'GD', 'deprecated': False}, + 'gfdl-1.1': {'id': 
'GFDL-1.1', 'deprecated': True}, + 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False}, + 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False}, + 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False}, + 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False}, + 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True}, + 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False}, + 'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False}, + 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False}, + 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False}, + 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True}, + 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False}, + 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False}, + 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False}, + 'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False}, + 'giftware': {'id': 'Giftware', 'deprecated': False}, + 'gl2ps': {'id': 'GL2PS', 'deprecated': False}, + 'glide': {'id': 'Glide', 'deprecated': False}, + 'glulxe': {'id': 'Glulxe', 'deprecated': False}, + 'glwtpl': {'id': 'GLWTPL', 'deprecated': False}, + 'gnuplot': {'id': 'gnuplot', 'deprecated': False}, + 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True}, + 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True}, + 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False}, + 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False}, + 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True}, + 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True}, + 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False}, + 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False}, + 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True}, + 'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True}, + 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True}, + 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True}, + 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True}, + 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True}, + 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False}, + 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False}, + 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True}, + 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False}, + 'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False}, + 'gtkbook': {'id': 'gtkbook', 'deprecated': 
False}, + 'gutmann': {'id': 'Gutmann', 'deprecated': False}, + 'haskellreport': {'id': 'HaskellReport', 'deprecated': False}, + 'hdparm': {'id': 'hdparm', 'deprecated': False}, + 'hidapi': {'id': 'HIDAPI', 'deprecated': False}, + 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False}, + 'hp-1986': {'id': 'HP-1986', 'deprecated': False}, + 'hp-1989': {'id': 'HP-1989', 'deprecated': False}, + 'hpnd': {'id': 'HPND', 'deprecated': False}, + 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False}, + 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False}, + 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False}, + 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False}, + 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False}, + 'hpnd-export-us-modify': {'id': 'HPND-export-US-modify', 'deprecated': False}, + 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False}, + 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False}, + 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False}, + 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False}, + 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False}, + 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False}, + 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False}, + 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False}, + 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False}, + 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False}, + 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False}, + 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False}, + 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False}, + 'hpnd-uc': {'id': 'HPND-UC', 'deprecated': False}, + 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False}, + 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False}, + 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False}, + 'icu': {'id': 'ICU', 'deprecated': False}, + 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False}, + 'ijg': {'id': 'IJG', 'deprecated': False}, + 'ijg-short': {'id': 'IJG-short', 'deprecated': False}, + 'imagemagick': {'id': 'ImageMagick', 'deprecated': False}, + 'imatix': {'id': 'iMatix', 'deprecated': False}, + 'imlib2': {'id': 'Imlib2', 'deprecated': False}, + 'info-zip': {'id': 'Info-ZIP', 'deprecated': False}, + 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False}, + 'intel': {'id': 'Intel', 'deprecated': False}, + 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False}, + 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False}, + 'ipa': {'id': 'IPA', 'deprecated': False}, + 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False}, + 'isc': {'id': 'ISC', 'deprecated': False}, + 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False}, + 'jam': {'id': 'Jam', 'deprecated': False}, + 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False}, + 'jpl-image': {'id': 'JPL-image', 'deprecated': False}, + 'jpnic': {'id': 'JPNIC', 'deprecated': False}, + 'json': {'id': 'JSON', 'deprecated': False}, + 'kastrup': {'id': 'Kastrup', 'deprecated': False}, + 'kazlib': {'id': 'Kazlib', 'deprecated': 
False}, + 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False}, + 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False}, + 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False}, + 'latex2e': {'id': 'Latex2e', 'deprecated': False}, + 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False}, + 'leptonica': {'id': 'Leptonica', 'deprecated': False}, + 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True}, + 'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True}, + 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False}, + 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False}, + 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True}, + 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True}, + 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False}, + 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False}, + 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True}, + 'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True}, + 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False}, + 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False}, + 'lgpllr': {'id': 'LGPLLR', 'deprecated': False}, + 'libpng': {'id': 'Libpng', 'deprecated': False}, + 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False}, + 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False}, + 'libtiff': {'id': 'libtiff', 'deprecated': False}, + 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False}, + 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False}, + 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False}, + 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False}, + 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False}, + 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False}, + 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False}, + 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False}, + 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False}, + 'loop': {'id': 'LOOP', 'deprecated': False}, + 'lpd-document': {'id': 'LPD-document', 'deprecated': False}, + 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False}, + 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False}, + 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False}, + 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False}, + 'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False}, + 'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False}, + 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False}, + 'lsof': {'id': 'lsof', 'deprecated': False}, + 'lucida-bitmap-fonts': {'id': 'Lucida-Bitmap-Fonts', 'deprecated': False}, + 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False}, + 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False}, + 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False}, + 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False}, + 'magaz': {'id': 'magaz', 'deprecated': False}, + 'mailprio': {'id': 'mailprio', 'deprecated': False}, + 'makeindex': {'id': 'MakeIndex', 'deprecated': False}, + 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False}, + 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False}, + 'metamail': {'id': 'metamail', 'deprecated': False}, + 'minpack': {'id': 'Minpack', 'deprecated': False}, + 'miros': {'id': 'MirOS', 'deprecated': False}, + 'mit': {'id': 'MIT', 'deprecated': False}, + 
'mit-0': {'id': 'MIT-0', 'deprecated': False}, + 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False}, + 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False}, + 'mit-enna': {'id': 'MIT-enna', 'deprecated': False}, + 'mit-feh': {'id': 'MIT-feh', 'deprecated': False}, + 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False}, + 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False}, + 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False}, + 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False}, + 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False}, + 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False}, + 'mitnfa': {'id': 'MITNFA', 'deprecated': False}, + 'mmixware': {'id': 'MMIXware', 'deprecated': False}, + 'motosoto': {'id': 'Motosoto', 'deprecated': False}, + 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False}, + 'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False}, + 'mpich2': {'id': 'mpich2', 'deprecated': False}, + 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False}, + 'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False}, + 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False}, + 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False}, + 'mplus': {'id': 'mplus', 'deprecated': False}, + 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False}, + 'ms-pl': {'id': 'MS-PL', 'deprecated': False}, + 'ms-rl': {'id': 'MS-RL', 'deprecated': False}, + 'mtll': {'id': 'MTLL', 'deprecated': False}, + 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False}, + 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False}, + 'multics': {'id': 'Multics', 'deprecated': False}, + 'mup': {'id': 'Mup', 'deprecated': False}, + 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False}, + 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False}, + 'naumen': {'id': 'Naumen', 'deprecated': False}, + 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False}, + 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False}, + 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False}, + 'ncl': {'id': 'NCL', 'deprecated': False}, + 'ncsa': {'id': 'NCSA', 'deprecated': False}, + 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True}, + 'netcdf': {'id': 'NetCDF', 'deprecated': False}, + 'newsletr': {'id': 'Newsletr', 'deprecated': False}, + 'ngpl': {'id': 'NGPL', 'deprecated': False}, + 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False}, + 'nist-pd': {'id': 'NIST-PD', 'deprecated': False}, + 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False}, + 'nist-software': {'id': 'NIST-Software', 'deprecated': False}, + 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False}, + 'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False}, + 'nlpl': {'id': 'NLPL', 'deprecated': False}, + 'nokia': {'id': 'Nokia', 'deprecated': False}, + 'nosl': {'id': 'NOSL', 'deprecated': False}, + 'noweb': {'id': 'Noweb', 'deprecated': False}, + 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False}, + 'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False}, + 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False}, + 'nrl': {'id': 'NRL', 'deprecated': False}, + 'ntp': {'id': 'NTP', 'deprecated': False}, + 'ntp-0': {'id': 'NTP-0', 'deprecated': False}, + 'nunit': {'id': 'Nunit', 'deprecated': True}, + 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False}, + 'oar': {'id': 'OAR', 'deprecated': False}, + 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False}, + 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False}, + 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False}, + 'odc-by-1.0': {'id': 
'ODC-By-1.0', 'deprecated': False}, + 'offis': {'id': 'OFFIS', 'deprecated': False}, + 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False}, + 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False}, + 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False}, + 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False}, + 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False}, + 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False}, + 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False}, + 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False}, + 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False}, + 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False}, + 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False}, + 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False}, + 'ogtsl': {'id': 'OGTSL', 'deprecated': False}, + 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False}, + 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False}, + 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False}, + 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False}, + 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False}, + 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False}, + 'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False}, + 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False}, + 'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False}, + 'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False}, + 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False}, + 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False}, + 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False}, + 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False}, + 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False}, + 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False}, + 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False}, + 'oml': {'id': 'OML', 'deprecated': False}, + 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False}, + 'openssl': {'id': 'OpenSSL', 'deprecated': False}, + 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False}, + 'openvision': {'id': 'OpenVision', 'deprecated': False}, + 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False}, + 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False}, + 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False}, + 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False}, + 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False}, + 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False}, + 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False}, + 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False}, + 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False}, + 'padl': {'id': 'PADL', 'deprecated': False}, + 'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False}, + 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False}, + 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False}, + 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False}, + 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False}, + 'pixar': {'id': 'Pixar', 'deprecated': False}, + 'pkgconf': {'id': 'pkgconf', 'deprecated': False}, + 'plexus': {'id': 'Plexus', 'deprecated': False}, + 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False}, + 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False}, + 'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False}, + 'postgresql': {'id': 'PostgreSQL', 'deprecated': False}, + 'ppl': {'id': 'PPL', 'deprecated': False}, + 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False}, + 'psfrag': {'id': 'psfrag', 
'deprecated': False}, + 'psutils': {'id': 'psutils', 'deprecated': False}, + 'python-2.0': {'id': 'Python-2.0', 'deprecated': False}, + 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False}, + 'python-ldap': {'id': 'python-ldap', 'deprecated': False}, + 'qhull': {'id': 'Qhull', 'deprecated': False}, + 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False}, + 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False}, + 'radvd': {'id': 'radvd', 'deprecated': False}, + 'rdisc': {'id': 'Rdisc', 'deprecated': False}, + 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False}, + 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False}, + 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False}, + 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False}, + 'rsa-md': {'id': 'RSA-MD', 'deprecated': False}, + 'rscpl': {'id': 'RSCPL', 'deprecated': False}, + 'ruby': {'id': 'Ruby', 'deprecated': False}, + 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False}, + 'sax-pd': {'id': 'SAX-PD', 'deprecated': False}, + 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False}, + 'saxpath': {'id': 'Saxpath', 'deprecated': False}, + 'scea': {'id': 'SCEA', 'deprecated': False}, + 'schemereport': {'id': 'SchemeReport', 'deprecated': False}, + 'sendmail': {'id': 'Sendmail', 'deprecated': False}, + 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False}, + 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False}, + 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False}, + 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False}, + 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False}, + 'sgp4': {'id': 'SGP4', 'deprecated': False}, + 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False}, + 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False}, + 'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False}, + 'sissl': {'id': 'SISSL', 'deprecated': False}, + 'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False}, + 'sl': {'id': 'SL', 'deprecated': False}, + 'sleepycat': {'id': 'Sleepycat', 'deprecated': False}, + 'smlnj': {'id': 'SMLNJ', 'deprecated': False}, + 'smppl': {'id': 'SMPPL', 'deprecated': False}, + 'snia': {'id': 'SNIA', 'deprecated': False}, + 'snprintf': {'id': 'snprintf', 'deprecated': False}, + 'softsurfer': {'id': 'softSurfer', 'deprecated': False}, + 'soundex': {'id': 'Soundex', 'deprecated': False}, + 'spencer-86': {'id': 'Spencer-86', 'deprecated': False}, + 'spencer-94': {'id': 'Spencer-94', 'deprecated': False}, + 'spencer-99': {'id': 'Spencer-99', 'deprecated': False}, + 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False}, + 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False}, + 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False}, + 'ssh-short': {'id': 'SSH-short', 'deprecated': False}, + 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False}, + 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False}, + 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True}, + 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False}, + 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False}, + 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False}, + 'sunpro': {'id': 'SunPro', 'deprecated': False}, + 'swl': {'id': 'SWL', 'deprecated': False}, + 'swrule': {'id': 'swrule', 'deprecated': False}, + 'symlinks': {'id': 'Symlinks', 'deprecated': False}, + 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False}, + 'tcl': {'id': 'TCL', 'deprecated': False}, + 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False}, + 'termreadkey': {'id': 'TermReadKey', 'deprecated': False}, + 'tgppl-1.0': {'id': 
'TGPPL-1.0', 'deprecated': False}, + 'threeparttable': {'id': 'threeparttable', 'deprecated': False}, + 'tmate': {'id': 'TMate', 'deprecated': False}, + 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False}, + 'tosl': {'id': 'TOSL', 'deprecated': False}, + 'tpdl': {'id': 'TPDL', 'deprecated': False}, + 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False}, + 'ttwl': {'id': 'TTWL', 'deprecated': False}, + 'ttyp0': {'id': 'TTYP0', 'deprecated': False}, + 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False}, + 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False}, + 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False}, + 'ucar': {'id': 'UCAR', 'deprecated': False}, + 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False}, + 'ulem': {'id': 'ulem', 'deprecated': False}, + 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False}, + 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False}, + 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False}, + 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False}, + 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False}, + 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False}, + 'unlicense': {'id': 'Unlicense', 'deprecated': False}, + 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False}, + 'urt-rle': {'id': 'URT-RLE', 'deprecated': False}, + 'vim': {'id': 'Vim', 'deprecated': False}, + 'vostrom': {'id': 'VOSTROM', 'deprecated': False}, + 'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False}, + 'w3c': {'id': 'W3C', 'deprecated': False}, + 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False}, + 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False}, + 'w3m': {'id': 'w3m', 'deprecated': False}, + 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False}, + 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False}, + 'wsuipa': {'id': 'Wsuipa', 'deprecated': False}, + 'wtfpl': {'id': 'WTFPL', 'deprecated': False}, + 'wxwindows': {'id': 'wxWindows', 'deprecated': True}, + 'x11': {'id': 'X11', 'deprecated': False}, + 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False}, + 'x11-swapped': {'id': 'X11-swapped', 'deprecated': False}, + 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False}, + 'xerox': {'id': 'Xerox', 'deprecated': False}, + 'xfig': {'id': 'Xfig', 'deprecated': False}, + 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False}, + 'xinetd': {'id': 'xinetd', 'deprecated': False}, + 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False}, + 'xlock': {'id': 'xlock', 'deprecated': False}, + 'xnet': {'id': 'Xnet', 'deprecated': False}, + 'xpp': {'id': 'xpp', 'deprecated': False}, + 'xskat': {'id': 'XSkat', 'deprecated': False}, + 'xzoom': {'id': 'xzoom', 'deprecated': False}, + 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False}, + 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False}, + 'zed': {'id': 'Zed', 'deprecated': False}, + 'zeeff': {'id': 'Zeeff', 'deprecated': False}, + 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False}, + 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False}, + 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False}, + 'zlib': {'id': 'Zlib', 'deprecated': False}, + 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False}, + 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False}, + 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False}, + 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False}, +} + +EXCEPTIONS: dict[str, SPDXException] = { + '389-exception': {'id': 
'389-exception', 'deprecated': False}, + 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False}, + 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False}, + 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False}, + 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False}, + 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False}, + 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False}, + 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 'deprecated': False}, + 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 'deprecated': False}, + 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False}, + 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False}, + 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False}, + 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False}, + 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False}, + 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False}, + 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False}, + 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False}, + 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False}, + 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False}, + 'fmt-exception': {'id': 'fmt-exception', 'deprecated': False}, + 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False}, + 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False}, + 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False}, + 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False}, + 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False}, + 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False}, + 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False}, + 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False}, + 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False}, + 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False}, + 'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False}, + 'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False}, + 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False}, + 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False}, + 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False}, + 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False}, + 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False}, + 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False}, + 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False}, + 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False}, + 'llgpl': {'id': 'LLGPL', 'deprecated': False}, + 'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False}, + 
'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False}, + 'mif-exception': {'id': 'mif-exception', 'deprecated': False}, + 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True}, + 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False}, + 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False}, + 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False}, + 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False}, + 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False}, + 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False}, + 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False}, + 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False}, + 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False}, + 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False}, + 'romic-exception': {'id': 'romic-exception', 'deprecated': False}, + 'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False}, + 'sane-exception': {'id': 'SANE-exception', 'deprecated': False}, + 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False}, + 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False}, + 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False}, + 'swi-exception': {'id': 'SWI-exception', 'deprecated': False}, + 'swift-exception': {'id': 'Swift-exception', 'deprecated': False}, + 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False}, + 'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False}, + 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False}, + 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False}, + 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False}, + 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False}, + 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False}, +} diff --git a/.venv/Lib/site-packages/packaging/markers.py b/.venv/Lib/site-packages/packaging/markers.py new file mode 100644 index 00000000..fb7f49cf --- /dev/null +++ b/.venv/Lib/site-packages/packaging/markers.py @@ -0,0 +1,331 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import operator +import os +import platform +import sys +from typing import Any, Callable, TypedDict, cast + +from ._parser import MarkerAtom, MarkerList, Op, Value, Variable +from ._parser import parse_marker as _parse_marker +from ._tokenizer import ParserSyntaxError +from .specifiers import InvalidSpecifier, Specifier +from .utils import canonicalize_name + +__all__ = [ + "InvalidMarker", + "Marker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. 
+ """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Environment(TypedDict): + implementation_name: str + """The implementation's identifier, e.g. ``'cpython'``.""" + + implementation_version: str + """ + The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or + ``'7.3.13'`` for PyPy3.10 v7.3.13. + """ + + os_name: str + """ + The value of :py:data:`os.name`. The name of the operating system dependent module + imported, e.g. ``'posix'``. + """ + + platform_machine: str + """ + Returns the machine type, e.g. ``'i386'``. + + An empty string if the value cannot be determined. + """ + + platform_release: str + """ + The system's release, e.g. ``'2.2.0'`` or ``'NT'``. + + An empty string if the value cannot be determined. + """ + + platform_system: str + """ + The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``. + + An empty string if the value cannot be determined. + """ + + platform_version: str + """ + The system's release version, e.g. ``'#3 on degas'``. + + An empty string if the value cannot be determined. + """ + + python_full_version: str + """ + The Python version as string ``'major.minor.patchlevel'``. + + Note that unlike the Python :py:data:`sys.version`, this value will always include + the patchlevel (it defaults to 0). + """ + + platform_python_implementation: str + """ + A string identifying the Python implementation, e.g. ``'CPython'``. + """ + + python_version: str + """The Python version as string ``'major.minor'``.""" + + sys_platform: str + """ + This string contains a platform identifier that can be used to append + platform-specific components to :py:data:`sys.path`, for instance. + + For Unix systems, except on Linux and AIX, this is the lowercased OS name as + returned by ``uname -s`` with the first part of the version as returned by + ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python + was built. + """ + + +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. + """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results + + +def _format_marker( + marker: list[str] | MarkerAtom | str, first: bool | None = True +) -> str: + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Operator | None = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize(*values: str, key: str) -> tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values + + +def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool: + groups: list[list[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: sys._version_info) -> str: + version = f"{info.major}.{info.minor}.{info.micro}" + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Environment: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
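+        # A typical input here looks like:
+        #     Marker('python_version >= "3.9" and os_name == "posix"')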
+        try:
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'and',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'or',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
+    def evaluate(self, environment: dict[str, str] | None = None) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = cast("dict[str, str]", default_environment())
+        current_environment["extra"] = ""
+        if environment is not None:
+            current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+            if current_environment["extra"] is None:
+                current_environment["extra"] = ""
+
+        return _evaluate_markers(
+            self._markers, _repair_python_full_version(current_environment)
+        )
+
+
+def _repair_python_full_version(env: dict[str, str]) -> dict[str, str]:
+    """
+    Work around platform.python_version() returning something that is not PEP 440
+    compliant for non-tagged Python builds.
+    """
+    if env["python_full_version"].endswith("+"):
+        env["python_full_version"] += "local"
+    return env
diff --git a/.venv/Lib/site-packages/packaging/metadata.py b/.venv/Lib/site-packages/packaging/metadata.py
new file mode 100644
index 00000000..721f411c
--- /dev/null
+++ b/.venv/Lib/site-packages/packaging/metadata.py
@@ -0,0 +1,863 @@
+from __future__ import annotations
+
+import email.feedparser
+import email.header
+import email.message
+import email.parser
+import email.policy
+import pathlib
+import sys
+import typing
+from typing import (
+    Any,
+    Callable,
+    Generic,
+    Literal,
+    TypedDict,
+    cast,
+)
+
+from . import licenses, requirements, specifiers, utils
+from . import version as version_module
+from .licenses import NormalizedLicenseExpression
+
+T = typing.TypeVar("T")
+
+
+if sys.version_info >= (3, 11):  # pragma: no cover
+    ExceptionGroup = ExceptionGroup
+else:  # pragma: no cover
+
+    class ExceptionGroup(Exception):
+        """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.
+
+        If :external:exc:`ExceptionGroup` is already defined by Python itself,
+        that version is used instead.
+        """
+
+        message: str
+        exceptions: list[Exception]
+
+        def __init__(self, message: str, exceptions: list[Exception]) -> None:
+            self.message = message
+            self.exceptions = exceptions
+
+        def __repr__(self) -> str:
+            return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"
+
+
+class InvalidMetadata(ValueError):
+    """A metadata field contains invalid data."""
+
+    field: str
+    """The name of the field that contains invalid data."""
+
+    def __init__(self, field: str, message: str) -> None:
+        self.field = field
+        super().__init__(message)
+
+
+# The RawMetadata class attempts to make as few assumptions about the underlying
+# serialization formats as possible. The idea is that as long as a serialization
+# format offers some very basic primitives in *some* way then we can support
+# serializing to and from that format.
+class RawMetadata(TypedDict, total=False):
+    """A dictionary of raw core metadata.
+
+    Each field in core metadata maps to a key of this dictionary (when data is
+    provided). The key is lower-case and underscores are used instead of dashes
+    compared to the equivalent core metadata field. Any core metadata field that
+    can be specified multiple times or can hold multiple values in a single
+    field has a key with a plural name. See :class:`Metadata` whose attributes
+    match the keys of this dictionary.
+
+    Core metadata fields that can be specified multiple times are stored as a
+    list or dict depending on which is appropriate for the field. Any fields
+    which hold multiple values in a single field are stored as a list.
+
+    """
+
+    # Metadata 1.0 - PEP 241
+    metadata_version: str
+    name: str
+    version: str
+    platforms: list[str]
+    summary: str
+    description: str
+    keywords: list[str]
+    home_page: str
+    author: str
+    author_email: str
+    license: str
+
+    # Metadata 1.1 - PEP 314
+    supported_platforms: list[str]
+    download_url: str
+    classifiers: list[str]
+    requires: list[str]
+    provides: list[str]
+    obsoletes: list[str]
+
+    # Metadata 1.2 - PEP 345
+    maintainer: str
+    maintainer_email: str
+    requires_dist: list[str]
+    provides_dist: list[str]
+    obsoletes_dist: list[str]
+    requires_python: str
+    requires_external: list[str]
+    project_urls: dict[str, str]
+
+    # Metadata 2.0
+    # PEP 426 attempted to completely revamp the metadata format
+    # but got stuck without ever being able to build consensus on
+    # it and ultimately ended up withdrawn.
+    #
+    # However, a number of tools had started emitting METADATA with
+    # `2.0` Metadata-Version, so for historical reasons, this version
+    # was skipped.
+
+    # Metadata 2.1 - PEP 566
+    description_content_type: str
+    provides_extra: list[str]
+
+    # Metadata 2.2 - PEP 643
+    dynamic: list[str]
+
+    # Metadata 2.3 - PEP 685
+    # No new fields were added in PEP 685, just some edge cases were
+    # tightened up to provide better interoperability.
+
+    # Metadata 2.4 - PEP 639
+    license_expression: str
+    license_files: list[str]
+
+
+_STRING_FIELDS = {
+    "author",
+    "author_email",
+    "description",
+    "description_content_type",
+    "download_url",
+    "home_page",
+    "license",
+    "license_expression",
+    "maintainer",
+    "maintainer_email",
+    "metadata_version",
+    "name",
+    "requires_python",
+    "summary",
+    "version",
+}
+
+_LIST_FIELDS = {
+    "classifiers",
+    "dynamic",
+    "license_files",
+    "obsoletes",
+    "obsoletes_dist",
+    "platforms",
+    "provides",
+    "provides_dist",
+    "provides_extra",
+    "requires",
+    "requires_dist",
+    "requires_external",
+    "supported_platforms",
+}
+
+_DICT_FIELDS = {
+    "project_urls",
+}
+
+
+def _parse_keywords(data: str) -> list[str]:
+    """Split a string of comma-separated keywords into a list of keywords."""
+    return [k.strip() for k in data.split(",")]
+
+
+def _parse_project_urls(data: list[str]) -> dict[str, str]:
+    """Parse a list of label/URL string pairings separated by a comma."""
+    urls = {}
+    for pair in data:
+        # Our logic is slightly tricky here as we want to try and do
+        # *something* reasonable with malformed data.
+        #
+        # The main thing that we have to worry about is data that does
+        # not have a ',' at all to split the label from the value. There
+        # isn't a singular right answer here, and we will fail validation
+        # later on (if the caller is validating) so it doesn't *really*
+        # matter, but since the missing value has to be an empty str
+        # and our return value is dict[str, str], if we let the key
+        # be the missing value, then they'd have multiple '' values that
+        # overwrite each other in an accumulating dict.
+        #
+        # The other potential issue is that it's possible to have the
+        # same label multiple times in the metadata, with no solid "right"
+        # answer with what to do in that case. As such, we'll do the only
+        # thing we can, which is treat the field as unparseable and add it
+        # to our list of unparsed fields.
+        parts = [p.strip() for p in pair.split(",", 1)]
+        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items
+
+        # TODO: The spec doesn't say anything about if the keys should be
+        #       considered case sensitive or not... logically they should
+        #       be case-preserving and case-insensitive, but doing that
+        #       would open up more cases where we might have duplicate
+        #       entries.
+        label, url = parts
+        if label in urls:
+            # The label already exists in our set of urls, so this field
+            # is unparseable, and we can just add the whole thing to our
+            # unparseable data and stop processing it.
+            raise KeyError("duplicate labels in project urls")
+        urls[label] = url
+
+    return urls
+
+
+def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
+    """Get the body of the message."""
+    # If our source is a str, then our caller has managed encodings for us,
+    # and we don't need to deal with it.
+    if isinstance(source, str):
+        payload = msg.get_payload()
+        assert isinstance(payload, str)
+        return payload
+    # If our source is a bytes, then we're managing the encoding and we need
+    # to deal with it.
+    else:
+        bpayload = msg.get_payload(decode=True)
+        assert isinstance(bpayload, bytes)
+        try:
+            return bpayload.decode("utf8", "strict")
+        except UnicodeDecodeError as exc:
+            raise ValueError("payload in an invalid encoding") from exc
+
+
+# The various parse_FORMAT functions here are intended to be as lenient as
+# possible in their parsing, while still returning a correctly typed
+# RawMetadata.
+# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "license-expression": "license_expression", + "license-file": "license_files", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: dict[str, str | list[str] | dict[str, str]] = {} + unparsed: dict[str, list[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. 
When you pull that data back out (such as with get_all()),
+    # it looks to see if the str has any surrogate escapes, and if it does
+    # it wraps it in a Header object instead of returning the string.
+    #
+    # As such, we'll look for those Header objects, and fix up the encoding.
+    value = []
+    # Flag if we have run into any issues processing the headers, thus
+    # signalling that the data belongs in 'unparsed'.
+    valid_encoding = True
+    for h in headers:
+        # It's unclear if this can return more types than just a Header or
+        # a str, so we'll just assert here to make sure.
+        assert isinstance(h, (email.header.Header, str))
+
+        # If it's a header object, we need to do our little dance to get
+        # the real data out of it. In cases where there is invalid data
+        # we're going to end up with mojibake, but there's no obvious, good
+        # way around that without reimplementing parts of the Header object
+        # ourselves.
+        #
+        # That should be fine since, if mojibake happens, this key is
+        # going into the unparsed dict anyways.
+        if isinstance(h, email.header.Header):
+            # The Header object stores its data as chunks, and each chunk
+            # can be independently encoded, so we'll need to check each
+            # of them.
+            chunks: list[tuple[bytes, str | None]] = []
+            for bin, encoding in email.header.decode_header(h):
+                try:
+                    bin.decode("utf8", "strict")
+                except UnicodeDecodeError:
+                    # Enable mojibake.
+                    encoding = "latin1"
+                    valid_encoding = False
+                else:
+                    encoding = "utf8"
+                chunks.append((bin, encoding))
+
+            # Turn our chunks back into a Header object, then let that
+            # Header object do the right thing to turn them into a
+            # string for us.
+            value.append(str(email.header.make_header(chunks)))
+        # This is already a string, so just add it.
+        else:
+            value.append(h)
+
+    # We've processed all of our values to get them into a list of str,
+    # but we may have mojibake data, in which case this is an unparsed
+    # field.
+    if not valid_encoding:
+        unparsed[name] = value
+        continue
+
+    raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
+    if raw_name is None:
+        # This is a bit of a weird situation: we've encountered a key that
+        # we don't know the meaning of, so we don't know whether it's meant
+        # to be a list or not.
+        #
+        # Since we can't really tell one way or another, we'll just leave it
+        # as a list, even though it may be a single item list, because that's
+        # what makes the most sense for email headers.
+        unparsed[name] = value
+        continue
+
+    # If this is one of our string fields, then we'll check to see if our
+    # value is a list of a single item. If it is then we'll assume that
+    # it was emitted as a single string, and unwrap the str from inside
+    # the list.
+    #
+    # If it's any other kind of data, then we haven't the faintest clue
+    # what we should parse it as, and we have to just add it to our list
+    # of unparsed stuff.
+    if raw_name in _STRING_FIELDS and len(value) == 1:
+        raw[raw_name] = value[0]
+    # If this is one of our list of string fields, then we can just assign
+    # the value, since email *only* has strings, and our get_all() call
+    # above ensures that this is a list.
+    elif raw_name in _LIST_FIELDS:
+        raw[raw_name] = value
+    # Special Case: Keywords
+    # The keywords field is implemented in the metadata spec as a str,
+    # but it conceptually is a list of strings, and is serialized using
+    # ", ".join(keywords), so we'll do some light data massaging to turn
+    # this into what it logically is.
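+    # e.g. a header of 'Keywords: packaging, pep, versions' becomes
+    # ["packaging", "pep", "versions"].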
+    elif raw_name == "keywords" and len(value) == 1:
+        raw[raw_name] = _parse_keywords(value[0])
+    # Special Case: Project-URL
+    # The project urls is implemented in the metadata spec as a list of
+    # specially-formatted strings that represent a key and a value, which
+    # is fundamentally a mapping, however the email format doesn't support
+    # mappings in a sane way, so it was crammed into a list of strings
+    # instead.
+    #
+    # We will do a little light data massaging to turn this into a map as
+    # it logically should be.
+    elif raw_name == "project_urls":
+        try:
+            raw[raw_name] = _parse_project_urls(value)
+        except KeyError:
+            unparsed[name] = value
+    # Nothing that we've done has managed to parse this, so we'll just
+    # throw it in our unparsed data and move on.
+    else:
+        unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))  # type: ignore[call-overload]
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description; if so, then both
+            # it and this body move to unparsed.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast our `raw` to a metadata, because a TypedDict only supports
+    # literal key names, but we're computing our key names on purpose; the way
+    # this function is implemented, our `TypedDict` can only end up with valid
+    # key names.
+    return cast(RawMetadata, raw), unparsed
+
+
+_NOT_FOUND = object()
+
+
+# Keep the two values in sync.
+_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]
+_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]
+
+_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
+
+
+class _Validator(Generic[T]):
+    """Validate a metadata field.
+
+    All _process_*() methods correspond to a core metadata field. The method is
+    called with the field's raw value. If the raw value is valid it is returned
+    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
+    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
+    as appropriate).
+    """
+
+    name: str
+    raw_name: str
+    added: _MetadataVersion
+
+    def __init__(
+        self,
+        *,
+        added: _MetadataVersion = "1.0",
+    ) -> None:
+        self.added = added
+
+    def __set_name__(self, _owner: Metadata, name: str) -> None:
+        self.name = name
+        self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
+
+    def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
+        # With Python 3.8, the caching can be replaced with functools.cached_property().
+        # No need to check the cache as attribute lookup will resolve into the
+        # instance's __dict__ before __get__ is called.
+        cache = instance.__dict__
+        value = instance._raw.get(self.name)
+
+        # To make the _process_* methods easier, we'll check if the value is None
+        # and if this field is NOT a required attribute, and if both of those
+        # things are true, we'll skip the converter.
This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Exception | None = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. + try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
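+        # e.g. 'text/markdown; charset=UTF-8; variant=GFM' passes every check
+        # in this method unchanged.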
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: list[str]) -> list[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{dynamic_field!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata( + f"{dynamic_field!r} is not a valid dynamic field" + ) + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: list[str], + ) -> list[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_requires_dist( + self, + value: list[str], + ) -> list[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata( + f"{req!r} is invalid for {{field}}", cause=exc + ) from exc + else: + return reqs + + def _process_license_expression( + self, value: str + ) -> NormalizedLicenseExpression | None: + try: + return licenses.canonicalize_license_expression(value) + except ValueError as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) from exc + + def _process_license_files(self, value: list[str]) -> list[str]: + paths = [] + for path in value: + if ".." in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, " + "parent directory indicators are not allowed" + ) + if "*" in path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be resolved" + ) + if ( + pathlib.PurePosixPath(path).is_absolute() + or pathlib.PureWindowsPath(path).is_absolute() + ): + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, paths must be relative" + ) + if pathlib.PureWindowsPath(path).as_posix() != path: + raise self._invalid_metadata( + f"{path!r} is invalid for {{field}}, " + "paths must use '/' delimiter" + ) + paths.append(path) + return paths + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata: + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. 
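+        # e.g. Metadata.from_raw({"metadata_version": "2.3", "name": "example",
+        #                         "version": "1.0"}) validates just the three
+        #      required fields and returns an enriched instance.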
+ + if validate: + exceptions: list[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. + try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + f"{field} introduced in metadata version " + f"{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata: + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + # `name` is not normalized/typed to NormalizedName so as to provide access to + # the original/raw name. 
+ name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[list[str] | None] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[str | None] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[list[str] | None] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[str | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: _Validator[str | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[str | None] = _Validator() + """:external:ref:`core-metadata-license`""" + license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator( + added="2.4" + ) + """:external:ref:`core-metadata-license-expression`""" + license_files: _Validator[list[str] | None] = _Validator(added="2.4") + """:external:ref:`core-metadata-license-file`""" + classifiers: _Validator[list[str] | None] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. 
+ provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[list[str] | None] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[list[str] | None] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[list[str] | None] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/.venv/Lib/site-packages/packaging/py.typed b/.venv/Lib/site-packages/packaging/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/.venv/Lib/site-packages/packaging/requirements.py b/.venv/Lib/site-packages/packaging/requirements.py new file mode 100644 index 00000000..4e068c95 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/requirements.py @@ -0,0 +1,91 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import annotations + +from typing import Any, Iterator + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
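+    # e.g. Requirement('packaging[test]>=24.0; python_version >= "3.9"')
+    # parses into name='packaging', extras={'test'}, a SpecifierSet, and a Marker.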
+
+    def __init__(self, requirement_string: str) -> None:
+        try:
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        self.url: str | None = parsed.url or None
+        self.extras: set[str] = set(parsed.extras or [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Marker | None = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def _iter_parts(self, name: str) -> Iterator[str]:
+        yield name
+
+        if self.extras:
+            formatted_extras = ",".join(sorted(self.extras))
+            yield f"[{formatted_extras}]"
+
+        if self.specifier:
+            yield str(self.specifier)
+
+        if self.url:
+            yield f"@ {self.url}"
+            if self.marker:
+                yield " "
+
+        if self.marker:
+            yield f"; {self.marker}"
+
+    def __str__(self) -> str:
+        return "".join(self._iter_parts(self.name))
+
+    def __repr__(self) -> str:
+        return f"<Requirement('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash(
+            (
+                self.__class__.__name__,
+                *self._iter_parts(canonicalize_name(self.name)),
+            )
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            canonicalize_name(self.name) == canonicalize_name(other.name)
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
diff --git a/.venv/Lib/site-packages/packaging/specifiers.py b/.venv/Lib/site-packages/packaging/specifiers.py
new file mode 100644
index 00000000..b30926af
--- /dev/null
+++ b/.venv/Lib/site-packages/packaging/specifiers.py
@@ -0,0 +1,1020 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
+    from packaging.version import Version
+"""
+
+from __future__ import annotations
+
+import abc
+import itertools
+import re
+from typing import Callable, Iterable, Iterator, TypeVar, Union
+
+from .utils import canonicalize_version
+from .version import Version
+
+UnparsedVersion = Union[Version, str]
+UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
+CallableOperator = Callable[[Version, str], bool]
+
+
+def _coerce_version(version: UnparsedVersion) -> Version:
+    if not isinstance(version, Version):
+        version = Version(version)
+    return version
+
+
+class InvalidSpecifier(ValueError):
+    """
+    Raised when attempting to create a :class:`Specifier` with a specifier
+    string that is invalid.
+
+    >>> Specifier("lolwat")
+    Traceback (most recent call last):
+        ...
+    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
+    """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def __str__(self) -> str:
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self) -> int:
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other: object) -> bool:
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+
+        :param other: The other object to check against.
+        """
+
+    @property
+    @abc.abstractmethod
+    def prereleases(self) -> bool | None:
+        """Whether or not pre-releases as a whole are allowed.
+
+        This can be set to either ``True`` or ``False`` to explicitly enable or disable
+        prereleases or it can be set to ``None`` (the default) to use default semantics.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        """Setter for :attr:`prereleases`.
+
+        :param value: The value to set.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item: str, prereleases: bool | None = None) -> bool:
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """
+        Takes an iterable of items and filters them so that only items which
+        are contained within this specifier are allowed in it.
+        """
+
+
+class Specifier(BaseSpecifier):
+    """This class abstracts handling of version specifiers.
+
+    .. tip::
+
+        It is generally not required to instantiate this manually. You should
+        instead prefer to work with :class:`SpecifierSet`, which can parse
+        comma-separated version specifiers (which is what package metadata contains).
+    """
+
+    _operator_regex_str = r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        """
+    _version_regex_str = r"""
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s;)]*  # The arbitrary version can be just about anything,
+                          # we match everything except for whitespace, a
+                          # semi-colon for marker support, and a closing paren
+                          # since versions can be enclosed in them.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+
+                # You cannot use a wild card and a pre-release, post-release, a dev or
+                # local version together so group them with a | and make them optional.
+                (?:
+                    \.\*  # Wild card syntax of .*
+                    |
+                    (?:                                  # pre release
+                        [-_\.]?
+                        (alpha|beta|preview|pre|a|b|c|rc)
+                        [-_\.]?
+                        [0-9]*
+                    )?
+                    (?:                                  # post release
+                        (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                    )?
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+ (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: bool | None = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: {spec!r}") + + self._spec: tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... 
Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = _version_join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+        return Version(prospective.public) <= Version(spec)
+
+    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) >= Version(spec)
+
+    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not, we can short-circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not, we can short-circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # greater than the spec version *and* it's not a post-release or local
+        # version of the same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+        return str(prospective).lower() == str(spec).lower()
+
+    def __contains__(self, item: str | Version) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
+
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version, this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not. If we do not support prereleases, then we can short-circuit
+        # whenever this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+ else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> list[str]: + """Split version into components. + + The split components are intended for version comparison. The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ + result: list[str] = [] + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _version_join(components: list[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. + """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. + """ + + def __init__( + self, + specifiers: str | Iterable[Specifier] = "", + prereleases: bool | None = None, + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + May also be an iterable of ``Specifier`` instances, which will be used + as is. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + if isinstance(specifiers, str): + # Split on `,` to break each individual specifier into its own item, and + # strip each item to remove leading/trailing whitespace. 
+            split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+            # Make each individual specifier a Specifier and save in a frozen set
+            # for later.
+            self._specs = frozenset(map(Specifier, split_specifiers))
+        else:
+            # Save the supplied specifiers in a frozen set.
+            self._specs = frozenset(specifiers)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    @property
+    def prereleases(self) -> bool | None:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self,
+        item: UnparsedVersion,
+        prereleases: bool | None = None,
+        installed: bool | None = None,
+    ) -> bool:
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iter(iterable)
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases.
+ else: + filtered: list[UnparsedVersionVar] = [] + found_prereleases: list[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/.venv/Lib/site-packages/packaging/tags.py b/.venv/Lib/site-packages/packaging/tags.py new file mode 100644 index 00000000..f5903402 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/tags.py @@ -0,0 +1,617 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import logging +import platform +import re +import struct +import subprocess +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Iterable, + Iterator, + Sequence, + Tuple, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +AppleVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: dict[str, str] = { + "python": "py", # Generic. + "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = struct.calcsize("P") == 4 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. + """ + + __slots__ = ["_abi", "_hash", "_interpreter", "_platform"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> frozenset[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. 
+ """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> int | str | None: + value: int | str | None = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_").replace(" ", "_") + + +def _is_threaded_cpython(abis: list[str]) -> bool: + """ + Determine if the ABI corresponds to a threaded (`--disable-gil`) build. + + The threaded builds are indicated by a "t" in the abiflags. + """ + if len(abis) == 0: + return False + # expect e.g., cp313 + m = re.match(r"cp\d+(.*)", abis[0]) + if not m: + return False + abiflags = m.group(1) + return "t" in abiflags + + +def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`) + builds do not support abi3. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + threading = debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. + # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn): + threading = "t" + if py_version < (3, 8): + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append(f"cp{version}{threading}") + abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}") + return abis + + +def cpython_tags( + python_version: PythonVersion | None = None, + abis: Iterable[str] | None = None, + platforms: Iterable[str] | None = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a CPython interpreter. + + The tags consist of: + - cp-- + - cp-abi3- + - cp-none- + - cp-abi3- # Older Python versions down to 3.2. + + If python_version only specifies a major version then user-provided ABIs and + the 'none' ABItag will be used. + + If 'abi3' or 'none' are specified in 'abis' then they will be yielded at + their normal position and not at the beginning. 
+ """ + if not python_version: + python_version = sys.version_info[:2] + + interpreter = f"cp{_version_nodot(python_version[:2])}" + + if abis is None: + if len(python_version) > 1: + abis = _cpython_abis(python_version, warn) + else: + abis = [] + abis = list(abis) + # 'abi3' and 'none' are explicitly handled later. + for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or platform_tags()) + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + threading = _is_threaded_cpython(abis) + use_abi3 = _abi3_applies(python_version, threading) + if use_abi3: + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) + + if use_abi3: + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + version = _version_nodot((python_version[0], minor_version)) + interpreter = f"cp{version}" + yield Tag(interpreter, "abi3", platform_) + + +def _generic_abi() -> list[str]: + """ + Return the ABI tag based on EXT_SUFFIX. + """ + # The following are examples of `EXT_SUFFIX`. + # We want to keep the parts which are related to the ABI and remove the + # parts which are related to the platform: + # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310 + # - mac: '.cpython-310-darwin.so' => cp310 + # - win: '.cp310-win_amd64.pyd' => cp310 + # - win: '.pyd' => cp37 (uses _cpython_abis()) + # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73 + # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib' + # => graalpy_38_native + + ext_suffix = _get_config_var("EXT_SUFFIX", warn=True) + if not isinstance(ext_suffix, str) or ext_suffix[0] != ".": + raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')") + parts = ext_suffix.split(".") + if len(parts) < 3: + # CPython3.7 and earlier uses ".pyd" on Windows. + return _cpython_abis(sys.version_info[:2]) + soabi = parts[1] + if soabi.startswith("cpython"): + # non-windows + abi = "cp" + soabi.split("-")[1] + elif soabi.startswith("cp"): + # windows + abi = soabi.split("-")[0] + elif soabi.startswith("pypy"): + abi = "-".join(soabi.split("-")[:2]) + elif soabi.startswith("graalpy"): + abi = "-".join(soabi.split("-")[:3]) + elif soabi: + # pyston, ironpython, others? + abi = soabi + else: + return [] + return [_normalize_string(abi)] + + +def generic_tags( + interpreter: str | None = None, + abis: Iterable[str] | None = None, + platforms: Iterable[str] | None = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a generic interpreter. + + The tags consist of: + - -- + + The "none" ABI will be added if it was not explicitly provided. + """ + if not interpreter: + interp_name = interpreter_name() + interp_version = interpreter_version(warn=warn) + interpreter = "".join([interp_name, interp_version]) + if abis is None: + abis = _generic_abi() + else: + abis = list(abis) + platforms = list(platforms or platform_tags()) + if "none" not in abis: + abis.append("none") + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: + """ + Yields Python versions in descending order. + + After the latest version, the major-only version will be yielded, and then + all previous versions of that major version. 
+ """ + if len(py_version) > 1: + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" + if len(py_version) > 1: + for minor in range(py_version[1] - 1, -1, -1): + yield f"py{_version_nodot((py_version[0], minor))}" + + +def compatible_tags( + python_version: PythonVersion | None = None, + interpreter: str | None = None, + platforms: Iterable[str] | None = None, +) -> Iterator[Tag]: + """ + Yields the sequence of tags that are compatible with a specific version of Python. + + The tags consist of: + - py*-none- + - -none-any # ... if `interpreter` is provided. + - py*-none-any + """ + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or platform_tags()) + for version in _py_interpreter_range(python_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]: + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: AppleVersion | None = None, arch: str | None = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2]))) + if version == (10, 16): + # When built against an older macOS SDK, Python will report macOS 10.16 + # instead of the real version. + version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + text=True, + ).stdout + version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. 
+ major_version = 10 + for minor_version in range(version[1], -1, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. + minor_version = 0 + for major_version in range(version[0], 10, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. + major_version = 10 + if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = major_version, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + else: + for minor_version in range(16, 3, -1): + compat_version = major_version, minor_version + binary_format = "universal2" + yield f"macosx_{major_version}_{minor_version}_{binary_format}" + + +def ios_platforms( + version: AppleVersion | None = None, multiarch: str | None = None +) -> Iterator[str]: + """ + Yields the platform tags for an iOS system. + + :param version: A two-item tuple specifying the iOS version to generate + platform tags for. Defaults to the current iOS version. + :param multiarch: The CPU architecture+ABI to generate platform tags for - + (the value used by `sys.implementation._multiarch` e.g., + `arm64_iphoneos` or `x84_64_iphonesimulator`). Defaults to the current + multiarch value. + """ + if version is None: + # if iOS is the current platform, ios_ver *must* be defined. However, + # it won't exist for CPython versions before 3.13, which causes a mypy + # error. + _, release, _, _ = platform.ios_ver() # type: ignore[attr-defined, unused-ignore] + version = cast("AppleVersion", tuple(map(int, release.split(".")[:2]))) + + if multiarch is None: + multiarch = sys.implementation._multiarch + multiarch = multiarch.replace("-", "_") + + ios_platform_template = "ios_{major}_{minor}_{multiarch}" + + # Consider any iOS major.minor version from the version requested, down to + # 12.0. 12.0 is the first iOS version that is known to have enough features + # to support CPython. Consider every possible minor release up to X.9. There + # highest the minor has ever gone is 8 (14.8 and 15.8) but having some extra + # candidates that won't ever match doesn't really hurt, and it saves us from + # having to keep an explicit list of known iOS versions in the code. Return + # the results descending order of version number. + + # If the requested major version is less than 12, there won't be any matches. + if version[0] < 12: + return + + # Consider the actual X.Y version that was requested. + yield ios_platform_template.format( + major=version[0], minor=version[1], multiarch=multiarch + ) + + # Consider every minor version from X.0 to the minor version prior to the + # version requested by the platform. 
+ for minor in range(version[1] - 1, -1, -1): + yield ios_platform_template.format( + major=version[0], minor=minor, multiarch=multiarch + ) + + for major in range(version[0] - 1, 11, -1): + for minor in range(9, -1, -1): + yield ios_platform_template.format( + major=major, minor=minor, multiarch=multiarch + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if not linux.startswith("linux_"): + # we should never be here, just yield the sysconfig one and return + yield linux + return + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv8l" + _, arch = linux.split("_", 1) + archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch]) + yield from _manylinux.platform_tags(archs) + yield from _musllinux.platform_tags(archs) + for arch in archs: + yield f"linux_{arch}" + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "iOS": + return ios_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/.venv/Lib/site-packages/packaging/utils.py b/.venv/Lib/site-packages/packaging/utils.py new file mode 100644 index 00000000..23450953 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/utils.py @@ -0,0 +1,163 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import annotations + +import functools +import re +from typing import NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version, _TrimmedRelease + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. 
+ """ + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) +_canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +@functools.singledispatch +def canonicalize_version( + version: Version | str, *, strip_trailing_zero: bool = True +) -> str: + """ + Return a canonical form of a version as a string. + + >>> canonicalize_version('1.0.1') + '1.0.1' + + Per PEP 625, versions may have multiple canonical forms, differing + only by trailing zeros. + + >>> canonicalize_version('1.0.0') + '1' + >>> canonicalize_version('1.0.0', strip_trailing_zero=False) + '1.0.0' + + Invalid versions are returned unaltered. + + >>> canonicalize_version('foo bar baz') + 'foo bar baz' + """ + return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version) + + +@canonicalize_version.register +def _(version: str, *, strip_trailing_zero: bool = True) -> str: + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero) + + +def parse_wheel_filename( + filename: str, +) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename!r}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename!r}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name. 
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename!r}") + name = canonicalize_name(name_part) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename!r}" + ) from e + + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in {filename!r}" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename!r}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. + name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}") + + name = canonicalize_name(name_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename!r}" + ) from e + + return (name, version) diff --git a/.venv/Lib/site-packages/packaging/version.py b/.venv/Lib/site-packages/packaging/version.py new file mode 100644 index 00000000..c9bbda20 --- /dev/null +++ b/.venv/Lib/site-packages/packaging/version.py @@ -0,0 +1,582 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.version import parse, Version +""" + +from __future__ import annotations + +import itertools +import re +from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"] + +LocalType = Tuple[Union[int, str], ...] + +CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] +CmpLocalType = Union[ + NegativeInfinityType, + Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], +] +CmpKey = Tuple[ + int, + Tuple[int, ...], + CmpPrePostDevType, + CmpPrePostDevType, + CmpPrePostDevType, + CmpLocalType, +] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] + + +class _Version(NamedTuple): + epoch: int + release: tuple[int, ...] + dev: tuple[str, int] | None + pre: tuple[str, int] | None + post: tuple[str, int] | None + local: LocalType | None + + +def parse(version: str) -> Version: + """Parse the given version string. + + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. + """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... 
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: {version!r}")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be round-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> int | None:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").public
+        '1!1.2.3.dev1'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3dev1+abc").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+class _TrimmedRelease(Version):
+    @property
+    def release(self) -> tuple[int, ...]:
+        """
+        Release segment without any trailing zeros.
+
+        >>> _TrimmedRelease('1.0.0').release
+        (1,)
+        >>> _TrimmedRelease('0.0').release
+        (0,)
+        """
+        rel = super().release
+        nonzeros = (index for index, val in enumerate(rel) if val)
+        last_nonzero = max(nonzeros, default=0)
+        return rel[: last_nonzero + 1]
+
+
+def _parse_letter_version(
+    letter: str | None, number: str | bytes | SupportsInt | None
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+
+    assert not letter
+    if number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
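A quick sketch of how the helper above normalizes spellings; illustrative only, not part of the patched file (it calls the private helper directly, which outside code should not do):

    assert _parse_letter_version("alpha", None) == ("a", 0)    # implicit 0
    assert _parse_letter_version("preview", "4") == ("rc", 4)  # alternate spelling
    assert _parse_letter_version(None, "1") == ("post", 1)     # implicit post, e.g. "1.0-1"
    assert _parse_letter_version(None, None) is None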
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str | None) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
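Illustrative only: all three separators split the same way, and numeric parts become ints so they compare numerically:

    assert _parse_local_version("ubuntu.20-04") == ("ubuntu", 20, 4)
    assert _parse_local_version(None) is None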
+
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: LocalType | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop the now-leading
+    # zeros until we come to something non-zero, then reverse the rest back
+    # into the correct order, and make that tuple our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
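The net effect of these rules is easiest to see by sorting; a minimal sketch, not part of the patched file, assuming the vendored ``packaging`` package is importable:

    from packaging.version import Version

    versions = ["1.0+local", "1.0.post1", "1.0", "1.0rc1", "1.0.dev0", "1.0a1"]
    print(sorted(versions, key=Version))
    # ['1.0.dev0', '1.0a1', '1.0rc1', '1.0', '1.0+local', '1.0.post1']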
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/INSTALLER b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/LICENSE b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/LICENSE
new file mode 100644
index 00000000..85f4dd63
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/METADATA b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/METADATA
new file mode 100644
index 00000000..2d697b0d
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/METADATA
@@ -0,0 +1,155 @@
+Metadata-Version: 2.1
+Name: pluggy
+Version: 1.5.0
+Summary: plugin and hook calling mechanisms for python
+Home-page: https://github.com/pytest-dev/pluggy
+Author: Holger Krekel
+Author-email: holger@merlinux.eu
+License: MIT
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: dev
+Requires-Dist: pre-commit ; extra == 'dev'
+Requires-Dist: tox ; extra == 'dev'
+Provides-Extra: testing
+Requires-Dist: pytest ; extra == 'testing'
+Requires-Dist: pytest-benchmark ; extra == 'testing'
+
+====================================================
+pluggy - A minimalist production ready plugin system
+====================================================
+
+|pypi| |conda-forge| |versions| |github-actions| |gitter| |black| |codecov|
+
+This is the core framework used by the `pytest`_, `tox`_, and `devpi`_ projects.
+
+Please `read the docs`_ to learn more!
+
+A definitive example
+====================
+.. code-block:: python
+
+    import pluggy
+
+    hookspec = pluggy.HookspecMarker("myproject")
+    hookimpl = pluggy.HookimplMarker("myproject")
+
+
+    class MySpec:
+        """A hook specification namespace."""
+
+        @hookspec
+        def myhook(self, arg1, arg2):
+            """My special little hook that you can customize."""
+
+
+    class Plugin_1:
+        """A hook implementation namespace."""
+
+        @hookimpl
+        def myhook(self, arg1, arg2):
+            print("inside Plugin_1.myhook()")
+            return arg1 + arg2
+
+
+    class Plugin_2:
+        """A 2nd hook implementation namespace."""
+
+        @hookimpl
+        def myhook(self, arg1, arg2):
+            print("inside Plugin_2.myhook()")
+            return arg1 - arg2
+
+
+    # create a manager and add the spec
+    pm = pluggy.PluginManager("myproject")
+    pm.add_hookspecs(MySpec)
+
+    # register plugins
+    pm.register(Plugin_1())
+    pm.register(Plugin_2())
+
+    # call our ``myhook`` hook
+    results = pm.hook.myhook(arg1=1, arg2=2)
+    print(results)
+
+
+Running this directly gets us::
+
+    $ python docs/examples/toy-example.py
+    inside Plugin_2.myhook()
+    inside Plugin_1.myhook()
+    [-1, 3]
+
+
+.. badges
+
+.. |pypi| image:: https://img.shields.io/pypi/v/pluggy.svg
+    :target: https://pypi.org/pypi/pluggy
+
+.. |versions| image:: https://img.shields.io/pypi/pyversions/pluggy.svg
+    :target: https://pypi.org/pypi/pluggy
+
+.. |github-actions| image:: https://github.com/pytest-dev/pluggy/workflows/main/badge.svg
+    :target: https://github.com/pytest-dev/pluggy/actions
+
+.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pluggy.svg
+    :target: https://anaconda.org/conda-forge/pytest
+
+.. |gitter| image:: https://badges.gitter.im/pytest-dev/pluggy.svg
+    :alt: Join the chat at https://gitter.im/pytest-dev/pluggy
+    :target: https://gitter.im/pytest-dev/pluggy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+    :target: https://github.com/ambv/black
+
+.. |codecov| image:: https://codecov.io/gh/pytest-dev/pluggy/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/pytest-dev/pluggy
+    :alt: Code coverage Status
+
+.. links
+.. _pytest:
+    http://pytest.org
+.. _tox:
+    https://tox.readthedocs.org
+.. _devpi:
+    http://doc.devpi.net
+.. _read the docs:
+   https://pluggy.readthedocs.io/en/latest/
+
+
+Support pluggy
+--------------
+
+`Open Collective`_ is an online funding platform for open and transparent communities.
+It provides tools to raise money and share your finances in full transparency.
+
+It is the platform of choice for individuals and companies that want to make one-time or
+monthly donations directly to the project.
+
+``pluggy`` is part of the ``pytest-dev`` project, see more details in the `pytest collective`_.
+
+.. _Open Collective: https://opencollective.com
+.. _pytest collective: https://opencollective.com/pytest
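
Beyond the toy example in the README above, two options interact with how
results are collected: a ``firstresult=True`` hookspec stops at the first
non-``None`` return value, and ``tryfirst=True`` nudges an implementation to
the front of the call chain. A small sketch with hypothetical ``myproject``
names, assuming this vendored pluggy 1.5.0 is importable:

.. code-block:: python

    import pluggy

    hookspec = pluggy.HookspecMarker("myproject")
    hookimpl = pluggy.HookimplMarker("myproject")


    class Spec:
        @hookspec(firstresult=True)
        def compute(self, x):
            """Return a value for x; the first non-None answer wins."""


    class Fast:
        @hookimpl(tryfirst=True)
        def compute(self, x):
            return x * 2


    class Slow:
        @hookimpl
        def compute(self, x):
            return x + 1


    pm = pluggy.PluginManager("myproject")
    pm.add_hookspecs(Spec)
    pm.register(Fast())
    pm.register(Slow())

    # Fast runs first (tryfirst) and returns 20, so Slow is never consulted.
    assert pm.hook.compute(x=10) == 20
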
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/RECORD b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/RECORD
new file mode 100644
index 00000000..0f37fff5
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/RECORD
@@ -0,0 +1,23 @@
+pluggy-1.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pluggy-1.5.0.dist-info/LICENSE,sha256=1rZebCE6XQtXeRHTTW5ZSbn1nXbCOMUHGi8_wWz7JgY,1110
+pluggy-1.5.0.dist-info/METADATA,sha256=6JeHn3o9P9iqwK20MgVHdoqxick1SS3SORb65Iyb-Fw,4812
+pluggy-1.5.0.dist-info/RECORD,,
+pluggy-1.5.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+pluggy-1.5.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
+pluggy/__init__.py,sha256=U8qtIRmmr0SRdbxAF8VJJs01jMUYgKAc9oAjYYCLgz4,980
+pluggy/__pycache__/__init__.cpython-312.pyc,,
+pluggy/__pycache__/_callers.cpython-312.pyc,,
+pluggy/__pycache__/_hooks.cpython-312.pyc,,
+pluggy/__pycache__/_manager.cpython-312.pyc,,
+pluggy/__pycache__/_result.cpython-312.pyc,,
+pluggy/__pycache__/_tracing.cpython-312.pyc,,
+pluggy/__pycache__/_version.cpython-312.pyc,,
+pluggy/__pycache__/_warnings.cpython-312.pyc,,
+pluggy/_callers.py,sha256=8k8i3GVBT_gtccCPFpN8Ww0towWSnSazrl0vbP9UXSY,7316
+pluggy/_hooks.py,sha256=m-3qVLDdn4S9y3pffLOpMQeDI4PDw8hrATK1SC8rQkU,25108
+pluggy/_manager.py,sha256=ylIDFwrUP_mMAGpdRPj9zwxukG7nWJAfY1yylXyXAMo,20265
+pluggy/_result.py,sha256=eEak-7Ie88bRkylsgbLwB6iMogogIMZheq8W3bImmcs,2849
+pluggy/_tracing.py,sha256=kSBr25F_rNklV2QhLD6h1jx6Z1kcKDRbuYvF5jv35pU,2089
+pluggy/_version.py,sha256=OYzqgMEgfFG0au4hzbEdgYI-c7Hxo3wdBtrpEjK1RoY,411
+pluggy/_warnings.py,sha256=td0AvZBpfamriCC3OqsLwxMh-SzAMjfjmc58T5vP3lw,828
+pluggy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/WHEEL b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/WHEEL
new file mode 100644
index 00000000..bab98d67
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/top_level.txt b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/top_level.txt
new file mode 100644
index 00000000..11bdb5c1
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy-1.5.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pluggy
diff --git a/.venv/Lib/site-packages/pluggy/__init__.py b/.venv/Lib/site-packages/pluggy/__init__.py
new file mode 100644
index 00000000..36ce1680
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/__init__.py
@@ -0,0 +1,37 @@
+try:
+    from ._version import version as __version__
+except ImportError:
+    # broken installation, we don't even try
+    # "unknown" only works because we do a poor man's version compare
+    __version__ = "unknown"
+
+__all__ = [
+    "__version__",
+    "PluginManager",
+    "PluginValidationError",
+    "HookCaller",
+    "HookCallError",
+    "HookspecOpts",
+    "HookimplOpts",
+    "HookImpl",
+    "HookRelay",
+    "HookspecMarker",
+    "HookimplMarker",
+    "Result",
+    "PluggyWarning",
+    "PluggyTeardownRaisedWarning",
+]
+
+from ._hooks import HookCaller
+from ._hooks import HookImpl
+from ._hooks import HookimplMarker
+from ._hooks import HookimplOpts
+from ._hooks import HookRelay
+from ._hooks import HookspecMarker
+from ._hooks import HookspecOpts
+from ._manager import PluginManager
+from ._manager import PluginValidationError
+from ._result import HookCallError
+from ._result import Result
+from ._warnings import PluggyTeardownRaisedWarning
+from ._warnings import PluggyWarning
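
The public API lives in the submodules and is only re-exported here, so the
package root is the supported import surface. A quick self-check (assuming
this vendored copy is on ``sys.path``) that every name promised by ``__all__``
actually resolves:

.. code-block:: python

    import pluggy

    # Each re-exported name should be reachable from the package root.
    for name in pluggy.__all__:
        assert hasattr(pluggy, name), name
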
diff --git a/.venv/Lib/site-packages/pluggy/_callers.py b/.venv/Lib/site-packages/pluggy/_callers.py
new file mode 100644
index 00000000..d01f925c
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_callers.py
@@ -0,0 +1,182 @@
+"""
+Call loop machinery
+"""
+
+from __future__ import annotations
+
+from typing import cast
+from typing import Generator
+from typing import Mapping
+from typing import NoReturn
+from typing import Sequence
+from typing import Tuple
+from typing import Union
+import warnings
+
+from ._hooks import HookImpl
+from ._result import HookCallError
+from ._result import Result
+from ._warnings import PluggyTeardownRaisedWarning
+
+
+# Need to distinguish between old- and new-style hook wrappers.
+# Wrapping with a tuple is the fastest type-safe way I found to do it.
+Teardown = Union[
+    Tuple[Generator[None, Result[object], None], HookImpl],
+    Generator[None, object, object],
+]
+
+
+def _raise_wrapfail(
+    wrap_controller: (
+        Generator[None, Result[object], None] | Generator[None, object, object]
+    ),
+    msg: str,
+) -> NoReturn:
+    co = wrap_controller.gi_code
+    raise RuntimeError(
+        "wrap_controller at %r %s:%d %s"
+        % (co.co_name, co.co_filename, co.co_firstlineno, msg)
+    )
+
+
+def _warn_teardown_exception(
+    hook_name: str, hook_impl: HookImpl, e: BaseException
+) -> None:
+    msg = "A plugin raised an exception during an old-style hookwrapper teardown.\n"
+    msg += f"Plugin: {hook_impl.plugin_name}, Hook: {hook_name}\n"
+    msg += f"{type(e).__name__}: {e}\n"
+    msg += "For more information see https://pluggy.readthedocs.io/en/stable/api_reference.html#pluggy.PluggyTeardownRaisedWarning"  # noqa: E501
+    warnings.warn(PluggyTeardownRaisedWarning(msg), stacklevel=5)
+
+
+def _multicall(
+    hook_name: str,
+    hook_impls: Sequence[HookImpl],
+    caller_kwargs: Mapping[str, object],
+    firstresult: bool,
+) -> object | list[object]:
+    """Execute a call into multiple python functions/methods and return the
+    result(s).
+
+    ``caller_kwargs`` comes from HookCaller.__call__().
+    """
+    __tracebackhide__ = True
+    results: list[object] = []
+    exception = None
+    only_new_style_wrappers = True
+    try:  # run impl and wrapper setup functions in a loop
+        teardowns: list[Teardown] = []
+        try:
+            for hook_impl in reversed(hook_impls):
+                try:
+                    args = [caller_kwargs[argname] for argname in hook_impl.argnames]
+                except KeyError:
+                    for argname in hook_impl.argnames:
+                        if argname not in caller_kwargs:
+                            raise HookCallError(
+                                f"hook call must provide argument {argname!r}"
+                            )
+
+                if hook_impl.hookwrapper:
+                    only_new_style_wrappers = False
+                    try:
+                        # If this cast is not valid, a type error is raised below,
+                        # which is the desired response.
+                        res = hook_impl.function(*args)
+                        wrapper_gen = cast(Generator[None, Result[object], None], res)
+                        next(wrapper_gen)  # first yield
+                        teardowns.append((wrapper_gen, hook_impl))
+                    except StopIteration:
+                        _raise_wrapfail(wrapper_gen, "did not yield")
+                elif hook_impl.wrapper:
+                    try:
+                        # If this cast is not valid, a type error is raised below,
+                        # which is the desired response.
+                        res = hook_impl.function(*args)
+                        function_gen = cast(Generator[None, object, object], res)
+                        next(function_gen)  # first yield
+                        teardowns.append(function_gen)
+                    except StopIteration:
+                        _raise_wrapfail(function_gen, "did not yield")
+                else:
+                    res = hook_impl.function(*args)
+                    if res is not None:
+                        results.append(res)
+                        if firstresult:  # halt further impl calls
+                            break
+        except BaseException as exc:
+            exception = exc
+    finally:
+        # Fast path - only new-style wrappers, no Result.
+        if only_new_style_wrappers:
+            if firstresult:  # first result hooks return a single value
+                result = results[0] if results else None
+            else:
+                result = results
+
+            # run all wrapper post-yield blocks
+            for teardown in reversed(teardowns):
+                try:
+                    if exception is not None:
+                        teardown.throw(exception)  # type: ignore[union-attr]
+                    else:
+                        teardown.send(result)  # type: ignore[union-attr]
+                    # Following is unreachable for a well behaved hook wrapper.
+                    # Try to force finalizers otherwise postponed till GC action.
+                    # Note: close() may raise if generator handles GeneratorExit.
+                    teardown.close()  # type: ignore[union-attr]
+                except StopIteration as si:
+                    result = si.value
+                    exception = None
+                    continue
+                except BaseException as e:
+                    exception = e
+                    continue
+                _raise_wrapfail(teardown, "has second yield")  # type: ignore[arg-type]
+
+            if exception is not None:
+                raise exception.with_traceback(exception.__traceback__)
+            else:
+                return result
+
+        # Slow path - need to support old-style wrappers.
+        else:
+            if firstresult:  # first result hooks return a single value
+                outcome: Result[object | list[object]] = Result(
+                    results[0] if results else None, exception
+                )
+            else:
+                outcome = Result(results, exception)
+
+            # run all wrapper post-yield blocks
+            for teardown in reversed(teardowns):
+                if isinstance(teardown, tuple):
+                    try:
+                        teardown[0].send(outcome)
+                    except StopIteration:
+                        pass
+                    except BaseException as e:
+                        _warn_teardown_exception(hook_name, teardown[1], e)
+                        raise
+                    else:
+                        _raise_wrapfail(teardown[0], "has second yield")
+                else:
+                    try:
+                        if outcome._exception is not None:
+                            teardown.throw(outcome._exception)
+                        else:
+                            teardown.send(outcome._result)
+                        # Following is unreachable for a well behaved hook wrapper.
+                        # Try to force finalizers otherwise postponed till GC action.
+                        # Note: close() may raise if generator handles GeneratorExit.
+                        teardown.close()
+                    except StopIteration as si:
+                        outcome.force_result(si.value)
+                        continue
+                    except BaseException as e:
+                        outcome.force_exception(e)
+                        continue
+                    _raise_wrapfail(teardown, "has second yield")
+
+            return outcome.get_result()
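
The fast path in ``_multicall`` applies when every wrapper is a new-style
``wrapper=True`` generator: each one yields exactly once, receives the inner
result via ``send()`` (or the inner exception via ``throw()``), and whatever
it returns becomes the hook's result through ``StopIteration.value``. A
minimal sketch of that protocol, using hypothetical ``demo`` names:

.. code-block:: python

    import pluggy

    hookspec = pluggy.HookspecMarker("demo")
    hookimpl = pluggy.HookimplMarker("demo")


    class Spec:
        @hookspec
        def myhook(self):
            """Collect results from all plugins."""


    class Impl:
        @hookimpl
        def myhook(self):
            return 1


    class Wrapper:
        @hookimpl(wrapper=True)
        def myhook(self):
            # Code before the single yield runs before the inner impls;
            # the yield evaluates to their collected results.
            results = yield
            # The wrapper's return value becomes the hook's result.
            return results + [2]


    pm = pluggy.PluginManager("demo")
    pm.add_hookspecs(Spec)
    pm.register(Impl())
    pm.register(Wrapper())
    assert pm.hook.myhook() == [1, 2]
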
diff --git a/.venv/Lib/site-packages/pluggy/_hooks.py b/.venv/Lib/site-packages/pluggy/_hooks.py
new file mode 100644
index 00000000..362d7918
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_hooks.py
@@ -0,0 +1,715 @@
+"""
+Internal hook annotation, representation and calling machinery.
+"""
+
+from __future__ import annotations
+
+import inspect
+import sys
+from types import ModuleType
+from typing import AbstractSet
+from typing import Any
+from typing import Callable
+from typing import Final
+from typing import final
+from typing import Generator
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import TypedDict
+from typing import TypeVar
+from typing import Union
+import warnings
+
+from ._result import Result
+
+
+_T = TypeVar("_T")
+_F = TypeVar("_F", bound=Callable[..., object])
+_Namespace = Union[ModuleType, type]
+_Plugin = object
+_HookExec = Callable[
+    [str, Sequence["HookImpl"], Mapping[str, object], bool],
+    Union[object, List[object]],
+]
+_HookImplFunction = Callable[..., Union[_T, Generator[None, Result[_T], None]]]
+
+
+class HookspecOpts(TypedDict):
+    """Options for a hook specification."""
+
+    #: Whether the hook is :ref:`first result only <firstresult>`.
+    firstresult: bool
+    #: Whether the hook is :ref:`historic <historic>`.
+    historic: bool
+    #: Whether the hook :ref:`warns when implemented <warn_on_impl>`.
+    warn_on_impl: Warning | None
+    #: Whether the hook warns when :ref:`certain arguments are requested
+    #: <warn_on_impl>`.
+    #:
+    #: .. versionadded:: 1.5
+    warn_on_impl_args: Mapping[str, Warning] | None
+
+
+class HookimplOpts(TypedDict):
+    """Options for a hook implementation."""
+
+    #: Whether the hook implementation is a :ref:`wrapper <hookwrapper>`.
+    wrapper: bool
+    #: Whether the hook implementation is an :ref:`old-style wrapper
+    #: <old_style_hookwrapper>`.
+    hookwrapper: bool
+    #: Whether validation against a hook specification is :ref:`optional
+    #: <optionalhook>`.
+    optionalhook: bool
+    #: Whether to try to order this hook implementation :ref:`first
+    #: <callorder>`.
+    tryfirst: bool
+    #: Whether to try to order this hook implementation :ref:`last
+    #: <callorder>`.
+    trylast: bool
+    #: The name of the hook specification to match, see :ref:`specname`.
+    specname: str | None
+
+
+@final
+class HookspecMarker:
+    """Decorator for marking functions as hook specifications.
+
+    Instantiate it with a ``project_name`` to get a decorator.
+    Calling :meth:`PluginManager.add_hookspecs` later will discover all marked
+    functions if the :class:`PluginManager` uses the same project name.
+    """
+
+    __slots__ = ("project_name",)
+
+    def __init__(self, project_name: str) -> None:
+        self.project_name: Final = project_name
+
+    @overload
+    def __call__(
+        self,
+        function: _F,
+        firstresult: bool = False,
+        historic: bool = False,
+        warn_on_impl: Warning | None = None,
+        warn_on_impl_args: Mapping[str, Warning] | None = None,
+    ) -> _F: ...
+
+    @overload  # noqa: F811
+    def __call__(  # noqa: F811
+        self,
+        function: None = ...,
+        firstresult: bool = ...,
+        historic: bool = ...,
+        warn_on_impl: Warning | None = ...,
+        warn_on_impl_args: Mapping[str, Warning] | None = ...,
+    ) -> Callable[[_F], _F]: ...
+
+    def __call__(  # noqa: F811
+        self,
+        function: _F | None = None,
+        firstresult: bool = False,
+        historic: bool = False,
+        warn_on_impl: Warning | None = None,
+        warn_on_impl_args: Mapping[str, Warning] | None = None,
+    ) -> _F | Callable[[_F], _F]:
+        """If passed a function, directly sets attributes on the function
+        which will make it discoverable to :meth:`PluginManager.add_hookspecs`.
+
+        If passed no function, returns a decorator which can be applied to a
+        function later using the attributes supplied.
+
+        :param firstresult:
+            If ``True``, the 1:N hook call (N being the number of registered
+            hook implementation functions) will stop at I<=N when the I'th
+            function returns a non-``None`` result. See :ref:`firstresult`.
+
+        :param historic:
+            If ``True``, every call to the hook will be memorized and replayed
+            on plugins registered after the call was made. See :ref:`historic`.
+
+        :param warn_on_impl:
+            If given, every implementation of this hook will trigger the given
+            warning. See :ref:`warn_on_impl`.
+
+        :param warn_on_impl_args:
+            If given, every implementation of this hook which requests one of
+            the arguments in the dict will trigger the corresponding warning.
+            See :ref:`warn_on_impl`.
+
+            .. versionadded:: 1.5
+        """
+
+        def setattr_hookspec_opts(func: _F) -> _F:
+            if historic and firstresult:
+                raise ValueError("cannot have a historic firstresult hook")
+            opts: HookspecOpts = {
+                "firstresult": firstresult,
+                "historic": historic,
+                "warn_on_impl": warn_on_impl,
+                "warn_on_impl_args": warn_on_impl_args,
+            }
+            setattr(func, self.project_name + "_spec", opts)
+            return func
+
+        if function is not None:
+            return setattr_hookspec_opts(function)
+        else:
+            return setattr_hookspec_opts
+
+
+@final
+class HookimplMarker:
+    """Decorator for marking functions as hook implementations.
+
+    Instantiate it with a ``project_name`` to get a decorator.
+    Calling :meth:`PluginManager.register` later will discover all marked
+    functions if the :class:`PluginManager` uses the same project name.
+    """
+
+    __slots__ = ("project_name",)
+
+    def __init__(self, project_name: str) -> None:
+        self.project_name: Final = project_name
+
+    @overload
+    def __call__(
+        self,
+        function: _F,
+        hookwrapper: bool = ...,
+        optionalhook: bool = ...,
+        tryfirst: bool = ...,
+        trylast: bool = ...,
+        specname: str | None = ...,
+        wrapper: bool = ...,
+    ) -> _F: ...
+
+    @overload  # noqa: F811
+    def __call__(  # noqa: F811
+        self,
+        function: None = ...,
+        hookwrapper: bool = ...,
+        optionalhook: bool = ...,
+        tryfirst: bool = ...,
+        trylast: bool = ...,
+        specname: str | None = ...,
+        wrapper: bool = ...,
+    ) -> Callable[[_F], _F]: ...
+
+    def __call__(  # noqa: F811
+        self,
+        function: _F | None = None,
+        hookwrapper: bool = False,
+        optionalhook: bool = False,
+        tryfirst: bool = False,
+        trylast: bool = False,
+        specname: str | None = None,
+        wrapper: bool = False,
+    ) -> _F | Callable[[_F], _F]:
+        """If passed a function, directly sets attributes on the function
+        which will make it discoverable to :meth:`PluginManager.register`.
+
+        If passed no function, returns a decorator which can be applied to a
+        function later using the attributes supplied.
+
+        :param optionalhook:
+            If ``True``, a missing matching hook specification will not result
+            in an error (by default it is an error if no matching spec is
+            found). See :ref:`optionalhook`.
+
+        :param tryfirst:
+            If ``True``, this hook implementation will run as early as possible
+            in the chain of N hook implementations for a specification. See
+            :ref:`callorder`.
+
+        :param trylast:
+            If ``True``, this hook implementation will run as late as possible
+            in the chain of N hook implementations for a specification. See
+            :ref:`callorder`.
+
+        :param wrapper:
+            If ``True`` ("new-style hook wrapper"), the hook implementation
+            needs to execute exactly one ``yield``. The code before the
+            ``yield`` is run early before any non-hook-wrapper function is run.
+            The code after the ``yield`` is run after all non-hook-wrapper
+            functions have run. The ``yield`` receives the result value of the
+            inner calls, or raises the exception of inner calls (including
+            earlier hook wrapper calls). The return value of the function
+            becomes the return value of the hook, and a raised exception becomes
+            the exception of the hook. See :ref:`hookwrapper`.
+
+        :param hookwrapper:
+            If ``True`` ("old-style hook wrapper"), the hook implementation
+            needs to execute exactly one ``yield``. The code before the
+            ``yield`` is run early before any non-hook-wrapper function is run.
+            The code after the ``yield`` is run after all non-hook-wrapper
+            functions have run. The ``yield`` receives a :class:`Result` object
+            representing the exception or result outcome of the inner calls
+            (including earlier hook wrapper calls). This option is mutually
+            exclusive with ``wrapper``. See :ref:`old_style_hookwrapper`.
+
+        :param specname:
+            If provided, the given name will be used instead of the function
+            name when matching this hook implementation to a hook specification
+            during registration. See :ref:`specname`.
+
+        .. versionadded:: 1.2.0
+            The ``wrapper`` parameter.
+        """
+
+        def setattr_hookimpl_opts(func: _F) -> _F:
+            opts: HookimplOpts = {
+                "wrapper": wrapper,
+                "hookwrapper": hookwrapper,
+                "optionalhook": optionalhook,
+                "tryfirst": tryfirst,
+                "trylast": trylast,
+                "specname": specname,
+            }
+            setattr(func, self.project_name + "_impl", opts)
+            return func
+
+        if function is None:
+            return setattr_hookimpl_opts
+        else:
+            return setattr_hookimpl_opts(function)
+
+
+def normalize_hookimpl_opts(opts: HookimplOpts) -> None:
+    opts.setdefault("tryfirst", False)
+    opts.setdefault("trylast", False)
+    opts.setdefault("wrapper", False)
+    opts.setdefault("hookwrapper", False)
+    opts.setdefault("optionalhook", False)
+    opts.setdefault("specname", None)
+
+
+_PYPY = hasattr(sys, "pypy_version_info")
+
+
+def varnames(func: object) -> tuple[tuple[str, ...], tuple[str, ...]]:
+    """Return tuple of positional and keywrord argument names for a function,
+    method, class or callable.
+
+    In case of a class, its ``__init__`` method is considered.
+    For methods the ``self`` parameter is not included.
+    """
+    if inspect.isclass(func):
+        try:
+            func = func.__init__
+        except AttributeError:
+            return (), ()
+    elif not inspect.isroutine(func):  # callable object?
+        try:
+            func = getattr(func, "__call__", func)
+        except Exception:
+            return (), ()
+
+    try:
+        # func MUST be a function or method here or we won't parse any args.
+        sig = inspect.signature(
+            func.__func__ if inspect.ismethod(func) else func  # type:ignore[arg-type]
+        )
+    except TypeError:
+        return (), ()
+
+    _valid_param_kinds = (
+        inspect.Parameter.POSITIONAL_ONLY,
+        inspect.Parameter.POSITIONAL_OR_KEYWORD,
+    )
+    _valid_params = {
+        name: param
+        for name, param in sig.parameters.items()
+        if param.kind in _valid_param_kinds
+    }
+    args = tuple(_valid_params)
+    defaults = (
+        tuple(
+            param.default
+            for param in _valid_params.values()
+            if param.default is not param.empty
+        )
+        or None
+    )
+
+    if defaults:
+        index = -len(defaults)
+        args, kwargs = args[:index], tuple(args[index:])
+    else:
+        kwargs = ()
+
+    # strip any implicit instance arg
+    # pypy3 uses "obj" instead of "self" for default dunder methods
+    if not _PYPY:
+        implicit_names: tuple[str, ...] = ("self",)
+    else:
+        implicit_names = ("self", "obj")
+    if args:
+        qualname: str = getattr(func, "__qualname__", "")
+        if inspect.ismethod(func) or ("." in qualname and args[0] in implicit_names):
+            args = args[1:]
+
+    return args, kwargs
+
+
+@final
+class HookRelay:
+    """Hook holder object for performing 1:N hook calls where N is the number
+    of registered plugins."""
+
+    __slots__ = ("__dict__",)
+
+    def __init__(self) -> None:
+        """:meta private:"""
+
+    if TYPE_CHECKING:
+
+        def __getattr__(self, name: str) -> HookCaller: ...
+
+
+# Historical name (pluggy<=1.2), kept for backward compatibility.
+_HookRelay = HookRelay
+
+
+_CallHistory = List[Tuple[Mapping[str, object], Optional[Callable[[Any], None]]]]
+
+
+class HookCaller:
+    """A caller of all registered implementations of a hook specification."""
+
+    __slots__ = (
+        "name",
+        "spec",
+        "_hookexec",
+        "_hookimpls",
+        "_call_history",
+    )
+
+    def __init__(
+        self,
+        name: str,
+        hook_execute: _HookExec,
+        specmodule_or_class: _Namespace | None = None,
+        spec_opts: HookspecOpts | None = None,
+    ) -> None:
+        """:meta private:"""
+        #: Name of the hook getting called.
+        self.name: Final = name
+        self._hookexec: Final = hook_execute
+        # The hookimpls list. The caller iterates it *in reverse*. Format:
+        # 1. trylast nonwrappers
+        # 2. nonwrappers
+        # 3. tryfirst nonwrappers
+        # 4. trylast wrappers
+        # 5. wrappers
+        # 6. tryfirst wrappers
+        self._hookimpls: Final[list[HookImpl]] = []
+        self._call_history: _CallHistory | None = None
+        # TODO: Document, or make private.
+        self.spec: HookSpec | None = None
+        if specmodule_or_class is not None:
+            assert spec_opts is not None
+            self.set_specification(specmodule_or_class, spec_opts)
+
+    # TODO: Document, or make private.
+    def has_spec(self) -> bool:
+        return self.spec is not None
+
+    # TODO: Document, or make private.
+    def set_specification(
+        self,
+        specmodule_or_class: _Namespace,
+        spec_opts: HookspecOpts,
+    ) -> None:
+        if self.spec is not None:
+            raise ValueError(
+                f"Hook {self.spec.name!r} is already registered "
+                f"within namespace {self.spec.namespace}"
+            )
+        self.spec = HookSpec(specmodule_or_class, self.name, spec_opts)
+        if spec_opts.get("historic"):
+            self._call_history = []
+
+    def is_historic(self) -> bool:
+        """Whether this caller is :ref:`historic `."""
+        return self._call_history is not None
+
+    def _remove_plugin(self, plugin: _Plugin) -> None:
+        for i, method in enumerate(self._hookimpls):
+            if method.plugin == plugin:
+                del self._hookimpls[i]
+                return
+        raise ValueError(f"plugin {plugin!r} not found")
+
+    def get_hookimpls(self) -> list[HookImpl]:
+        """Get all registered hook implementations for this hook."""
+        return self._hookimpls.copy()
+
+    def _add_hookimpl(self, hookimpl: HookImpl) -> None:
+        """Add an implementation to the callback chain."""
+        for i, method in enumerate(self._hookimpls):
+            if method.hookwrapper or method.wrapper:
+                splitpoint = i
+                break
+        else:
+            splitpoint = len(self._hookimpls)
+        if hookimpl.hookwrapper or hookimpl.wrapper:
+            start, end = splitpoint, len(self._hookimpls)
+        else:
+            start, end = 0, splitpoint
+
+        if hookimpl.trylast:
+            self._hookimpls.insert(start, hookimpl)
+        elif hookimpl.tryfirst:
+            self._hookimpls.insert(end, hookimpl)
+        else:
+            # find last non-tryfirst method
+            i = end - 1
+            while i >= start and self._hookimpls[i].tryfirst:
+                i -= 1
+            self._hookimpls.insert(i + 1, hookimpl)
+
+    def __repr__(self) -> str:
+        return f""
+
+    def _verify_all_args_are_provided(self, kwargs: Mapping[str, object]) -> None:
+        # This is written to avoid expensive operations when not needed.
+        if self.spec:
+            for argname in self.spec.argnames:
+                if argname not in kwargs:
+                    notincall = ", ".join(
+                        repr(argname)
+                        for argname in self.spec.argnames
+                        # Avoid self.spec.argnames - kwargs.keys() - doesn't preserve order.
+                        if argname not in kwargs.keys()
+                    )
+                    warnings.warn(
+                        "Argument(s) {} which are declared in the hookspec "
+                        "cannot be found in this hook call".format(notincall),
+                        stacklevel=2,
+                    )
+                    break
+
+    def __call__(self, **kwargs: object) -> Any:
+        """Call the hook.
+
+        Only accepts keyword arguments, which should match the hook
+        specification.
+
+        Returns the result(s) of calling all registered plugins, see
+        :ref:`calling`.
+        """
+        assert (
+            not self.is_historic()
+        ), "Cannot directly call a historic hook - use call_historic instead."
+        self._verify_all_args_are_provided(kwargs)
+        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
+        # Copy because plugins may register other plugins during iteration (#438).
+        return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
+
+    def call_historic(
+        self,
+        result_callback: Callable[[Any], None] | None = None,
+        kwargs: Mapping[str, object] | None = None,
+    ) -> None:
+        """Call the hook with given ``kwargs`` for all registered plugins and
+        for all plugins which will be registered afterwards, see
+        :ref:`historic`.
+
+        :param result_callback:
+            If provided, will be called for each non-``None`` result obtained
+            from a hook implementation.
+        """
+        assert self._call_history is not None
+        kwargs = kwargs or {}
+        self._verify_all_args_are_provided(kwargs)
+        self._call_history.append((kwargs, result_callback))
+        # Historizing hooks don't return results.
+        # Remember firstresult isn't compatible with historic.
+        # Copy because plugins may register other plugins during iteration (#438).
+        res = self._hookexec(self.name, self._hookimpls.copy(), kwargs, False)
+        if result_callback is None:
+            return
+        if isinstance(res, list):
+            for x in res:
+                result_callback(x)
+
+    def call_extra(
+        self, methods: Sequence[Callable[..., object]], kwargs: Mapping[str, object]
+    ) -> Any:
+        """Call the hook with some additional temporarily participating
+        methods using the specified ``kwargs`` as call parameters, see
+        :ref:`call_extra`."""
+        assert (
+            not self.is_historic()
+        ), "Cannot directly call a historic hook - use call_historic instead."
+        self._verify_all_args_are_provided(kwargs)
+        opts: HookimplOpts = {
+            "wrapper": False,
+            "hookwrapper": False,
+            "optionalhook": False,
+            "trylast": False,
+            "tryfirst": False,
+            "specname": None,
+        }
+        hookimpls = self._hookimpls.copy()
+        for method in methods:
+            hookimpl = HookImpl(None, "", method, opts)
+            # Find last non-tryfirst nonwrapper method.
+            i = len(hookimpls) - 1
+            while i >= 0 and (
+                # Skip wrappers.
+                (hookimpls[i].hookwrapper or hookimpls[i].wrapper)
+                # Skip tryfirst nonwrappers.
+                or hookimpls[i].tryfirst
+            ):
+                i -= 1
+            hookimpls.insert(i + 1, hookimpl)
+        firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
+        return self._hookexec(self.name, hookimpls, kwargs, firstresult)
+
+    def _maybe_apply_history(self, method: HookImpl) -> None:
+        """Apply call history to a new hookimpl if it is marked as historic."""
+        if self.is_historic():
+            assert self._call_history is not None
+            for kwargs, result_callback in self._call_history:
+                res = self._hookexec(self.name, [method], kwargs, False)
+                if res and result_callback is not None:
+                    # XXX: remember firstresult isn't compat with historic
+                    assert isinstance(res, list)
+                    result_callback(res[0])
+
+
+# Historical name (pluggy<=1.2), kept for backward compatibility.
+_HookCaller = HookCaller
+
+
+class _SubsetHookCaller(HookCaller):
+    """A proxy to another HookCaller which manages calls to all registered
+    plugins except the ones from remove_plugins."""
+
+    # This class is unusual: it inherits from `HookCaller` so all of
+    # the *code* runs in the class, but it delegates all underlying *data*
+    # to the original HookCaller.
+    # `subset_hook_caller` used to be implemented by creating a full-fledged
+    # HookCaller, copying all hookimpls from the original. This had problems
+    # with memory leaks (#346) and historic calls (#347), which make a proxy
+    # approach better.
+    # An alternative implementation is to use a `__getattr__`/`__getattribute__`
+    # proxy, however that adds more overhead and is more tricky to implement.
+
+    __slots__ = (
+        "_orig",
+        "_remove_plugins",
+    )
+
+    def __init__(self, orig: HookCaller, remove_plugins: AbstractSet[_Plugin]) -> None:
+        self._orig = orig
+        self._remove_plugins = remove_plugins
+        self.name = orig.name  # type: ignore[misc]
+        self._hookexec = orig._hookexec  # type: ignore[misc]
+
+    @property  # type: ignore[misc]
+    def _hookimpls(self) -> list[HookImpl]:
+        return [
+            impl
+            for impl in self._orig._hookimpls
+            if impl.plugin not in self._remove_plugins
+        ]
+
+    @property
+    def spec(self) -> HookSpec | None:  # type: ignore[override]
+        return self._orig.spec
+
+    @property
+    def _call_history(self) -> _CallHistory | None:  # type: ignore[override]
+        return self._orig._call_history
+
+    def __repr__(self) -> str:
+        return f"<_SubsetHookCaller {self.name!r}>"
+
+
+@final
+class HookImpl:
+    """A hook implementation in a :class:`HookCaller`."""
+
+    __slots__ = (
+        "function",
+        "argnames",
+        "kwargnames",
+        "plugin",
+        "opts",
+        "plugin_name",
+        "wrapper",
+        "hookwrapper",
+        "optionalhook",
+        "tryfirst",
+        "trylast",
+    )
+
+    def __init__(
+        self,
+        plugin: _Plugin,
+        plugin_name: str,
+        function: _HookImplFunction[object],
+        hook_impl_opts: HookimplOpts,
+    ) -> None:
+        """:meta private:"""
+        #: The hook implementation function.
+        self.function: Final = function
+        argnames, kwargnames = varnames(self.function)
+        #: The positional parameter names of ``function``.
+        self.argnames: Final = argnames
+        #: The keyword parameter names of ``function``.
+        self.kwargnames: Final = kwargnames
+        #: The plugin which defined this hook implementation.
+        self.plugin: Final = plugin
+        #: The :class:`HookimplOpts` used to configure this hook implementation.
+        self.opts: Final = hook_impl_opts
+        #: The name of the plugin which defined this hook implementation.
+        self.plugin_name: Final = plugin_name
+        #: Whether the hook implementation is a :ref:`wrapper <hookwrapper>`.
+        self.wrapper: Final = hook_impl_opts["wrapper"]
+        #: Whether the hook implementation is an :ref:`old-style wrapper
+        #: <old_style_hookwrapper>`.
+        self.hookwrapper: Final = hook_impl_opts["hookwrapper"]
+        #: Whether validation against a hook specification is :ref:`optional
+        #: <optionalhook>`.
+        self.optionalhook: Final = hook_impl_opts["optionalhook"]
+        #: Whether to try to order this hook implementation :ref:`first
+        #: <callorder>`.
+        self.tryfirst: Final = hook_impl_opts["tryfirst"]
+        #: Whether to try to order this hook implementation :ref:`last
+        #: <callorder>`.
+        self.trylast: Final = hook_impl_opts["trylast"]
+
+    def __repr__(self) -> str:
+        return f""
+
+
+@final
+class HookSpec:
+    __slots__ = (
+        "namespace",
+        "function",
+        "name",
+        "argnames",
+        "kwargnames",
+        "opts",
+        "warn_on_impl",
+        "warn_on_impl_args",
+    )
+
+    def __init__(self, namespace: _Namespace, name: str, opts: HookspecOpts) -> None:
+        self.namespace = namespace
+        self.function: Callable[..., object] = getattr(namespace, name)
+        self.name = name
+        self.argnames, self.kwargnames = varnames(self.function)
+        self.opts = opts
+        self.warn_on_impl = opts.get("warn_on_impl")
+        self.warn_on_impl_args = opts.get("warn_on_impl_args")
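
``HookCaller._add_hookimpl`` keeps ``_hookimpls`` ordered trylast, then
normal, then tryfirst (non-wrappers before wrappers), and the call machinery
iterates that list in reverse, so ``tryfirst`` implementations really do run
first. A sketch of the resulting call order, again with hypothetical names:

.. code-block:: python

    import pluggy

    hookspec = pluggy.HookspecMarker("demo")
    hookimpl = pluggy.HookimplMarker("demo")


    class Spec:
        @hookspec
        def order(self):
            """Report who answered, in call order."""


    class A:
        @hookimpl(trylast=True)
        def order(self):
            return "A-trylast"


    class B:
        @hookimpl
        def order(self):
            return "B-normal"


    class C:
        @hookimpl(tryfirst=True)
        def order(self):
            return "C-tryfirst"


    pm = pluggy.PluginManager("demo")
    pm.add_hookspecs(Spec)
    for plugin in (A(), B(), C()):
        pm.register(plugin)

    # Results are appended in call order: tryfirst, then normal, then trylast.
    assert pm.hook.order() == ["C-tryfirst", "B-normal", "A-trylast"]
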
diff --git a/.venv/Lib/site-packages/pluggy/_manager.py b/.venv/Lib/site-packages/pluggy/_manager.py
new file mode 100644
index 00000000..9998dd81
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_manager.py
@@ -0,0 +1,528 @@
+from __future__ import annotations
+
+import inspect
+import types
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Final
+from typing import Iterable
+from typing import Mapping
+from typing import Sequence
+from typing import TYPE_CHECKING
+import warnings
+
+from . import _tracing
+from ._callers import _multicall
+from ._hooks import _HookImplFunction
+from ._hooks import _Namespace
+from ._hooks import _Plugin
+from ._hooks import _SubsetHookCaller
+from ._hooks import HookCaller
+from ._hooks import HookImpl
+from ._hooks import HookimplOpts
+from ._hooks import HookRelay
+from ._hooks import HookspecOpts
+from ._hooks import normalize_hookimpl_opts
+from ._result import Result
+
+
+if TYPE_CHECKING:
+    # importlib.metadata import is slow, defer it.
+    import importlib.metadata
+
+
+_BeforeTrace = Callable[[str, Sequence[HookImpl], Mapping[str, Any]], None]
+_AfterTrace = Callable[[Result[Any], str, Sequence[HookImpl], Mapping[str, Any]], None]
+
+
+def _warn_for_function(warning: Warning, function: Callable[..., object]) -> None:
+    func = cast(types.FunctionType, function)
+    warnings.warn_explicit(
+        warning,
+        type(warning),
+        lineno=func.__code__.co_firstlineno,
+        filename=func.__code__.co_filename,
+    )
+
+
+class PluginValidationError(Exception):
+    """Plugin failed validation.
+
+    :param plugin: The plugin which failed validation.
+    :param message: Error message.
+    """
+
+    def __init__(self, plugin: _Plugin, message: str) -> None:
+        super().__init__(message)
+        #: The plugin which failed validation.
+        self.plugin = plugin
+
+
+class DistFacade:
+    """Emulate a pkg_resources Distribution"""
+
+    def __init__(self, dist: importlib.metadata.Distribution) -> None:
+        self._dist = dist
+
+    @property
+    def project_name(self) -> str:
+        name: str = self.metadata["name"]
+        return name
+
+    def __getattr__(self, attr: str, default=None):
+        return getattr(self._dist, attr, default)
+
+    def __dir__(self) -> list[str]:
+        return sorted(dir(self._dist) + ["_dist", "project_name"])
+
+
+class PluginManager:
+    """Core class which manages registration of plugin objects and 1:N hook
+    calling.
+
+    You can register new hooks by calling :meth:`add_hookspecs(module_or_class)
+    <PluginManager.add_hookspecs>`.
+
+    You can register plugin objects (which contain hook implementations) by
+    calling :meth:`register(plugin) <PluginManager.register>`.
+
+    For debugging purposes you can call :meth:`PluginManager.enable_tracing`
+    which will subsequently send debug information to the trace helper.
+
+    :param project_name:
+        The short project name. Prefer snake case. Make sure it's unique!
+    """
+
+    def __init__(self, project_name: str) -> None:
+        #: The project name.
+        self.project_name: Final = project_name
+        self._name2plugin: Final[dict[str, _Plugin]] = {}
+        self._plugin_distinfo: Final[list[tuple[_Plugin, DistFacade]]] = []
+        #: The "hook relay", used to call a hook on all registered plugins.
+        #: See :ref:`calling`.
+        self.hook: Final = HookRelay()
+        #: The tracing entry point. See :ref:`tracing`.
+        self.trace: Final[_tracing.TagTracerSub] = _tracing.TagTracer().get(
+            "pluginmanage"
+        )
+        self._inner_hookexec = _multicall
+
+    def _hookexec(
+        self,
+        hook_name: str,
+        methods: Sequence[HookImpl],
+        kwargs: Mapping[str, object],
+        firstresult: bool,
+    ) -> object | list[object]:
+        # called from all hookcaller instances.
+        # enable_tracing will set its own wrapping function at self._inner_hookexec
+        return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
+
+    def register(self, plugin: _Plugin, name: str | None = None) -> str | None:
+        """Register a plugin and return its name.
+
+        :param name:
+            The name under which to register the plugin. If not specified, a
+            name is generated using :func:`get_canonical_name`.
+
+        :returns:
+            The plugin name. If the name is blocked from registering, returns
+            ``None``.
+
+        If the plugin is already registered, raises a :exc:`ValueError`.
+        """
+        plugin_name = name or self.get_canonical_name(plugin)
+
+        if plugin_name in self._name2plugin:
+            if self._name2plugin.get(plugin_name, -1) is None:
+                return None  # blocked plugin, return None to indicate no registration
+            raise ValueError(
+                "Plugin name already registered: %s=%s\n%s"
+                % (plugin_name, plugin, self._name2plugin)
+            )
+
+        if plugin in self._name2plugin.values():
+            raise ValueError(
+                "Plugin already registered under a different name: %s=%s\n%s"
+                % (plugin_name, plugin, self._name2plugin)
+            )
+
+        # XXX if an error happens we should make sure no state has been
+        # changed at point of return
+        self._name2plugin[plugin_name] = plugin
+
+        # register matching hook implementations of the plugin
+        for name in dir(plugin):
+            hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
+            if hookimpl_opts is not None:
+                normalize_hookimpl_opts(hookimpl_opts)
+                method: _HookImplFunction[object] = getattr(plugin, name)
+                hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
+                name = hookimpl_opts.get("specname") or name
+                hook: HookCaller | None = getattr(self.hook, name, None)
+                if hook is None:
+                    hook = HookCaller(name, self._hookexec)
+                    setattr(self.hook, name, hook)
+                elif hook.has_spec():
+                    self._verify_hook(hook, hookimpl)
+                    hook._maybe_apply_history(hookimpl)
+                hook._add_hookimpl(hookimpl)
+        return plugin_name
+
+    def parse_hookimpl_opts(self, plugin: _Plugin, name: str) -> HookimplOpts | None:
+        """Try to obtain a hook implementation from an item with the given name
+        in the given plugin which is being searched for hook impls.
+
+        :returns:
+            The parsed hookimpl options, or None to skip the given item.
+
+        This method can be overridden by ``PluginManager`` subclasses to
+        customize how hook implementations are picked up. By default, returns the
+        options for items decorated with :class:`HookimplMarker`.
+        """
+        method: object = getattr(plugin, name)
+        if not inspect.isroutine(method):
+            return None
+        try:
+            res: HookimplOpts | None = getattr(
+                method, self.project_name + "_impl", None
+            )
+        except Exception:
+            res = {}  # type: ignore[assignment]
+        if res is not None and not isinstance(res, dict):
+            # false positive
+            res = None  # type:ignore[unreachable]
+        return res
+
+    def unregister(
+        self, plugin: _Plugin | None = None, name: str | None = None
+    ) -> Any | None:
+        """Unregister a plugin and all of its hook implementations.
+
+        The plugin can be specified either by the plugin object or the plugin
+        name. If both are specified, they must agree.
+
+        Returns the unregistered plugin, or ``None`` if not found.
+        """
+        if name is None:
+            assert plugin is not None, "one of name or plugin needs to be specified"
+            name = self.get_name(plugin)
+            assert name is not None, "plugin is not registered"
+
+        if plugin is None:
+            plugin = self.get_plugin(name)
+            if plugin is None:
+                return None
+
+        hookcallers = self.get_hookcallers(plugin)
+        if hookcallers:
+            for hookcaller in hookcallers:
+                hookcaller._remove_plugin(plugin)
+
+        # if self._name2plugin[name] == None registration was blocked: ignore
+        if self._name2plugin.get(name):
+            assert name is not None
+            del self._name2plugin[name]
+
+        return plugin
+
+    def set_blocked(self, name: str) -> None:
+        """Block registrations of the given name, unregister if already registered."""
+        self.unregister(name=name)
+        self._name2plugin[name] = None
+
+    def is_blocked(self, name: str) -> bool:
+        """Return whether the given plugin name is blocked."""
+        return name in self._name2plugin and self._name2plugin[name] is None
+
+    def unblock(self, name: str) -> bool:
+        """Unblocks a name.
+
+        Returns whether the name was actually blocked.
+        """
+        if self._name2plugin.get(name, -1) is None:
+            del self._name2plugin[name]
+            return True
+        return False
+
+    def add_hookspecs(self, module_or_class: _Namespace) -> None:
+        """Add new hook specifications defined in the given ``module_or_class``.
+
+        Functions are recognized as hook specifications if they have been
+        decorated with a matching :class:`HookspecMarker`.
+        """
+        names = []
+        for name in dir(module_or_class):
+            spec_opts = self.parse_hookspec_opts(module_or_class, name)
+            if spec_opts is not None:
+                hc: HookCaller | None = getattr(self.hook, name, None)
+                if hc is None:
+                    hc = HookCaller(name, self._hookexec, module_or_class, spec_opts)
+                    setattr(self.hook, name, hc)
+                else:
+                    # Plugins registered this hook without knowing the spec.
+                    hc.set_specification(module_or_class, spec_opts)
+                    for hookfunction in hc.get_hookimpls():
+                        self._verify_hook(hc, hookfunction)
+                names.append(name)
+
+        if not names:
+            raise ValueError(
+                f"did not find any {self.project_name!r} hooks in {module_or_class!r}"
+            )
+
+    def parse_hookspec_opts(
+        self, module_or_class: _Namespace, name: str
+    ) -> HookspecOpts | None:
+        """Try to obtain a hook specification from an item with the given name
+        in the given module or class which is being searched for hook specs.
+
+        :returns:
+            The parsed hookspec options for defining a hook, or None to skip the
+            given item.
+
+        This method can be overridden by ``PluginManager`` subclasses to
+        customize how hook specifications are picked up. By default, returns the
+        options for items decorated with :class:`HookspecMarker`.
+        """
+        method = getattr(module_or_class, name)
+        opts: HookspecOpts | None = getattr(method, self.project_name + "_spec", None)
+        return opts
+
+    def get_plugins(self) -> set[Any]:
+        """Return a set of all registered plugin objects."""
+        return {x for x in self._name2plugin.values() if x is not None}
+
+    def is_registered(self, plugin: _Plugin) -> bool:
+        """Return whether the plugin is already registered."""
+        return any(plugin == val for val in self._name2plugin.values())
+
+    def get_canonical_name(self, plugin: _Plugin) -> str:
+        """Return a canonical name for a plugin object.
+
+        Note that a plugin may be registered under a different name
+        specified by the caller of :meth:`register(plugin, name)
+        <PluginManager.register>`. To obtain the name of a registered plugin
+        use :meth:`get_name(plugin) <PluginManager.get_name>` instead.
+        """
+        name: str | None = getattr(plugin, "__name__", None)
+        return name or str(id(plugin))
+
+    def get_plugin(self, name: str) -> Any | None:
+        """Return the plugin registered under the given name, if any."""
+        return self._name2plugin.get(name)
+
+    def has_plugin(self, name: str) -> bool:
+        """Return whether a plugin with the given name is registered."""
+        return self.get_plugin(name) is not None
+
+    def get_name(self, plugin: _Plugin) -> str | None:
+        """Return the name the plugin is registered under, or ``None`` if
+        it isn't."""
+        for name, val in self._name2plugin.items():
+            if plugin == val:
+                return name
+        return None
+
+    def _verify_hook(self, hook: HookCaller, hookimpl: HookImpl) -> None:
+        if hook.is_historic() and (hookimpl.hookwrapper or hookimpl.wrapper):
+            raise PluginValidationError(
+                hookimpl.plugin,
+                "Plugin %r\nhook %r\nhistoric incompatible with yield/wrapper/hookwrapper"
+                % (hookimpl.plugin_name, hook.name),
+            )
+
+        assert hook.spec is not None
+        if hook.spec.warn_on_impl:
+            _warn_for_function(hook.spec.warn_on_impl, hookimpl.function)
+
+        # positional arg checking
+        notinspec = set(hookimpl.argnames) - set(hook.spec.argnames)
+        if notinspec:
+            raise PluginValidationError(
+                hookimpl.plugin,
+                "Plugin %r for hook %r\nhookimpl definition: %s\n"
+                "Argument(s) %s are declared in the hookimpl but "
+                "can not be found in the hookspec"
+                % (
+                    hookimpl.plugin_name,
+                    hook.name,
+                    _formatdef(hookimpl.function),
+                    notinspec,
+                ),
+            )
+
+        if hook.spec.warn_on_impl_args:
+            for hookimpl_argname in hookimpl.argnames:
+                argname_warning = hook.spec.warn_on_impl_args.get(hookimpl_argname)
+                if argname_warning is not None:
+                    _warn_for_function(argname_warning, hookimpl.function)
+
+        if (
+            hookimpl.wrapper or hookimpl.hookwrapper
+        ) and not inspect.isgeneratorfunction(hookimpl.function):
+            raise PluginValidationError(
+                hookimpl.plugin,
+                "Plugin %r for hook %r\nhookimpl definition: %s\n"
+                "Declared as wrapper=True or hookwrapper=True "
+                "but function is not a generator function"
+                % (hookimpl.plugin_name, hook.name, _formatdef(hookimpl.function)),
+            )
+
+        if hookimpl.wrapper and hookimpl.hookwrapper:
+            raise PluginValidationError(
+                hookimpl.plugin,
+                "Plugin %r for hook %r\nhookimpl definition: %s\n"
+                "The wrapper=True and hookwrapper=True options are mutually exclusive"
+                % (hookimpl.plugin_name, hook.name, _formatdef(hookimpl.function)),
+            )
+
+    def check_pending(self) -> None:
+        """Verify that all hooks which have not been verified against a
+        hook specification are optional, otherwise raise
+        :exc:`PluginValidationError`."""
+        for name in self.hook.__dict__:
+            if name[0] != "_":
+                hook: HookCaller = getattr(self.hook, name)
+                if not hook.has_spec():
+                    for hookimpl in hook.get_hookimpls():
+                        if not hookimpl.optionalhook:
+                            raise PluginValidationError(
+                                hookimpl.plugin,
+                                "unknown hook %r in plugin %r"
+                                % (name, hookimpl.plugin),
+                            )
+
+    def load_setuptools_entrypoints(self, group: str, name: str | None = None) -> int:
+        """Load modules from querying the specified setuptools ``group``.
+
+        :param group:
+            Entry point group to load plugins.
+        :param name:
+            If given, loads only plugins with the given ``name``.
+
+        :return:
+            The number of plugins loaded by this call.
+        """
+        import importlib.metadata
+
+        count = 0
+        for dist in list(importlib.metadata.distributions()):
+            for ep in dist.entry_points:
+                if (
+                    ep.group != group
+                    or (name is not None and ep.name != name)
+                    # already registered
+                    or self.get_plugin(ep.name)
+                    or self.is_blocked(ep.name)
+                ):
+                    continue
+                plugin = ep.load()
+                self.register(plugin, name=ep.name)
+                self._plugin_distinfo.append((plugin, DistFacade(dist)))
+                count += 1
+        return count
+
+    def list_plugin_distinfo(self) -> list[tuple[_Plugin, DistFacade]]:
+        """Return a list of (plugin, distinfo) pairs for all
+        setuptools-registered plugins."""
+        return list(self._plugin_distinfo)
+
+    def list_name_plugin(self) -> list[tuple[str, _Plugin]]:
+        """Return a list of (name, plugin) pairs for all registered plugins."""
+        return list(self._name2plugin.items())
+
+    def get_hookcallers(self, plugin: _Plugin) -> list[HookCaller] | None:
+        """Get all hook callers for the specified plugin.
+
+        :returns:
+            The hook callers, or ``None`` if ``plugin`` is not registered in
+            this plugin manager.
+        """
+        if self.get_name(plugin) is None:
+            return None
+        hookcallers = []
+        for hookcaller in self.hook.__dict__.values():
+            for hookimpl in hookcaller.get_hookimpls():
+                if hookimpl.plugin is plugin:
+                    hookcallers.append(hookcaller)
+        return hookcallers
+
+    def add_hookcall_monitoring(
+        self, before: _BeforeTrace, after: _AfterTrace
+    ) -> Callable[[], None]:
+        """Add before/after tracing functions for all hooks.
+
+        Returns an undo function which, when called, removes the added tracers.
+
+        ``before(hook_name, hook_impls, kwargs)`` will be called ahead
+        of all hook calls and receive the hook name, a list
+        of HookImpl instances and the keyword arguments for the hook call.
+
+        ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
+        same arguments as ``before`` but also a :class:`~pluggy.Result` object
+        which represents the result of the overall hook call.
+        """
+        oldcall = self._inner_hookexec
+
+        def traced_hookexec(
+            hook_name: str,
+            hook_impls: Sequence[HookImpl],
+            caller_kwargs: Mapping[str, object],
+            firstresult: bool,
+        ) -> object | list[object]:
+            before(hook_name, hook_impls, caller_kwargs)
+            outcome = Result.from_call(
+                lambda: oldcall(hook_name, hook_impls, caller_kwargs, firstresult)
+            )
+            after(outcome, hook_name, hook_impls, caller_kwargs)
+            return outcome.get_result()
+
+        self._inner_hookexec = traced_hookexec
+
+        def undo() -> None:
+            self._inner_hookexec = oldcall
+
+        return undo
+
+    def enable_tracing(self) -> Callable[[], None]:
+        """Enable tracing of hook calls.
+
+        Returns an undo function which, when called, removes the added tracing.
+        """
+        hooktrace = self.trace.root.get("hook")
+
+        def before(
+            hook_name: str, methods: Sequence[HookImpl], kwargs: Mapping[str, object]
+        ) -> None:
+            hooktrace.root.indent += 1
+            hooktrace(hook_name, kwargs)
+
+        def after(
+            outcome: Result[object],
+            hook_name: str,
+            methods: Sequence[HookImpl],
+            kwargs: Mapping[str, object],
+        ) -> None:
+            if outcome.exception is None:
+                hooktrace("finish", hook_name, "-->", outcome.get_result())
+            hooktrace.root.indent -= 1
+
+        return self.add_hookcall_monitoring(before, after)
+
+    def subset_hook_caller(
+        self, name: str, remove_plugins: Iterable[_Plugin]
+    ) -> HookCaller:
+        """Return a proxy :class:`~pluggy.HookCaller` instance for the named
+        method which manages calls to all registered plugins except the ones
+        from remove_plugins."""
+        orig: HookCaller = getattr(self.hook, name)
+        plugins_to_remove = {plug for plug in remove_plugins if hasattr(plug, name)}
+        if plugins_to_remove:
+            return _SubsetHookCaller(orig, plugins_to_remove)
+        return orig
+
+
+def _formatdef(func: Callable[..., object]) -> str:
+    return f"{func.__name__}{inspect.signature(func)}"
diff --git a/.venv/Lib/site-packages/pluggy/_result.py b/.venv/Lib/site-packages/pluggy/_result.py
new file mode 100644
index 00000000..f9a081c4
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_result.py
@@ -0,0 +1,104 @@
+"""
+Hook wrapper "result" utilities.
+"""
+
+from __future__ import annotations
+
+from types import TracebackType
+from typing import Callable
+from typing import cast
+from typing import final
+from typing import Generic
+from typing import Optional
+from typing import Tuple
+from typing import Type
+from typing import TypeVar
+
+
+_ExcInfo = Tuple[Type[BaseException], BaseException, Optional[TracebackType]]
+ResultType = TypeVar("ResultType")
+
+
+class HookCallError(Exception):
+    """Hook was called incorrectly."""
+
+
+@final
+class Result(Generic[ResultType]):
+    """An object used to inspect and set the result in a :ref:`hook wrapper
+    `."""
+
+    __slots__ = ("_result", "_exception")
+
+    def __init__(
+        self,
+        result: ResultType | None,
+        exception: BaseException | None,
+    ) -> None:
+        """:meta private:"""
+        self._result = result
+        self._exception = exception
+
+    @property
+    def excinfo(self) -> _ExcInfo | None:
+        """:meta private:"""
+        exc = self._exception
+        if exc is None:
+            return None
+        else:
+            return (type(exc), exc, exc.__traceback__)
+
+    @property
+    def exception(self) -> BaseException | None:
+        """:meta private:"""
+        return self._exception
+
+    @classmethod
+    def from_call(cls, func: Callable[[], ResultType]) -> Result[ResultType]:
+        """:meta private:"""
+        __tracebackhide__ = True
+        result = exception = None
+        try:
+            result = func()
+        except BaseException as exc:
+            exception = exc
+        return cls(result, exception)
+
+    def force_result(self, result: ResultType) -> None:
+        """Force the result(s) to ``result``.
+
+        If the hook was marked as a ``firstresult`` a single value should
+        be set, otherwise set a (modified) list of results. Any exceptions
+        found during invocation will be deleted.
+
+        This overrides any previous result or exception.
+        """
+        self._result = result
+        self._exception = None
+
+    def force_exception(self, exception: BaseException) -> None:
+        """Force the result to fail with ``exception``.
+
+        This overrides any previous result or exception.
+
+        .. versionadded:: 1.1.0
+        """
+        self._result = None
+        self._exception = exception
+
+    def get_result(self) -> ResultType:
+        """Get the result(s) for this hook call.
+
+        If the hook was marked as a ``firstresult`` only a single value
+        will be returned, otherwise a list of results.
+        """
+        __tracebackhide__ = True
+        exc = self._exception
+        if exc is None:
+            return cast(ResultType, self._result)
+        else:
+            raise exc.with_traceback(exc.__traceback__)
+
+
+# Historical name (pluggy<=1.2), kept for backward compatibility.
+_Result = Result
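
The ``Result`` lifecycle can be exercised directly; a minimal sketch based on
the class above (``pluggy.Result`` is the public export referenced by the
manager's hook-call monitoring)::

    from pluggy import Result

    outcome = Result.from_call(lambda: 1 / 0)   # captures the exception
    assert isinstance(outcome.exception, ZeroDivisionError)
    outcome.force_result(42)                    # clears the stored exception
    assert outcome.get_result() == 42           # no longer raises
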
diff --git a/.venv/Lib/site-packages/pluggy/_tracing.py b/.venv/Lib/site-packages/pluggy/_tracing.py
new file mode 100644
index 00000000..cd238ad7
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_tracing.py
@@ -0,0 +1,73 @@
+"""
+Tracing utils
+"""
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Sequence
+from typing import Tuple
+
+
+_Writer = Callable[[str], object]
+_Processor = Callable[[Tuple[str, ...], Tuple[Any, ...]], object]
+
+
+class TagTracer:
+    def __init__(self) -> None:
+        self._tags2proc: dict[tuple[str, ...], _Processor] = {}
+        self._writer: _Writer | None = None
+        self.indent = 0
+
+    def get(self, name: str) -> TagTracerSub:
+        return TagTracerSub(self, (name,))
+
+    def _format_message(self, tags: Sequence[str], args: Sequence[object]) -> str:
+        if isinstance(args[-1], dict):
+            extra = args[-1]
+            args = args[:-1]
+        else:
+            extra = {}
+
+        content = " ".join(map(str, args))
+        indent = "  " * self.indent
+
+        lines = ["{}{} [{}]\n".format(indent, content, ":".join(tags))]
+
+        for name, value in extra.items():
+            lines.append(f"{indent}    {name}: {value}\n")
+
+        return "".join(lines)
+
+    def _processmessage(self, tags: tuple[str, ...], args: tuple[object, ...]) -> None:
+        if self._writer is not None and args:
+            self._writer(self._format_message(tags, args))
+        try:
+            processor = self._tags2proc[tags]
+        except KeyError:
+            pass
+        else:
+            processor(tags, args)
+
+    def setwriter(self, writer: _Writer | None) -> None:
+        self._writer = writer
+
+    def setprocessor(self, tags: str | tuple[str, ...], processor: _Processor) -> None:
+        if isinstance(tags, str):
+            tags = tuple(tags.split(":"))
+        else:
+            assert isinstance(tags, tuple)
+        self._tags2proc[tags] = processor
+
+
+class TagTracerSub:
+    def __init__(self, root: TagTracer, tags: tuple[str, ...]) -> None:
+        self.root = root
+        self.tags = tags
+
+    def __call__(self, *args: object) -> None:
+        self.root._processmessage(self.tags, args)
+
+    def get(self, name: str) -> TagTracerSub:
+        return self.__class__(self.root, self.tags + (name,))
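
``TagTracer`` and ``TagTracerSub`` combine as follows; a minimal sketch of the
formatting behaviour implemented above::

    from pluggy._tracing import TagTracer

    root = TagTracer()
    root.setwriter(print)              # emit each formatted message via print()
    trace = root.get("pluginmanage")
    trace("hello")                     # -> "hello [pluginmanage]"
    hook_trace = trace.get("hook")
    hook_trace("calling", {"arg": 1})  # trailing dict becomes indented extra lines
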
diff --git a/.venv/Lib/site-packages/pluggy/_version.py b/.venv/Lib/site-packages/pluggy/_version.py
new file mode 100644
index 00000000..c565007e
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '1.5.0'
+__version_tuple__ = version_tuple = (1, 5, 0)
diff --git a/.venv/Lib/site-packages/pluggy/_warnings.py b/.venv/Lib/site-packages/pluggy/_warnings.py
new file mode 100644
index 00000000..6356c770
--- /dev/null
+++ b/.venv/Lib/site-packages/pluggy/_warnings.py
@@ -0,0 +1,27 @@
+from typing import final
+
+
+class PluggyWarning(UserWarning):
+    """Base class for all warnings emitted by pluggy."""
+
+    __module__ = "pluggy"
+
+
+@final
+class PluggyTeardownRaisedWarning(PluggyWarning):
+    """A plugin raised an exception during an :ref:`old-style hookwrapper
+    ` teardown.
+
+    Such exceptions are not handled by pluggy, and may cause subsequent
+    teardowns to be executed at unexpected times, or be skipped entirely.
+
+    This is an issue in the plugin implementation.
+
+    If the exception is unintended, fix the underlying cause.
+
+    If the exception is intended, switch to :ref:`new-style hook wrappers
+    <hookwrappers>`, or use :func:`result.force_exception()
+    <pluggy.Result.force_exception>` to set the exception instead of raising.
+    """
+
+    __module__ = "pluggy"
diff --git a/.venv/Lib/site-packages/pluggy/py.typed b/.venv/Lib/site-packages/pluggy/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/py.py b/.venv/Lib/site-packages/py.py
new file mode 100644
index 00000000..5c661e66
--- /dev/null
+++ b/.venv/Lib/site-packages/py.py
@@ -0,0 +1,15 @@
+# shim for pylib going away
+# if pylib is installed this file will get skipped
+# (`py/__init__.py` has higher precedence)
+from __future__ import annotations
+
+import sys
+
+import _pytest._py.error as error
+import _pytest._py.path as path
+
+
+sys.modules["py.error"] = error
+sys.modules["py.path"] = path
+
+__all__ = ["error", "path"]
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/AUTHORS b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/AUTHORS
new file mode 100644
index 00000000..4f1b172f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/AUTHORS
@@ -0,0 +1,469 @@
+Holger Krekel, holger at merlinux eu
+merlinux GmbH, Germany, office at merlinux eu
+
+Contributors include::
+
+Aaron Coleman
+Abdeali JK
+Abdelrahman Elbehery
+Abhijeet Kasurde
+Adam Johnson
+Adam Stewart
+Adam Uhlir
+Ahn Ki-Wook
+Akhilesh Ramakrishnan
+Akiomi Kamakura
+Alan Velasco
+Alessio Izzo
+Alex Jones
+Alex Lambson
+Alexander Johnson
+Alexander King
+Alexei Kozlenok
+Alice Purcell
+Allan Feldman
+Aly Sivji
+Amir Elkess
+Anatoly Bubenkoff
+Anders Hovmöller
+Andras Mitzki
+Andras Tim
+Andrea Cimatoribus
+Andreas Motl
+Andreas Zeidler
+Andrew Shapton
+Andrey Paramonov
+Andrzej Klajnert
+Andrzej Ostrowski
+Andy Freeland
+Anita Hammer
+Anthon van der Neut
+Anthony Shaw
+Anthony Sottile
+Anton Grinevich
+Anton Lodder
+Antony Lee
+Arel Cordero
+Arias Emmanuel
+Ariel Pillemer
+Armin Rigo
+Aron Coyle
+Aron Curzon
+Arthur Richard
+Ashish Kurmi
+Aviral Verma
+Aviv Palivoda
+Babak Keyvani
+Barney Gale
+Ben Brown
+Ben Gartner
+Ben Leith
+Ben Webb
+Benjamin Peterson
+Benjamin Schubert
+Bernard Pratz
+Bo Wu
+Bob Ippolito
+Brian Dorsey
+Brian Larsen
+Brian Maissy
+Brian Okken
+Brianna Laugher
+Bruno Oliveira
+Cal Jacobson
+Cal Leeming
+Carl Friedrich Bolz
+Carlos Jenkins
+Ceridwen
+Charles Cloud
+Charles Machalow
+Charnjit SiNGH (CCSJ)
+Cheuk Ting Ho
+Chris Mahoney
+Chris Lamb
+Chris NeJame
+Chris Rose
+Chris Wheeler
+Christian Boelsen
+Christian Clauss
+Christian Fetzer
+Christian Neumüller
+Christian Theunert
+Christian Tismer
+Christine Mecklenborg
+Christoph Buelter
+Christopher Dignam
+Christopher Gilling
+Claire Cecil
+Claudio Madotto
+Clément M.T. Robert
+Cornelius Riemenschneider
+CrazyMerlyn
+Cristian Vera
+Cyrus Maden
+Damian Skrzypczak
+Daniel Grana
+Daniel Hahler
+Daniel Miller
+Daniel Nuri
+Daniel Sánchez Castelló
+Daniel Valenzuela Zenteno
+Daniel Wandschneider
+Daniele Procida
+Danielle Jenkins
+Daniil Galiev
+Dave Hunt
+David Díaz-Barquero
+David Mohr
+David Paul Röthlisberger
+David Szotten
+David Vierra
+Daw-Ran Liou
+Debi Mishra
+Denis Kirisov
+Denivy Braiam Rück
+Dheeraj C K
+Dhiren Serai
+Diego Russo
+Dmitry Dygalo
+Dmitry Pribysh
+Dominic Mortlock
+Duncan Betts
+Edison Gustavo Muenz
+Edoardo Batini
+Edson Tadeu M. Manoel
+Eduardo Schettino
+Edward Haigh
+Eero Vaher
+Eli Boyarski
+Elizaveta Shashkova
+Éloi Rivard
+Endre Galaczi
+Eric Hunsberger
+Eric Liu
+Eric Siegerman
+Eric Yuan
+Erik Aronesty
+Erik Hasse
+Erik M. Bray
+Evan Kepner
+Evgeny Seliverstov
+Fabian Sturm
+Fabien Zarifian
+Fabio Zadrozny
+Farbod Ahmadian
+faph
+Felix Hofstätter
+Felix Nieuwenhuizen
+Feng Ma
+Florian Bruhin
+Florian Dahlitz
+Floris Bruynooghe
+Fraser Stark
+Gabriel Landau
+Gabriel Reis
+Garvit Shubham
+Gene Wood
+George Kussumoto
+Georgy Dyuldin
+Gergely Kalmár
+Gleb Nikonorov
+Graeme Smecher
+Graham Horler
+Greg Price
+Gregory Lee
+Grig Gheorghiu
+Grigorii Eremeev (budulianin)
+Guido Wesdorp
+Guoqiang Zhang
+Harald Armin Massa
+Harshna
+Henk-Jaap Wagenaar
+Holger Kohr
+Hugo van Kemenade
+Hui Wang (coldnight)
+Ian Bicking
+Ian Lesperance
+Ilya Konstantinov
+Ionuț Turturică
+Isaac Virshup
+Israel Fruchter
+Itxaso Aizpurua
+Iwan Briquemont
+Jaap Broekhuizen
+Jake VanderPlas
+Jakob van Santen
+Jakub Mitoraj
+James Bourbeau
+James Frost
+Jan Balster
+Janne Vanhala
+Jason R. Coombs
+Javier Domingo Cansino
+Javier Romero
+Jeff Rackauckas
+Jeff Widman
+Jenni Rinker
+Jens Tröger
+John Eddie Ayson
+John Litborn
+John Towler
+Jon Parise
+Jon Sonesen
+Jonas Obrist
+Jordan Guymon
+Jordan Moldow
+Jordan Speicher
+Joseph Hunkeler
+Joseph Sawaya
+Josh Karpel
+Joshua Bronson
+Jurko Gospodnetić
+Justice Ndou
+Justyna Janczyszyn
+Kale Kundert
+Kamran Ahmad
+Kenny Y
+Karl O. Pinc
+Karthikeyan Singaravelan
+Katarzyna Jachim
+Katarzyna Król
+Katerina Koukiou
+Keri Volans
+Kevin C
+Kevin Cox
+Kevin Hierro Carrasco
+Kevin J. Foley
+Kian Eliasi
+Kian-Meng Ang
+Kodi B. Arfer
+Kojo Idrissa
+Kostis Anagnostopoulos
+Kristoffer Nordström
+Kyle Altendorf
+Lawrence Mitchell
+Lee Kamentsky
+Lev Maximov
+Levon Saldamli
+Lewis Cowles
+Llandy Riveron Del Risco
+Loic Esteve
+lovetheguitar
+Lukas Bednar
+Luke Murphy
+Maciek Fijalkowski
+Maho
+Maik Figura
+Mandeep Bhutani
+Manuel Krebber
+Marc Mueller
+Marc Schlaich
+Marcelo Duarte Trevisani
+Marcin Bachry
+Marc Bresson
+Marco Gorelli
+Mark Abramowitz
+Mark Dickinson
+Mark Vong
+Marko Pacak
+Markus Unterwaditzer
+Martijn Faassen
+Martin Altmayer
+Martin K. Scherer
+Martin Prusse
+Mathieu Clabaut
+Matt Bachmann
+Matt Duck
+Matt Williams
+Matthias Hafner
+Maxim Filipenko
+Maximilian Cosmo Sitter
+mbyt
+Michael Aquilina
+Michael Birtwell
+Michael Droettboom
+Michael Goerz
+Michael Krebs
+Michael Seifert
+Michael Vogt
+Michal Wajszczuk
+Michał Górny
+Michał Zięba
+Mickey Pashov
+Mihai Capotă
+Mihail Milushev
+Mike Hoyle (hoylemd)
+Mike Lundy
+Milan Lesnek
+Miro Hrončok
+mrbean-bremen
+Nathan Goldbaum
+Nathaniel Compton
+Nathaniel Waisbrot
+Nauman Ahmed
+Ned Batchelder
+Neil Martin
+Neven Mundar
+Nicholas Devenish
+Nicholas Murphy
+Niclas Olofsson
+Nicolas Delaby
+Nicolas Simonds
+Nico Vidal
+Nikolay Kondratyev
+Nipunn Koorapati
+Oleg Pidsadnyi
+Oleg Sushchenko
+Olga Matoula
+Oliver Bestwalter
+Omar Kohl
+Omer Hadari
+Ondřej Súkup
+Oscar Benjamin
+Parth Patel
+Patrick Hayes
+Patrick Lannigan
+Paul Müller
+Paul Reece
+Pauli Virtanen
+Pavel Karateev
+Paweł Adamczak
+Pedro Algarvio
+Petter Strandmark
+Philipp Loose
+Pierre Sassoulas
+Pieter Mulder
+Piotr Banaszkiewicz
+Piotr Helm
+Poulami Sau
+Prakhar Gurunani
+Prashant Anand
+Prashant Sharma
+Pulkit Goyal
+Punyashloka Biswal
+Quentin Pradet
+q0w
+Ralf Schmitt
+Ralph Giles
+Ram Rachum
+Ran Benita
+Raphael Castaneda
+Raphael Pierzina
+Rafal Semik
+Raquel Alegre
+Ravi Chandra
+Reagan Lee
+Rob Arrow
+Robert Holt
+Roberto Aldera
+Roberto Polli
+Roland Puntaier
+Romain Dorgueil
+Roman Bolshakov
+Ronny Pfannschmidt
+Ross Lawley
+Ruaridh Williamson
+Russel Winder
+Russell Martin
+Ryan Puddephatt
+Ryan Wooden
+Sadra Barikbin
+Saiprasad Kale
+Samuel Colvin
+Samuel Dion-Girardeau
+Samuel Jirovec
+Samuel Searles-Bryant
+Samuel Therrien (Avasam)
+Samuele Pedroni
+Sanket Duthade
+Sankt Petersbug
+Saravanan Padmanaban
+Sean Malloy
+Segev Finer
+Serhii Mozghovyi
+Seth Junot
+Shantanu Jain
+Sharad Nair
+Shubham Adep
+Simon Blanchard
+Simon Gomizelj
+Simon Holesch
+Simon Kerr
+Skylar Downes
+Srinivas Reddy Thatiparthy
+Stefaan Lippens
+Stefan Farmbauer
+Stefan Scherfke
+Stefan Zimmermann
+Stefanie Molin
+Stefano Taschini
+Steffen Allner
+Stephan Obermann
+Sven-Hendrik Haase
+Sviatoslav Sydorenko
+Sylvain Marié
+Tadek Teleżyński
+Takafumi Arakaki
+Taneli Hukkinen
+Tanvi Mehta
+Tanya Agarwal
+Tarcisio Fischer
+Tareq Alayan
+Tatiana Ovary
+Ted Xiao
+Terje Runde
+Thomas Grainger
+Thomas Hisch
+Tianyu Dongfang
+Tim Hoffmann
+Tim Strazny
+TJ Bruno
+Tobias Diez
+Tom Dalton
+Tom Viner
+Tomáš Gavenčiak
+Tomer Keren
+Tony Narlock
+Tor Colvin
+Trevor Bekolay
+Tushar Sadhwani
+Tyler Goodlet
+Tyler Smart
+Tzu-ping Chung
+Vasily Kuznetsov
+Victor Maryama
+Victor Rodriguez
+Victor Uriarte
+Vidar T. Fauske
+Vijay Arora
+Virendra Patil
+Virgil Dupras
+Vitaly Lashmanov
+Vivaan Verma
+Vlad Dragos
+Vlad Radziuk
+Vladyslav Rachek
+Volodymyr Kochetkov
+Volodymyr Piskun
+Wei Lin
+Wil Cooley
+William Lee
+Wim Glenn
+Wouter van Ackooy
+Xixi Zhao
+Xuan Luong
+Xuecong Liao
+Yannick Péroux
+Yao Xiao
+Yoav Caspi
+Yuliang Shao
+Yusuke Kadowaki
+Yutian Li
+Yuval Shimon
+Zac Hatfield-Dodds
+Zach Snicker
+Zachary Kneupper
+Zachary OBrien
+Zhouxin Qiu
+Zoltán Máté
+Zsolt Cserna
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/INSTALLER b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/LICENSE b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/LICENSE
new file mode 100644
index 00000000..c3f1657f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2004 Holger Krekel and others
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/METADATA b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/METADATA
new file mode 100644
index 00000000..b693d7fb
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/METADATA
@@ -0,0 +1,212 @@
+Metadata-Version: 2.1
+Name: pytest
+Version: 8.3.3
+Summary: pytest: simple powerful testing with Python
+Author: Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin, Others (See AUTHORS)
+License: MIT
+Project-URL: Changelog, https://docs.pytest.org/en/stable/changelog.html
+Project-URL: Homepage, https://docs.pytest.org/en/latest/
+Project-URL: Source, https://github.com/pytest-dev/pytest
+Project-URL: Tracker, https://github.com/pytest-dev/pytest/issues
+Project-URL: Twitter, https://twitter.com/pytestdotorg
+Keywords: test,unittest
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: iniconfig
+Requires-Dist: packaging
+Requires-Dist: pluggy <2,>=1.5
+Requires-Dist: exceptiongroup >=1.0.0rc8 ; python_version < "3.11"
+Requires-Dist: tomli >=1 ; python_version < "3.11"
+Requires-Dist: colorama ; sys_platform == "win32"
+Provides-Extra: dev
+Requires-Dist: argcomplete ; extra == 'dev'
+Requires-Dist: attrs >=19.2 ; extra == 'dev'
+Requires-Dist: hypothesis >=3.56 ; extra == 'dev'
+Requires-Dist: mock ; extra == 'dev'
+Requires-Dist: pygments >=2.7.2 ; extra == 'dev'
+Requires-Dist: requests ; extra == 'dev'
+Requires-Dist: setuptools ; extra == 'dev'
+Requires-Dist: xmlschema ; extra == 'dev'
+
+.. image:: https://github.com/pytest-dev/pytest/raw/main/doc/en/img/pytest_logo_curves.svg
+   :target: https://docs.pytest.org/en/stable/
+   :align: center
+   :height: 200
+   :alt: pytest
+
+
+------
+
+.. image:: https://img.shields.io/pypi/v/pytest.svg
+    :target: https://pypi.org/project/pytest/
+
+.. image:: https://img.shields.io/conda/vn/conda-forge/pytest.svg
+    :target: https://anaconda.org/conda-forge/pytest
+
+.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+    :target: https://pypi.org/project/pytest/
+
+.. image:: https://codecov.io/gh/pytest-dev/pytest/branch/main/graph/badge.svg
+    :target: https://codecov.io/gh/pytest-dev/pytest
+    :alt: Code coverage Status
+
+.. image:: https://github.com/pytest-dev/pytest/actions/workflows/test.yml/badge.svg
+    :target: https://github.com/pytest-dev/pytest/actions?query=workflow%3Atest
+
+.. image:: https://results.pre-commit.ci/badge/github/pytest-dev/pytest/main.svg
+   :target: https://results.pre-commit.ci/latest/github/pytest-dev/pytest/main
+   :alt: pre-commit.ci status
+
+.. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg
+    :target: https://www.codetriage.com/pytest-dev/pytest
+
+.. image:: https://readthedocs.org/projects/pytest/badge/?version=latest
+    :target: https://pytest.readthedocs.io/en/latest/?badge=latest
+    :alt: Documentation Status
+
+.. image:: https://img.shields.io/badge/Discord-pytest--dev-blue
+    :target: https://discord.com/invite/pytest-dev
+    :alt: Discord
+
+.. image:: https://img.shields.io/badge/Libera%20chat-%23pytest-orange
+    :target: https://web.libera.chat/#pytest
+    :alt: Libera chat
+
+
+The ``pytest`` framework makes it easy to write small tests, yet
+scales to support complex functional testing for applications and libraries.
+
+An example of a simple test:
+
+.. code-block:: python
+
+    # content of test_sample.py
+    def inc(x):
+        return x + 1
+
+
+    def test_answer():
+        assert inc(3) == 5
+
+
+To execute it::
+
+    $ pytest
+    ============================= test session starts =============================
+    collected 1 item
+
+    test_sample.py F
+
+    ================================== FAILURES ===================================
+    _________________________________ test_answer _________________________________
+
+        def test_answer():
+    >       assert inc(3) == 5
+    E       assert 4 == 5
+    E        +  where 4 = inc(3)
+
+    test_sample.py:5: AssertionError
+    ========================== 1 failed in 0.04 seconds ===========================
+
+
+Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <https://docs.pytest.org/en/stable/getting-started.html#our-first-test-run>`_ for more examples.
+
+
+Features
+--------
+
+- Detailed info on failing `assert statements <https://docs.pytest.org/en/stable/how-to/assert.html>`_ (no need to remember ``self.assert*`` names)
+
+- `Auto-discovery
+  <https://docs.pytest.org/en/stable/explanation/goodpractices.html#python-test-discovery>`_
+  of test modules and functions
+
+- `Modular fixtures <https://docs.pytest.org/en/stable/explanation/fixtures.html>`_ for
+  managing small or parametrized long-lived test resources
+
+- Can run `unittest <https://docs.pytest.org/en/stable/how-to/unittest.html>`_ (or trial)
+  test suites out of the box
+
+- Python 3.8+ or PyPy3
+
+- Rich plugin architecture, with over 1300 `external plugins <https://docs.pytest.org/en/latest/reference/plugin_list.html>`_ and thriving community
+
+
+Documentation
+-------------
+
+For full documentation, including installation, tutorials and PDF documents, please see https://docs.pytest.org/en/stable/.
+
+
+Bugs/Requests
+-------------
+
+Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
+
+
+Changelog
+---------
+
+Consult the `Changelog <https://docs.pytest.org/en/stable/changelog.html>`__ page for fixes and enhancements of each version.
+
+
+Support pytest
+--------------
+
+`Open Collective`_ is an online funding platform for open and transparent communities.
+It provides tools to raise money and share your finances in full transparency.
+
+It is the platform of choice for individuals and companies that want to make one-time or
+monthly donations directly to the project.
+
+See more details in the `pytest collective`_.
+
+.. _Open Collective: https://opencollective.com
+.. _pytest collective: https://opencollective.com/pytest
+
+
+pytest for enterprise
+---------------------
+
+Available as part of the Tidelift Subscription.
+
+The maintainers of pytest and thousands of other packages are working with Tidelift to deliver commercial support and
+maintenance for the open source dependencies you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact dependencies you use.
+
+`Learn more. <https://tidelift.com/subscription/pkg/pypi-pytest>`_
+
+Security
+^^^^^^^^
+
+pytest has never been associated with a security vulnerability, but in any case, to report a
+security vulnerability please use the `Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
+
+
+License
+-------
+
+Copyright Holger Krekel and others, 2004.
+
+Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
+
+.. _`MIT`: https://github.com/pytest-dev/pytest/blob/main/LICENSE
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/RECORD b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/RECORD
new file mode 100644
index 00000000..41f1fea4
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/RECORD
@@ -0,0 +1,155 @@
+../../Scripts/py.test.exe,sha256=ypi4-tLpar0qk9d6o-sO7Lt4fNpKvmhN0YKBMs9VOoA,108433
+../../Scripts/pytest.exe,sha256=ypi4-tLpar0qk9d6o-sO7Lt4fNpKvmhN0YKBMs9VOoA,108433
+__pycache__/py.cpython-312.pyc,,
+_pytest/__init__.py,sha256=4IdRJhnW5XG2KlaJkOxn5_TC9WeQ5tXDSF7tbb4vEso,391
+_pytest/__pycache__/__init__.cpython-312.pyc,,
+_pytest/__pycache__/_argcomplete.cpython-312.pyc,,
+_pytest/__pycache__/_version.cpython-312.pyc,,
+_pytest/__pycache__/cacheprovider.cpython-312.pyc,,
+_pytest/__pycache__/capture.cpython-312.pyc,,
+_pytest/__pycache__/compat.cpython-312.pyc,,
+_pytest/__pycache__/debugging.cpython-312.pyc,,
+_pytest/__pycache__/deprecated.cpython-312.pyc,,
+_pytest/__pycache__/doctest.cpython-312.pyc,,
+_pytest/__pycache__/faulthandler.cpython-312.pyc,,
+_pytest/__pycache__/fixtures.cpython-312.pyc,,
+_pytest/__pycache__/freeze_support.cpython-312.pyc,,
+_pytest/__pycache__/helpconfig.cpython-312.pyc,,
+_pytest/__pycache__/hookspec.cpython-312.pyc,,
+_pytest/__pycache__/junitxml.cpython-312.pyc,,
+_pytest/__pycache__/legacypath.cpython-312.pyc,,
+_pytest/__pycache__/logging.cpython-312.pyc,,
+_pytest/__pycache__/main.cpython-312.pyc,,
+_pytest/__pycache__/monkeypatch.cpython-312.pyc,,
+_pytest/__pycache__/nodes.cpython-312.pyc,,
+_pytest/__pycache__/outcomes.cpython-312.pyc,,
+_pytest/__pycache__/pastebin.cpython-312.pyc,,
+_pytest/__pycache__/pathlib.cpython-312.pyc,,
+_pytest/__pycache__/pytester.cpython-312.pyc,,
+_pytest/__pycache__/pytester_assertions.cpython-312.pyc,,
+_pytest/__pycache__/python.cpython-312.pyc,,
+_pytest/__pycache__/python_api.cpython-312.pyc,,
+_pytest/__pycache__/python_path.cpython-312.pyc,,
+_pytest/__pycache__/recwarn.cpython-312.pyc,,
+_pytest/__pycache__/reports.cpython-312.pyc,,
+_pytest/__pycache__/runner.cpython-312.pyc,,
+_pytest/__pycache__/scope.cpython-312.pyc,,
+_pytest/__pycache__/setuponly.cpython-312.pyc,,
+_pytest/__pycache__/setupplan.cpython-312.pyc,,
+_pytest/__pycache__/skipping.cpython-312.pyc,,
+_pytest/__pycache__/stash.cpython-312.pyc,,
+_pytest/__pycache__/stepwise.cpython-312.pyc,,
+_pytest/__pycache__/terminal.cpython-312.pyc,,
+_pytest/__pycache__/threadexception.cpython-312.pyc,,
+_pytest/__pycache__/timing.cpython-312.pyc,,
+_pytest/__pycache__/tmpdir.cpython-312.pyc,,
+_pytest/__pycache__/unittest.cpython-312.pyc,,
+_pytest/__pycache__/unraisableexception.cpython-312.pyc,,
+_pytest/__pycache__/warning_types.cpython-312.pyc,,
+_pytest/__pycache__/warnings.cpython-312.pyc,,
+_pytest/_argcomplete.py,sha256=gh0pna66p4LVb2D8ST4568WGxvdInGT43m6slYhqNqU,3776
+_pytest/_code/__init__.py,sha256=5h7R-LFINKh7p8QR1HgdjvSGo1ysVJz28MQ9h7ipHK4,521
+_pytest/_code/__pycache__/__init__.cpython-312.pyc,,
+_pytest/_code/__pycache__/code.cpython-312.pyc,,
+_pytest/_code/__pycache__/source.cpython-312.pyc,,
+_pytest/_code/code.py,sha256=nSItRCE6gTRFsWmHW5DPiMEhvVYalE8UJbbiFS_JfZE,49806
+_pytest/_code/source.py,sha256=2w9OZFOrRpiVaD_UdUS1T2XC7c2Is2GZn0iQy-lZfwk,7278
+_pytest/_io/__init__.py,sha256=pkLF29VEFr6Dlr3eOtJL8sf47RLFt1Jf4X1DZBPlYmc,190
+_pytest/_io/__pycache__/__init__.cpython-312.pyc,,
+_pytest/_io/__pycache__/pprint.cpython-312.pyc,,
+_pytest/_io/__pycache__/saferepr.cpython-312.pyc,,
+_pytest/_io/__pycache__/terminalwriter.cpython-312.pyc,,
+_pytest/_io/__pycache__/wcwidth.cpython-312.pyc,,
+_pytest/_io/pprint.py,sha256=BCe8K7Zc0drYC5_JKZBBMVrhK84ARlmPpk9vSWPYhaE,19633
+_pytest/_io/saferepr.py,sha256=Hhx5F-75iz03hdk-WO86Bmy9RBuRHsuJj-YUzozfrgo,4082
+_pytest/_io/terminalwriter.py,sha256=dQ07zJ1-vlpFqWBBu_c0cHxT0yXcGSu7o7LxDCEyB3s,9319
+_pytest/_io/wcwidth.py,sha256=cUEJ74UhweICwbKvU2q6noZcNgD0QlBEB9CfakGYaqA,1289
+_pytest/_py/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+_pytest/_py/__pycache__/__init__.cpython-312.pyc,,
+_pytest/_py/__pycache__/error.cpython-312.pyc,,
+_pytest/_py/__pycache__/path.cpython-312.pyc,,
+_pytest/_py/error.py,sha256=S1BRlfXSD248OFNzAuZ5O9w9W6fr2NUn0X8wYFGMNk0,3015
+_pytest/_py/path.py,sha256=Xl4UspvrwwKYNlLZDGbjhUrnD6fuBmFxxchgltmwGek,49211
+_pytest/_version.py,sha256=_6tcVrnJk8Bv64RxNmrrj_1-CXILcdZloboQyV60lgQ,411
+_pytest/assertion/__init__.py,sha256=lK1YNNAk1VqCK-Y5C5hMJMqJQyxQT11HuDH3w85a3Zc,6791
+_pytest/assertion/__pycache__/__init__.cpython-312.pyc,,
+_pytest/assertion/__pycache__/rewrite.cpython-312.pyc,,
+_pytest/assertion/__pycache__/truncate.cpython-312.pyc,,
+_pytest/assertion/__pycache__/util.cpython-312.pyc,,
+_pytest/assertion/rewrite.py,sha256=HV6AufHVu7eAp1K1aHYElaJbYWAQkMr_pMEVO5ZKS9w,47801
+_pytest/assertion/truncate.py,sha256=GYl5iqDXUuKQHgd_mthWl3ZjxBbDVQliBhks1Ty00kE,4459
+_pytest/assertion/util.py,sha256=6Vg5dZDuIXak9OLupvKyavLDroATgpU6ilkclc0OlJY,20265
+_pytest/cacheprovider.py,sha256=BFQVkna56tlO-v9qaNJVHIcF30hIVGrP9St_vMp4w98,22373
+_pytest/capture.py,sha256=iiu_k5_0ASbINS5_o7ZxMShGaWZFQD-y7jtU-NiJtMs,34680
+_pytest/compat.py,sha256=sPcVQwPd45GaqsgIZEbCTR04GKhkVmIfft6QnKj3hmo,11467
+_pytest/config/__init__.py,sha256=bSvGuLz1tEdkUPk52cp-HB6UK66MO9Ha2hiKjI0mOpk,70379
+_pytest/config/__pycache__/__init__.cpython-312.pyc,,
+_pytest/config/__pycache__/argparsing.cpython-312.pyc,,
+_pytest/config/__pycache__/compat.cpython-312.pyc,,
+_pytest/config/__pycache__/exceptions.cpython-312.pyc,,
+_pytest/config/__pycache__/findpaths.cpython-312.pyc,,
+_pytest/config/argparsing.py,sha256=dNjEvFh2C34XMoiE_R7liJv5cryXUz2WR2VsxdnQdjo,20562
+_pytest/config/compat.py,sha256=-m8G4-LLezCd4KZO6JQufEz7cRDqUSOjIwCtiKWpJvY,2938
+_pytest/config/exceptions.py,sha256=lUKnOtpRqK-qNL6JfOP-8tRqpmHU34CVxguR5y0Qfbw,288
+_pytest/config/findpaths.py,sha256=h4zq5AbLaZGpkeEcD2Xg-rJimh9I5pE042qQOTZT7NM,8062
+_pytest/debugging.py,sha256=yRmmOexsaDeFky37IrD2e9svz8CWebB7L2fSUy4LvuE,13260
+_pytest/deprecated.py,sha256=sO9UiqEdy9Z-NCvDoYYA0QtafYogAb7lP5M9N_Hpnak,3147
+_pytest/doctest.py,sha256=7WJprJGYj7_9Lyr-L49wJ7q5ZwDVj1FBhA9_CX7JdLc,26255
+_pytest/faulthandler.py,sha256=dT0H-MLi62SXeiKjLQJ0EVPuxkTlNOxpWtNxA5uBJPs,3674
+_pytest/fixtures.py,sha256=I5t3pW2lHaVPbN1rAQ9sdX0a3QrpoW_U5VP-Vxejxmg,73550
+_pytest/freeze_support.py,sha256=1EfzuxPd2oV9Ira26K5J4r9ppFZjnGi-xKzsBXe8B4g,1291
+_pytest/helpconfig.py,sha256=ibnZNxKzToLmx-2ZrZKCP9t6jJvpAIlmqdf9a0rhOoI,8895
+_pytest/hookspec.py,sha256=G-wKdmV3pecpeeiIAmzgPUMr22kz-CsqSpWEM-uiamg,42825
+_pytest/junitxml.py,sha256=FnYwq0wAR4Cixzj-a9qhyulUSEpMyjX9ALbjza_We74,25574
+_pytest/legacypath.py,sha256=ewzrdAD9RYlejlRWzvjkTd67tYt0qH5mrVcnXWFuuxE,16777
+_pytest/logging.py,sha256=QfaUUx-T0FiKBJBBb3bDllt8O8eTE7Mpigq7wvDepRc,35124
+_pytest/main.py,sha256=Oowez36UkOwJXkTRq4rVuJRRr18ItBnz_YDjgAmFCV8,37416
+_pytest/mark/__init__.py,sha256=bHORyCAsGnGJq7Tpm7A2sNQX31jwU1TgufM9DYcrTfQ,9307
+_pytest/mark/__pycache__/__init__.cpython-312.pyc,,
+_pytest/mark/__pycache__/expression.cpython-312.pyc,,
+_pytest/mark/__pycache__/structures.cpython-312.pyc,,
+_pytest/mark/expression.py,sha256=H6LmX0MWlxe0uBmuXIpQEntrLtyqIhEJv07YvA79eDQ,10152
+_pytest/mark/structures.py,sha256=6hiIR3d4zxy35Yiw961r9sYrNl-T5WS8_0auSmpdiB0,21039
+_pytest/monkeypatch.py,sha256=SKgteVJz1puqYQ3el6-ju5ZsNABqpoMUuRC6nn3tFpc,14598
+_pytest/nodes.py,sha256=Hqyplow99hb-Zz0KKzL0K3cQ0rCgDXK65vBp6ave3u8,26483
+_pytest/outcomes.py,sha256=SeW14rRKnGSt7K_NxY7HGnedoJawFHwQi2anAYYugk8,10532
+_pytest/pastebin.py,sha256=Ja1z3Z6cXNElobpwy97FiyR5DDexZrDEB6vufmNvE4o,3978
+_pytest/pathlib.py,sha256=7AWVlFKJY7atSU40CEyngk_srlLxC-ivBtNDq1MUXa4,33947
+_pytest/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+_pytest/pytester.py,sha256=-D_SNLfp_AQRMP7GOo6NsXlektiYod79pxBY-2RdUT0,61552
+_pytest/pytester_assertions.py,sha256=EIHeb1llN9TVRfBJqQnwvjKeG-KpzURNq8mtbK7vcyA,2244
+_pytest/python.py,sha256=mkJek4hqp7GMIyk6uPNWclI2dYlg78tTjymdcZViTJM,64851
+_pytest/python_api.py,sha256=tCuGTrlYlk-wxBmvvmrG5hdqWhqMJ4XiiOyXDtBRp1c,39627
+_pytest/python_path.py,sha256=fGP7iR_XMFRPijg4niILo44gWUWLlD635fYO5Abd6IM,745
+_pytest/recwarn.py,sha256=Gvg8OHHFPySW7ho1xoURNOLYd2yD2LBeOiQnDJ9ALvk,13278
+_pytest/reports.py,sha256=5OM_OyQHIS09PW6T_8kAJNS67GvcpvP-lKcna2LcSZ0,21331
+_pytest/runner.py,sha256=LDWKfhiIzWNkXqr1xwex-l1yhsWkdWCJko4bYM-etQ8,19436
+_pytest/scope.py,sha256=MyzqXUuWP1-BgbbCBzJyILiS_jicZO2LNUMUjv7vhh0,2798
+_pytest/setuponly.py,sha256=HNY9Ern-wex9iWSHxJU6ODA0yzYIH65QCkgNZ_BmbuA,3306
+_pytest/setupplan.py,sha256=l-ycFNxDZPyY52wh4f7yaqhzZ7SW1ijSKnQLmqzDZWA,1184
+_pytest/skipping.py,sha256=XbZKDPek9ex8aRXEoEy5iv0_e1b0sUi0PZrWqLBapek,10217
+_pytest/stash.py,sha256=5pE3kDx4q855TW9aVvYTdrkkKlMDU6-xiX4luKpJEgI,3090
+_pytest/stepwise.py,sha256=lYFm6kg000n_WEGOEQAho0j6dRCKJvgKz1Ya2Zz-0Zc,4596
+_pytest/terminal.py,sha256=-xT17xSJs9bu90wqRBc3WckaWTNTPOmVkZlO1X16Wyo,57393
+_pytest/threadexception.py,sha256=GHLYwCYK6I13Xv6bISO2crvPj9Z5ADKgVnUD7m1Oa14,3005
+_pytest/timing.py,sha256=URwa2JENXYkIN_9LFgEmJ4ric7SW8O6a8woS_TN6jXI,413
+_pytest/tmpdir.py,sha256=V2uAp6psFsVbfMufOIzKR4EJOjg1TQZoy4griB0WOag,11679
+_pytest/unittest.py,sha256=wew7w2q5SqgdPppFzv0evwrTLWmMCwKFQvSUyEX2C0Q,15614
+_pytest/unraisableexception.py,sha256=-L6ln8mRnqqPBskzarua49st4ioXoKgllZ3oMmRuCKU,3252
+_pytest/warning_types.py,sha256=m2_Y3zydUZNzPpu88n8wPNWqaxfaATMKEo_zAgXMqyY,4388
+_pytest/warnings.py,sha256=ExyXdM9ZsIUX4o5GCt43fR-YWhIHSuUbV6GbKEVXeiA,5211
+py.py,sha256=txZ1tdmEW6CBTp6Idn-I2sOzzA0xKNoCi9Re27Uj6HE,329
+pytest-8.3.3.dist-info/AUTHORS,sha256=TNVyL6M85uoY2jOxAmqGpSOwgTVtBNxMRLb0ztV419s,6984
+pytest-8.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pytest-8.3.3.dist-info/LICENSE,sha256=yoNqX57Mo7LzUCMPqiCkj7ixRWU7VWjXhIYt-GRwa5s,1091
+pytest-8.3.3.dist-info/METADATA,sha256=QvBgeB_G0uy1wcr37cBL84A3bBcPpAmwCujwN6lCD1E,7527
+pytest-8.3.3.dist-info/RECORD,,
+pytest-8.3.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest-8.3.3.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+pytest-8.3.3.dist-info/entry_points.txt,sha256=8IPrHPH3LNZQ7v5tNEOcNTZYk_SheNg64jsTM9erqL4,77
+pytest-8.3.3.dist-info/top_level.txt,sha256=yyhjvmXH7-JOaoQIdmNQHPuoBCxOyXS3jIths_6C8A4,18
+pytest/__init__.py,sha256=jm6h0ZECJdDXlX0i5F20mN3ypV--T7osmtMHzzzY8ug,5169
+pytest/__main__.py,sha256=oVDrGGo7N0TNyzXntUblcgTKbhHGWtivcX5TC7tEcKo,154
+pytest/__pycache__/__init__.cpython-312.pyc,,
+pytest/__pycache__/__main__.cpython-312.pyc,,
+pytest/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/REQUESTED b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/WHEEL b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/WHEEL
new file mode 100644
index 00000000..0fde4dd9
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (74.1.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/entry_points.txt b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/entry_points.txt
new file mode 100644
index 00000000..192205df
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+py.test = pytest:console_main
+pytest = pytest:console_main
diff --git a/.venv/Lib/site-packages/pytest-8.3.3.dist-info/top_level.txt b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/top_level.txt
new file mode 100644
index 00000000..3084ae51
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-8.3.3.dist-info/top_level.txt
@@ -0,0 +1,3 @@
+_pytest
+py
+pytest
diff --git a/.venv/Lib/site-packages/pytest-cov.pth b/.venv/Lib/site-packages/pytest-cov.pth
new file mode 100644
index 00000000..8ed1a516
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest-cov.pth
@@ -0,0 +1 @@
+import os, sys;exec('if \'COV_CORE_SOURCE\' in os.environ:\n try:\n  from pytest_cov.embed import init\n  init()\n except Exception as exc:\n  sys.stderr.write(\n   "pytest-cov: Failed to setup subprocess coverage. "\n   "Environ: {0!r} "\n   "Exception: {1!r}\\n".format(\n    dict((k, v) for k, v in os.environ.items() if k.startswith(\'COV_CORE\')),\n    exc\n   )\n  )\n')
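
Unrolled for readability, the one-line payload above is equivalent to the
following (``site`` executes ``.pth`` lines that start with ``import`` at
interpreter startup, which is what triggers subprocess coverage)::

    import os
    import sys

    if 'COV_CORE_SOURCE' in os.environ:
        try:
            from pytest_cov.embed import init
            init()
        except Exception as exc:
            sys.stderr.write(
                "pytest-cov: Failed to setup subprocess coverage. "
                "Environ: {0!r} "
                "Exception: {1!r}\n".format(
                    dict((k, v) for k, v in os.environ.items()
                         if k.startswith('COV_CORE')),
                    exc,
                )
            )
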
diff --git a/.venv/Lib/site-packages/pytest/__init__.py b/.venv/Lib/site-packages/pytest/__init__.py
new file mode 100644
index 00000000..90abcdab
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest/__init__.py
@@ -0,0 +1,172 @@
+# PYTHON_ARGCOMPLETE_OK
+"""pytest: unit and functional testing with Python."""
+
+from __future__ import annotations
+
+from _pytest import __version__
+from _pytest import version_tuple
+from _pytest._code import ExceptionInfo
+from _pytest.assertion import register_assert_rewrite
+from _pytest.cacheprovider import Cache
+from _pytest.capture import CaptureFixture
+from _pytest.config import cmdline
+from _pytest.config import Config
+from _pytest.config import console_main
+from _pytest.config import ExitCode
+from _pytest.config import hookimpl
+from _pytest.config import hookspec
+from _pytest.config import main
+from _pytest.config import PytestPluginManager
+from _pytest.config import UsageError
+from _pytest.config.argparsing import OptionGroup
+from _pytest.config.argparsing import Parser
+from _pytest.debugging import pytestPDB as __pytestPDB
+from _pytest.doctest import DoctestItem
+from _pytest.fixtures import fixture
+from _pytest.fixtures import FixtureDef
+from _pytest.fixtures import FixtureLookupError
+from _pytest.fixtures import FixtureRequest
+from _pytest.fixtures import yield_fixture
+from _pytest.freeze_support import freeze_includes
+from _pytest.legacypath import TempdirFactory
+from _pytest.legacypath import Testdir
+from _pytest.logging import LogCaptureFixture
+from _pytest.main import Dir
+from _pytest.main import Session
+from _pytest.mark import Mark
+from _pytest.mark import MARK_GEN as mark
+from _pytest.mark import MarkDecorator
+from _pytest.mark import MarkGenerator
+from _pytest.mark import param
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.nodes import Collector
+from _pytest.nodes import Directory
+from _pytest.nodes import File
+from _pytest.nodes import Item
+from _pytest.outcomes import exit
+from _pytest.outcomes import fail
+from _pytest.outcomes import importorskip
+from _pytest.outcomes import skip
+from _pytest.outcomes import xfail
+from _pytest.pytester import HookRecorder
+from _pytest.pytester import LineMatcher
+from _pytest.pytester import Pytester
+from _pytest.pytester import RecordedHookCall
+from _pytest.pytester import RunResult
+from _pytest.python import Class
+from _pytest.python import Function
+from _pytest.python import Metafunc
+from _pytest.python import Module
+from _pytest.python import Package
+from _pytest.python_api import approx
+from _pytest.python_api import raises
+from _pytest.recwarn import deprecated_call
+from _pytest.recwarn import WarningsRecorder
+from _pytest.recwarn import warns
+from _pytest.reports import CollectReport
+from _pytest.reports import TestReport
+from _pytest.runner import CallInfo
+from _pytest.stash import Stash
+from _pytest.stash import StashKey
+from _pytest.terminal import TestShortLogReport
+from _pytest.tmpdir import TempPathFactory
+from _pytest.warning_types import PytestAssertRewriteWarning
+from _pytest.warning_types import PytestCacheWarning
+from _pytest.warning_types import PytestCollectionWarning
+from _pytest.warning_types import PytestConfigWarning
+from _pytest.warning_types import PytestDeprecationWarning
+from _pytest.warning_types import PytestExperimentalApiWarning
+from _pytest.warning_types import PytestRemovedIn9Warning
+from _pytest.warning_types import PytestReturnNotNoneWarning
+from _pytest.warning_types import PytestUnhandledCoroutineWarning
+from _pytest.warning_types import PytestUnhandledThreadExceptionWarning
+from _pytest.warning_types import PytestUnknownMarkWarning
+from _pytest.warning_types import PytestUnraisableExceptionWarning
+from _pytest.warning_types import PytestWarning
+
+
+set_trace = __pytestPDB.set_trace
+
+
+__all__ = [
+    "__version__",
+    "approx",
+    "Cache",
+    "CallInfo",
+    "CaptureFixture",
+    "Class",
+    "cmdline",
+    "Collector",
+    "CollectReport",
+    "Config",
+    "console_main",
+    "deprecated_call",
+    "Dir",
+    "Directory",
+    "DoctestItem",
+    "exit",
+    "ExceptionInfo",
+    "ExitCode",
+    "fail",
+    "File",
+    "fixture",
+    "FixtureDef",
+    "FixtureLookupError",
+    "FixtureRequest",
+    "freeze_includes",
+    "Function",
+    "hookimpl",
+    "HookRecorder",
+    "hookspec",
+    "importorskip",
+    "Item",
+    "LineMatcher",
+    "LogCaptureFixture",
+    "main",
+    "mark",
+    "Mark",
+    "MarkDecorator",
+    "MarkGenerator",
+    "Metafunc",
+    "Module",
+    "MonkeyPatch",
+    "OptionGroup",
+    "Package",
+    "param",
+    "Parser",
+    "PytestAssertRewriteWarning",
+    "PytestCacheWarning",
+    "PytestCollectionWarning",
+    "PytestConfigWarning",
+    "PytestDeprecationWarning",
+    "PytestExperimentalApiWarning",
+    "PytestRemovedIn9Warning",
+    "PytestReturnNotNoneWarning",
+    "Pytester",
+    "PytestPluginManager",
+    "PytestUnhandledCoroutineWarning",
+    "PytestUnhandledThreadExceptionWarning",
+    "PytestUnknownMarkWarning",
+    "PytestUnraisableExceptionWarning",
+    "PytestWarning",
+    "raises",
+    "RecordedHookCall",
+    "register_assert_rewrite",
+    "RunResult",
+    "Session",
+    "set_trace",
+    "skip",
+    "Stash",
+    "StashKey",
+    "version_tuple",
+    "TempdirFactory",
+    "TempPathFactory",
+    "Testdir",
+    "TestReport",
+    "TestShortLogReport",
+    "UsageError",
+    "WarningsRecorder",
+    "warns",
+    "xfail",
+    "yield_fixture",
+]
diff --git a/.venv/Lib/site-packages/pytest/__main__.py b/.venv/Lib/site-packages/pytest/__main__.py
new file mode 100644
index 00000000..cccab5d5
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest/__main__.py
@@ -0,0 +1,9 @@
+"""The pytest entry point."""
+
+from __future__ import annotations
+
+import pytest
+
+
+if __name__ == "__main__":
+    raise SystemExit(pytest.console_main())
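
The same entry point can be reached programmatically via ``pytest.main()``; a
minimal sketch (``tests/`` is an illustrative path)::

    import pytest

    exit_code = pytest.main(["tests/", "-q"])   # returns an ExitCode, does not exit
    raise SystemExit(exit_code)
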
diff --git a/.venv/Lib/site-packages/pytest/py.typed b/.venv/Lib/site-packages/pytest/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/AUTHORS.rst b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/AUTHORS.rst
new file mode 100644
index 00000000..81e0b8e6
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/AUTHORS.rst
@@ -0,0 +1,64 @@
+
+Authors
+=======
+
+* Marc Schlaich - https://github.com/schlamar (\http://www.schlamar.org)
+* Rick van Hattem - http://wol.ph
+* Buck Evan - https://github.com/bukzor
+* Eric Larson - http://larsoner.com
+* Marc Abramowitz - \http://marc-abramowitz.com
+* Thomas Kluyver - https://github.com/takluyver
+* Guillaume Ayoub - http://www.yabz.fr
+* Federico Ceratto - http://firelet.net
+* Josh Kalderimis - \http://blog.cookiestack.com
+* Ionel Cristian Mărieș - https://blog.ionelmc.ro
+* Christian Ledermann - https://github.com/cleder
+* Alec Nikolas Reiter - https://github.com/justanr
+* Patrick Lannigan - https://github.com/plannigan
+* David Szotten - https://github.com/davidszotten
+* Michael Elovskikh - https://github.com/wronglink
+* Saurabh Kumar - https://github.com/theskumar
+* Michael Elovskikh - https://github.com/wronglink
+* Daniel Hahler - https://github.com/blueyed (\https://daniel.hahler.de)
+* Florian Bruhin - http://www.the-compiler.org
+* Zoltan Kozma - https://github.com/kozmaz87
+* Francis Niu - https://flniu.github.io
+* Jannis Leidel - https://github.com/jezdez
+* Ryan Hiebert - http://ryanhiebert.com/
+* Terence Honles - https://github.com/terencehonles
+* Jeremy Bowman - https://github.com/jmbowman
+* Samuel Giffard - https://github.com/Mulugruntz
+* Семён Марьясин - https://github.com/MarSoft
+* Alexander Shadchin - https://github.com/shadchin
+* Thomas Grainger - https://graingert.co.uk
+* Juanjo Bazán - https://github.com/xuanxu
+* Andrew Murray - https://github.com/radarhere
+* Ned Batchelder - https://nedbatchelder.com/
+* Albert Tugushev - https://github.com/atugushev
+* Martín Gaitán - https://github.com/mgaitan
+* Hugo van Kemenade - https://github.com/hugovk
+* Michael Manganiello - https://github.com/adamantike
+* Anders Hovmöller - https://github.com/boxed
+* Zac Hatfield-Dodds - https://zhd.dev
+* Mateus Berardo de Souza Terra - https://github.com/MatTerra
+* Ganden Schaffner - https://github.com/gschaffner
+* Michał Górny - https://github.com/mgorny
+* Bernát Gábor - https://github.com/gaborbernat
+* Pamela McA'Nulty - https://github.com/PamelaM
+* Christian Riedel - https://github.com/Cielquan
+* Chris Sreesangkom - https://github.com/csreesan
+* Sorin Sbarnea - https://github.com/ssbarnea
+* Brian Rutledge - https://github.com/bhrutledge
+* Danilo Šegan - https://github.com/dsegan
+* Michał Bielawski - https://github.com/D3X
+* Zac Hatfield-Dodds - https://github.com/Zac-HD
+* Ben Greiner - https://github.com/bnavigator
+* Delgan - https://github.com/Delgan
+* Andre Brisco - https://github.com/abrisco
+* Colin O'Dell - https://github.com/colinodell
+* Ronny Pfannschmidt - https://github.com/RonnyPfannschmidt
+* Christian Fetzer - https://github.com/fetzerch
+* Jonathan Stewmon - https://github.com/jstewmon
+* Matthew Gamble - https://github.com/mwgamble
+* Christian Clauss - https://github.com/cclauss
+* Dawn James - https://github.com/dawngerpony
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/INSTALLER b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/LICENSE b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/LICENSE
new file mode 100644
index 00000000..5b3634b4
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2010 Meme Dough
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/METADATA b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/METADATA
new file mode 100644
index 00000000..ea629ba9
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/METADATA
@@ -0,0 +1,598 @@
+Metadata-Version: 2.1
+Name: pytest-cov
+Version: 6.0.0
+Summary: Pytest plugin for measuring coverage.
+Home-page: https://github.com/pytest-dev/pytest-cov
+Author: Marc Schlaich
+Author-email: marc.schlaich@gmail.com
+License: MIT
+Project-URL: Documentation, https://pytest-cov.readthedocs.io/
+Project-URL: Changelog, https://pytest-cov.readthedocs.io/en/latest/changelog.html
+Project-URL: Issue Tracker, https://github.com/pytest-dev/pytest-cov/issues
+Keywords: cover,coverage,pytest,py.test,distributed,parallel
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Pytest
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Utilities
+Requires-Python: >=3.9
+License-File: LICENSE
+License-File: AUTHORS.rst
+Requires-Dist: pytest >=4.6
+Requires-Dist: coverage[toml] >=7.5
+Provides-Extra: testing
+Requires-Dist: fields ; extra == 'testing'
+Requires-Dist: hunter ; extra == 'testing'
+Requires-Dist: process-tests ; extra == 'testing'
+Requires-Dist: pytest-xdist ; extra == 'testing'
+Requires-Dist: virtualenv ; extra == 'testing'
+
+========
+Overview
+========
+
+.. start-badges
+
+.. list-table::
+    :stub-columns: 1
+
+    * - docs
+      - |docs|
+    * - tests
+      - |github-actions|
+    * - package
+      - |version| |conda-forge| |wheel| |supported-versions| |supported-implementations| |commits-since|
+.. |docs| image:: https://readthedocs.org/projects/pytest-cov/badge/?style=flat
+    :target: https://readthedocs.org/projects/pytest-cov/
+    :alt: Documentation Status
+
+.. |github-actions| image:: https://github.com/pytest-dev/pytest-cov/actions/workflows/test.yml/badge.svg
+    :alt: GitHub Actions Status
+    :target: https://github.com/pytest-dev/pytest-cov/actions
+
+.. |version| image:: https://img.shields.io/pypi/v/pytest-cov.svg
+    :alt: PyPI Package latest release
+    :target: https://pypi.org/project/pytest-cov
+
+.. |conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pytest-cov.svg
+    :target: https://anaconda.org/conda-forge/pytest-cov
+.. |wheel| image:: https://img.shields.io/pypi/wheel/pytest-cov.svg
+    :alt: PyPI Wheel
+    :target: https://pypi.org/project/pytest-cov
+
+.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/pytest-cov.svg
+    :alt: Supported versions
+    :target: https://pypi.org/project/pytest-cov
+
+.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/pytest-cov.svg
+    :alt: Supported implementations
+    :target: https://pypi.org/project/pytest-cov
+
+.. |commits-since| image:: https://img.shields.io/github/commits-since/pytest-dev/pytest-cov/v6.0.0.svg
+    :alt: Commits since latest release
+    :target: https://github.com/pytest-dev/pytest-cov/compare/v6.0.0...master
+
+.. end-badges
+
+This plugin produces coverage reports. Compared to just using ``coverage run``, this plugin does some extras:
+
+* Subprocess support: you can fork or run stuff in a subprocess and it will get covered without any fuss.
+* Xdist support: you can use all of pytest-xdist's features and still get coverage.
+* Consistent pytest behavior: running ``coverage run -m pytest`` gives you a slightly different ``sys.path`` (the CWD will be
+  in it, unlike when running plain ``pytest``).
+
+All features offered by the coverage package should work, either through pytest-cov's command line options or
+through coverage's config file.
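+
+For example, branch measurement can be enabled either with the ``--cov-branch`` flag or through
+the config file; a minimal ``.coveragerc`` sketch (the ``source`` value is illustrative)::
+
+    [run]
+    branch = true
+    source = myproj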
+
+* Free software: MIT license
+
+Installation
+============
+
+Install with pip::
+
+    pip install pytest-cov
+
+For distributed testing support install pytest-xdist::
+
+    pip install pytest-xdist
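+
+For example, a run spread over four workers while still collecting coverage could look like
+this (the module name and worker count are illustrative)::
+
+    pytest --cov=myproj -n 4 tests/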
+
+Upgrading from ancient pytest-cov
+---------------------------------
+
+`pytest-cov 2.0` uses a new ``.pth`` file (``pytest-cov.pth``). You may want to manually remove the older
+``init_cov_core.pth`` from site-packages, as it is not removed automatically.
+
+Uninstalling
+------------
+
+Uninstall with pip::
+
+    pip uninstall pytest-cov
+
+In certain scenarios a stray ``.pth`` file may be left behind in site-packages.
+
+* `pytest-cov 2.0` may leave a ``pytest-cov.pth`` if you installed without wheels
+  (``easy_install``, ``setup.py install``, etc.).
+* `pytest-cov 1.8 or older` will leave an ``init_cov_core.pth``.
+
+Usage
+=====
+
+::
+
+    pytest --cov=myproj tests/
+
+This would produce a report like::
+
+    -------------------- coverage: ... ---------------------
+    Name                 Stmts   Miss  Cover
+    ----------------------------------------
+    myproj/__init__          2      0   100%
+    myproj/myproj          257     13    94%
+    myproj/feature4286      94      7    92%
+    ----------------------------------------
+    TOTAL                  353     20    94%
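+
+Multiple report types can be requested in a single run; an illustrative sketch (the html
+destination is an arbitrary example)::
+
+    pytest --cov=myproj --cov-report=term-missing --cov-report=html:coverage_html tests/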
+
+Documentation
+=============
+
+    https://pytest-cov.readthedocs.io/en/latest/
+
+Coverage Data File
+==================
+
+The data file is erased at the beginning of testing to ensure clean data for each test run. If you
+need to combine the coverage of several test runs, you can use the ``--cov-append`` option to append
+this coverage data to the coverage data from previous test runs.
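+
+For example, appending an integration run to a unit run (paths are illustrative)::
+
+    pytest --cov=myproj tests/unit/
+    pytest --cov=myproj --cov-append tests/integration/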
+
+The data file is left at the end of testing so that it is possible to use normal coverage tools to
+examine it.
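+
+For example, the leftover data file can be inspected directly with the coverage CLI::
+
+    coverage report
+    coverage html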
+
+Limitations
+===========
+
+For distributed testing the workers must have the pytest-cov package installed. This is needed since
+the plugin must be registered through setuptools for pytest to start it on the
+worker.
+
+For subprocess measurement, environment variables must make it from the main process to the
+subprocess. The Python used by the subprocess must have pytest-cov installed. The subprocess must
+do normal site initialisation so that the environment variables can be detected and coverage
+started. See the `subprocess support docs `_
+for more details on how this works.
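+
+If a subprocess is terminated by SIGTERM before exiting normally, its data may need an
+explicit flush; a sketch using the cleanup helper from this plugin's ``pytest_cov.embed``
+module::
+
+    from pytest_cov.embed import cleanup_on_sigterm
+    cleanup_on_sigterm()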
+
+Security
+========
+
+To report a security vulnerability please use the `Tidelift security contact `_.
+Tidelift will coordinate the fix and disclosure.
+
+Acknowledgements
+================
+
+Whilst this plugin has been built fresh from the ground up, it has been influenced by the work done
+on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin), which are
+other coverage plugins.
+
+Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
+
+Holger Krekel for pytest with its distributed testing support.
+
+Jason Pellerin for nose.
+
+Michael Foord for unittest2.
+
+No doubt others have contributed to these tools as well.
+
+
+Changelog
+=========
+
+6.0.0 (2024-10-29)
+------------------
+
+* Updated various documentation inaccuracies, especially on subprocess handling.
+* Changed fail-under checks to use the precision set in the coverage configuration.
+  Now it performs the check just like ``coverage report`` would.
+* Added a ``--cov-precision`` cli option that can override the value set in your coverage configuration.
+* Dropped support for now EOL Python 3.8.
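+
+An illustrative invocation combining the new precision override with a fail-under check
+(the values are arbitrary examples)::
+
+    pytest --cov=myproj --cov-fail-under=90 --cov-precision=2 tests/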
+
+5.0.0 (2024-03-24)
+------------------
+
+* Removed support for xdist rsync (now deprecated).
+  Contributed by Matthias Reichenbach in `#623 `_.
+* Switched docs theme to Furo.
+* Various legacy Python cleanup and CI improvements.
+  Contributed by Christian Clauss and Hugo van Kemenade in
+  `#630 `_,
+  `#631 `_,
+  `#632 `_ and
+  `#633 `_.
+* Added a ``pyproject.toml`` example in the docs.
+  Contributed by Dawn James in `#626 `_.
+* Modernized project's pre-commit hooks to use ruff. Initial POC contributed by
+  Christian Clauss in `#584 `_.
+
+4.1.0 (2023-05-24)
+------------------
+
+* Updated CI with new Pythons and dependencies.
+* Removed rsyncdir support. This makes pytest-cov compatible with xdist 3.0.
+  Contributed by Sorin Sbarnea in `#558 `_.
+* Optimized summary generation to not be performed if no reporting is active (for example,
+  when ``--cov-report=''`` is used without ``--cov-fail-under``).
+  Contributed by Jonathan Stewmon in `#589 `_.
+* Added support for JSON reporting.
+  Contributed by Matthew Gamble in `#582 `_.
+* Refactored code to use f-strings.
+  Contributed by Mark Mayo in `#572 `_.
+* Fixed a skip in the test suite for some old xdist.
+  Contributed by a bunch of people in `#565 `_.
+
+
+4.0.0 (2022-09-28)
+------------------
+
+**Note that this release drops support for multiprocessing.**
+
+
+* `--cov-fail-under` no longer causes `pytest --collect-only` to fail.
+  Contributed by Zac Hatfield-Dodds in `#511 `_.
+* Dropped support for multiprocessing (mostly because `issue 82408 `_). This feature was
+  mostly working but very broken in certain scenarios and made the test suite very flaky and slow.
+
+  There is builtin multiprocessing support in coverage and you can migrate to that. All you need is this in your
+  ``.coveragerc`` (a ``pyproject.toml`` equivalent is sketched after this list)::
+
+    [run]
+    concurrency = multiprocessing
+    parallel = true
+    sigterm = true
+* Fixed deprecation in ``setup.py`` by trying to import setuptools before distutils.
+  Contributed by Ben Greiner in `#545 `_.
+* Removed undesirable new lines that were displayed while reporting was disabled.
+  Contributed by Delgan in `#540 `_.
+* Documentation fixes.
+  Contributed by Andre Brisco in `#543 `_
+  and Colin O'Dell in `#525 `_.
+* Added support for LCOV output format via `--cov-report=lcov`. Only works with coverage 6.3+.
+  Contributed by Christian Fetzer in `#536 `_.
+* Modernized pytest hook implementation.
+  Contributed by Bruno Oliveira in `#549 `_
+  and Ronny Pfannschmidt in `#550 `_.
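+
+For projects configured via ``pyproject.toml`` (pytest-cov already depends on
+``coverage[toml]``), the ``.coveragerc`` snippet above translates to roughly this sketch::
+
+    [tool.coverage.run]
+    concurrency = ["multiprocessing"]
+    parallel = true
+    sigterm = true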
+
+
+3.0.0 (2021-10-04)
+-------------------
+
+**Note that this release drops support for Python 2.7 and Python 3.5.**
+
+* Added support for Python 3.10 and updated various test dependencies.
+  Contributed by Hugo van Kemenade in
+  `#500 `_.
+* Switched from Travis CI to GitHub Actions. Contributed by Hugo van Kemenade in
+  `#494 `_ and
+  `#495 `_.
+* Add a ``--cov-reset`` CLI option.
+  Contributed by Danilo Šegan in
+  `#459 `_.
+* Improved validation of ``--cov-fail-under`` CLI option.
+  Contributed by ... Ronny Pfannschmidt's desire for snark in
+  `#480 `_.
+* Dropped Python 2.7 support.
+  Contributed by Thomas Grainger in
+  `#488 `_.
+* Updated trove classifiers. Contributed by Michał Bielawski in
+  `#481 `_.
+* Reverted change for `toml` requirement.
+  Contributed by Thomas Grainger in
+  `#477 `_.
+
+2.12.1 (2021-06-01)
+-------------------
+
+* Changed the `toml` requirement to be always be directly required (instead of being required through a coverage extra).
+  This fixes issues with pip-compile (`pip-tools#1300 `_).
+  Contributed by Sorin Sbarnea in `#472 `_.
+* Documented ``show_contexts``.
+  Contributed by Brian Rutledge in `#473 `_.
+
+2.12.0 (2021-05-14)
+-------------------
+
+* Added coverage's `toml` extra to install requirements in setup.py.
+  Contributed by Christian Riedel in `#410 `_.
+* Fixed ``pytest_cov.__version__`` to have the right value (string with version instead of a string
+  including ``__version__ =``).
+* Fixed license classifier in ``setup.py``.
+  Contributed by Chris Sreesangkom in `#467 `_.
+* Fixed *commits since* badge.
+  Contributed by Terence Honles in `#470 `_.
+
+2.11.1 (2021-01-20)
+-------------------
+
+* Fixed support for newer setuptools (v42+).
+  Contributed by Michał Górny in `#451 `_.
+
+2.11.0 (2021-01-18)
+-------------------
+
+* Bumped minimum coverage requirement to 5.2.1. This prevents reporting issues.
+  Contributed by Mateus Berardo de Souza Terra in `#433 `_.
+* Improved sample projects (from the `examples `_
+  directory) to support running `tox -e pyXY`. Now the example configures a suffixed coverage data file,
+  and that makes the cleanup environment unnecessary.
+  Contributed by Ganden Schaffner in `#435 `_.
+* Removed the empty `console_scripts` entrypoint that confused some Gentoo build script.
+  I didn't ask why it was so broken cause I didn't want to ruin my day.
+  Contributed by Michał Górny in `#434 `_.
+* Fixed the missing `coverage context `_
+  when using subprocesses.
+  Contributed by Bernát Gábor in `#443 `_.
+* Updated the config section in the docs.
+  Contributed by Pamela McA'Nulty in `#429 `_.
+* Migrated CI to travis-ci.com (from .org).
+
+2.10.1 (2020-08-14)
+-------------------
+
+* Support for ``pytest-xdist`` 2.0, which breaks compatibility with ``pytest-xdist`` before 1.22.3 (from 2017).
+  Contributed by Zac Hatfield-Dodds in `#412 `_.
+* Fixed the ``LocalPath has no attribute startswith`` failure that occurred when using the ``pytester`` plugin
+  in inline mode.
+
+2.10.0 (2020-06-12)
+-------------------
+
+* Improved the ``--no-cov`` warning. Now it's only shown if ``--no-cov`` is present before ``--cov``.
+* Removed legacy pytest support. Changed ``setup.py`` so that ``pytest>=4.6`` is required.
+
+2.9.0 (2020-05-22)
+------------------
+
+* Fixed ``RemovedInPytest4Warning`` when using Pytest 3.10.
+  Contributed by Michael Manganiello in `#354 `_.
+* Made pytest startup faster when the plugin is not active, by lazy-importing.
+  Contributed by Anders Hovmöller in `#339 `_.
+* Various CI improvements.
+  Contributed by Daniel Hahler in `#363 `_ and
+  `#364 `_.
+* Various Python support updates (drop EOL 3.4, test against 3.8 final).
+  Contributed by Hugo van Kemenade in
+  `#336 `_ and
+  `#367 `_.
+* Changed ``--cov-append`` to always enable ``data_suffix`` (a coverage setting).
+  Contributed by Harm Geerts in
+  `#387 `_.
+* Changed ``--cov-append`` to handle loading previous data better
+  (fixes various path aliasing issues).
+* Various other testing improvements, github issue templates, example updates.
+* Fixed internal failures caused by tests that change the current working directory, by
+  ensuring a consistent working directory when coverage is called.
+  See `#306 `_ and
+  `coveragepy#881 `_
+
+2.8.1 (2019-10-05)
+------------------
+
+* Fixed `#348 `_ -
+  a regression where ``--cov-fail-under`` always fails when only certain reports (html or xml) are used.
+
+2.8.0 (2019-10-04)
+------------------
+
+* Fixed ``RecursionError`` that can occur when using
+  `cleanup_on_signal `__ or
+  `cleanup_on_sigterm `__.
+  See: `#294 `_.
+  The 2.7.x releases of pytest-cov should be considered broken regarding the aforementioned cleanup API.
+* Added compatibility with future xdist release that deprecates some internals
+  (match pytest-xdist master/worker terminology).
+  Contributed by Thomas Grainger in `#321 `_
+* Fixed breakage that occurs when multiple reporting options are used.
+  Contributed by Thomas Grainger in `#338 `_.
+* Changed internals to use a stub instead of ``os.devnull``.
+  Contributed by Thomas Grainger in `#332 `_.
+* Added support for Coverage 5.0.
+  Contributed by Ned Batchelder in `#319 `_.
+* Added support for float values in ``--cov-fail-under``.
+  Contributed by Martín Gaitán in `#311 `_.
+* Various documentation fixes. Contributed by
+  Juanjo Bazán,
+  Andrew Murray and
+  Albert Tugushev in
+  `#298 `_,
+  `#299 `_ and
+  `#307 `_.
+* Various testing improvements. Contributed by
+  Ned Batchelder,
+  Daniel Hahler,
+  Ionel Cristian Mărieș and
+  Hugo van Kemenade in
+  `#313 `_,
+  `#314 `_,
+  `#315 `_,
+  `#316 `_,
+  `#325 `_,
+  `#326 `_,
+  `#334 `_ and
+  `#335 `_.
+* Added the ``--cov-context`` CLI option that enables coverage contexts. Only works with coverage 5.0+.
+  Contributed by Ned Batchelder in `#345 `_.
+
+2.7.1 (2019-05-03)
+------------------
+
+* Fixed source distribution manifest so that garbage ain't included in the tarball.
+
+2.7.0 (2019-05-03)
+------------------
+
+* Fixed ``AttributeError: 'NoneType' object has no attribute 'configure_node'`` error when ``--no-cov`` is used.
+  Contributed by Alexander Shadchin in `#263 `_.
+* Various testing and CI improvements. Contributed by Daniel Hahler in
+  `#255 `_,
+  `#266 `_,
+  `#272 `_,
+  `#271 `_ and
+  `#269 `_.
+* Improved ``pytest_cov.embed.cleanup_on_sigterm`` to be reentrant (signal deliveries while signal handling is
+  running won't break stuff).
+* Added ``pytest_cov.embed.cleanup_on_signal`` for customized cleanup.
+* Improved cleanup code and fixed various issues with leftover data files. All contributed in
+  `#265 `_ or
+  `#262 `_.
+* Improved examples. Now there are two examples for the common project layouts, complete with working coverage
+  configuration. The examples have CI testing. Contributed in
+  `#267 `_.
+* Improved help text for CLI options.
+
+2.6.1 (2019-01-07)
+------------------
+
+* Added support for Pytest 4.1. Contributed by Daniel Hahler and Семён Марьясин in
+  `#253 `_ and
+  `#230 `_.
+* Various test and docs fixes. Contributed by Daniel Hahler in
+  `#224 `_ and
+  `#223 `_.
+* Fixed the "Module already imported" issue (`#211 `_).
+  Contributed by Daniel Hahler in `#228 `_.
+
+2.6.0 (2018-09-03)
+------------------
+
+* Dropped support for Python 3 < 3.4, Pytest < 3.5 and Coverage < 4.4.
+* Fixed some documentation formatting. Contributed by Jean Jordaan and Julian.
+* Added an example with ``addopts`` in documentation. Contributed by Samuel Giffard in
+  `#195 `_.
+* Fixed ``TypeError: 'NoneType' object is not iterable`` in certain xdist configurations. Contributed by Jeremy Bowman in
+  `#213 `_.
+* Added a ``no_cover`` marker and fixture (a usage sketch follows this list). Fixes
+  `#78 `_.
+* Fixed broken ``no_cover`` check when running doctests. Contributed by Terence Honles in
+  `#200 `_.
+* Fixed various issues with path normalization in reports (when combining coverage data from parallel mode). Fixes
+  `#130 `_.
+  Contributed by Ryan Hiebert & Ionel Cristian Mărieș in
+  `#178 `_.
+* Report generation failures don't raise exceptions anymore. A warning will be logged instead. Fixes
+  `#161 `_.
+* Fixed multiprocessing issue on Windows (empty env vars are not passed). Fixes
+  `#165 `_.
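+
+An illustrative use of the ``no_cover`` marker added in this release (the test name is
+arbitrary)::
+
+    import pytest
+
+    @pytest.mark.no_cover
+    def test_not_measured():
+        ...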
+
+2.5.1 (2017-05-11)
+------------------
+
+* Fixed xdist breakage (regression in ``2.5.0``).
+  Fixes `#157 `_.
+* Allow setting custom ``data_file`` name in ``.coveragerc``.
+  Fixes `#145 `_.
+  Contributed by Jannis Leidel & Ionel Cristian Mărieș in
+  `#156 `_.
+
+2.5.0 (2017-05-09)
+------------------
+
+* Always show a summary when ``--cov-fail-under`` is used. Contributed by Francis Niu in `PR#141
+  `_.
+* Added ``--cov-branch`` option. Fixes `#85 `_.
+* Improve exception handling in subprocess setup. Fixes `#144 `_.
+* Fixed handling when ``--cov`` is used multiple times. Fixes `#151 `_.
+
+2.4.0 (2016-10-10)
+------------------
+
+* Added a "disarm" option: ``--no-cov``. It will disable coverage measurements. Contributed by Zoltan Kozma in
+  `PR#135 `_.
+
+  **WARNING: Do not put this in your configuration files, it's meant to be a one-off for situations where you want to
+  disable coverage from the command line.**
+* Fixed broken exception handling on ``.pth`` file. See `#136 `_.
+
+2.3.1 (2016-08-07)
+------------------
+
+* Fixed regression causing spurious errors when xdist was used. See `#124
+  `_.
+* Fixed DeprecationWarning about incorrect `addoption` use. Contributed by Florian Bruhin in `PR#127
+  `_.
+* Fixed deprecated use of funcarg fixture API. Contributed by Daniel Hahler in `PR#125
+  `_.
+
+2.3.0 (2016-07-05)
+------------------
+
+* Add support for specifying the output location for html, xml, and annotate reports.
+  Contributed by Patrick Lannigan in `PR#113 `_.
+* Fix bug hiding test failure when cov-fail-under failed.
+* For coverage >= 4.0, match the default behaviour of `coverage report` and
+  error if coverage fails to find the source instead of just printing a warning.
+  Contributed by David Szotten in `PR#116 `_.
+* Fixed a bug that occurred when the bare ``--cov`` parameter was used with xdist.
+  Contributed by Michael Elovskikh in `PR#120 `_.
+* Add support for ``skip_covered`` and added ``--cov-report=term-skip-covered`` command
+  line options. Contributed by Saurabh Kumar in `PR#115 `_.
+
+2.2.1 (2016-01-30)
+------------------
+
+* Fixed incorrect merging of coverage data when xdist was used and coverage was ``>= 4.0``.
+
+2.2.0 (2015-10-04)
+------------------
+
+* Added support for changing the working directory in tests. Previously changing the working
+  directory would disable coverage measurements in subprocesses.
+* Fixed broken handling for ``--cov-report=annotate``.
+
+2.1.0 (2015-08-23)
+------------------
+
+* Added support for `coverage 4.0b2`.
+* Added the ``--cov-append`` command line options. Contributed by Christian Ledermann
+  in `PR#80 `_.
+
+2.0.0 (2015-07-28)
+------------------
+
+* Added ``--cov-fail-under``, akin to the new ``fail_under`` option in `coverage-4.0`
+  (automatically activated if there's a ``[report] fail_under = ...`` in ``.coveragerc``).
+* Changed ``--cov-report=term`` to automatically upgrade to ``--cov-report=term-missing``
+  if there's ``[run] show_missing = True`` in ``.coveragerc``.
+* Changed ``--cov`` so it can be used with no path argument (in which case the source
+  settings from ``.coveragerc`` will be used instead).
+* Fixed `.pth` installation to work in all cases (install, easy_install, wheels, develop etc).
+* Fixed `.pth` uninstallation to work for wheel installs.
+* Support for coverage 4.0.
+* Data file suffixing changed to use coverage's ``data_suffix=True`` option (instead of the
+  custom suffixing).
+* Avoid warning about missing coverage data (just like ``coverage.control.process_startup``).
+* Fixed a race condition when running with xdist (all the workers tried to combine the files).
+  It's possible that this issue is not present in `pytest-cov 1.8.X`.
+
+1.8.2 (2014-11-06)
+------------------
+
+* N/A
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/RECORD b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/RECORD
new file mode 100644
index 00000000..b9633f7e
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/RECORD
@@ -0,0 +1,20 @@
+pytest-cov.pth,sha256=9HRGpg_fWQXoTn18iSuvkvjVoyJtDaFZm5wBTqtsfds,377
+pytest_cov-6.0.0.dist-info/AUTHORS.rst,sha256=ROOJOVvdjq7rFLkEG__sUQfBSDaLypcEhS69nOLDBPQ,2900
+pytest_cov-6.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pytest_cov-6.0.0.dist-info/LICENSE,sha256=g1WGrhVnZqJOPBA_vFXZr2saFt9XypMsl0gqJzf9g9U,1071
+pytest_cov-6.0.0.dist-info/METADATA,sha256=phEckNuLGLR8EzbfrI05QiCeAFcecKBrKpIg9rkSaPQ,27794
+pytest_cov-6.0.0.dist-info/RECORD,,
+pytest_cov-6.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_cov-6.0.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+pytest_cov-6.0.0.dist-info/entry_points.txt,sha256=1Wx3pjYCY2v4ATD5dhlQN6Ta-C4LKmBa1fXhiEX6C8A,42
+pytest_cov-6.0.0.dist-info/top_level.txt,sha256=HvYHsAFV4MeTUNUwhawY_DKvrpE2lYratTHX_U45oBU,11
+pytest_cov/__init__.py,sha256=HRmkJeglhQBIQNCWKMV1BE_jlI1ovhM4PsLCLPbxHT0,1022
+pytest_cov/__pycache__/__init__.cpython-312.pyc,,
+pytest_cov/__pycache__/compat.cpython-312.pyc,,
+pytest_cov/__pycache__/embed.cpython-312.pyc,,
+pytest_cov/__pycache__/engine.cpython-312.pyc,,
+pytest_cov/__pycache__/plugin.cpython-312.pyc,,
+pytest_cov/compat.py,sha256=u6pVozx0EVDbuNKMsUjT1cgDCla6zO7DwhN8RTIfrXQ,425
+pytest_cov/embed.py,sha256=DCO_u27JxnnJuurYhlheutIVwJyV6nSPcs3YB3OcBZs,3570
+pytest_cov/engine.py,sha256=4B5uBfRLixr292uH6sl01Vdc98qQ39k_F6AmOWAa0ec,16399
+pytest_cov/plugin.py,sha256=4-2Q8p_QLxxKyZ7fslGR_EkOZ9_zFm4Iuq5QSgvIkEE,15603
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/REQUESTED b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/WHEEL b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/WHEEL
new file mode 100644
index 00000000..98c0d20b
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/entry_points.txt b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/entry_points.txt
new file mode 100644
index 00000000..213605ba
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[pytest11]
+pytest_cov = pytest_cov.plugin
diff --git a/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/top_level.txt b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/top_level.txt
new file mode 100644
index 00000000..a2fe2811
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pytest_cov
diff --git a/.venv/Lib/site-packages/pytest_cov/__init__.py b/.venv/Lib/site-packages/pytest_cov/__init__.py
new file mode 100644
index 00000000..4caae9dc
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov/__init__.py
@@ -0,0 +1,47 @@
+"""pytest-cov: avoid already-imported warning: PYTEST_DONT_REWRITE."""
+
+__version__ = '6.0.0'
+
+import pytest
+
+
+class CoverageError(Exception):
+    """Indicates that our coverage is too low"""
+
+
+class PytestCovWarning(pytest.PytestWarning):
+    """
+    The base for all pytest-cov warnings, never raised directly.
+    """
+
+
+class CovDisabledWarning(PytestCovWarning):
+    """
+    Indicates that Coverage was manually disabled.
+    """
+
+
+class CovReportWarning(PytestCovWarning):
+    """
+    Indicates that we failed to generate a report.
+    """
+
+
+class CovFailUnderWarning(PytestCovWarning):
+    """
+    Indicates that the coverage total did not reach the fail-under threshold.
+    """
+
+
+class CentralCovContextWarning(PytestCovWarning):
+    """
+    Indicates that dynamic_context was set to test_function instead of using the builtin --cov-context.
+    """
+
+
+class DistCovError(Exception):
+    """
+    Raised when dynamic_context is set to test_function and xdist is also used.
+
+    See: https://github.com/pytest-dev/pytest-cov/issues/604
+    """
diff --git a/.venv/Lib/site-packages/pytest_cov/compat.py b/.venv/Lib/site-packages/pytest_cov/compat.py
new file mode 100644
index 00000000..453709d7
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov/compat.py
@@ -0,0 +1,15 @@
+class SessionWrapper:
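+    """Compatibility shim: proxy ``testsfailed`` to whichever attribute the running
+    pytest Session actually has (``testsfailed``, or the older private ``_testsfailed``)."""
+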
+    def __init__(self, session):
+        self._session = session
+        if hasattr(session, 'testsfailed'):
+            self._attr = 'testsfailed'
+        else:
+            self._attr = '_testsfailed'
+
+    @property
+    def testsfailed(self):
+        return getattr(self._session, self._attr)
+
+    @testsfailed.setter
+    def testsfailed(self, value):
+        setattr(self._session, self._attr, value)
diff --git a/.venv/Lib/site-packages/pytest_cov/embed.py b/.venv/Lib/site-packages/pytest_cov/embed.py
new file mode 100644
index 00000000..57c2f423
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov/embed.py
@@ -0,0 +1,123 @@
+"""Activate coverage at python startup if appropriate.
+
+The python site initialisation will ensure that anything we import
+will be removed and not visible at the end of python startup. However,
+we minimise all work by putting these init actions in this separate
+module and only importing what is needed when needed.
+
+For normal python startup when coverage should not be activated the pth
+file checks a single env var and does not import or call the init fn
+here.
+
+For python startup when an ancestor process has set the env indicating
+that code coverage is being collected we activate coverage based on
+info passed via env vars.
+"""
+
+import atexit
+import os
+import signal
+
+_active_cov = None
+
+
+def init():
+    # Only continue if ancestor process has set everything needed in
+    # the env.
+    global _active_cov
+
+    cov_source = os.environ.get('COV_CORE_SOURCE')
+    cov_config = os.environ.get('COV_CORE_CONFIG')
+    cov_datafile = os.environ.get('COV_CORE_DATAFILE')
+    cov_branch = True if os.environ.get('COV_CORE_BRANCH') == 'enabled' else None
+    cov_context = os.environ.get('COV_CORE_CONTEXT')
+
+    if cov_datafile:
+        if _active_cov:
+            cleanup()
+        # Import what we need to activate coverage.
+        import coverage
+
+        # Determine all source roots.
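+        # Note: `cov_source in os.pathsep` is a substring check, so an empty string or a
+        # bare path separator both mean "no source filtering".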
+        if cov_source in os.pathsep:
+            cov_source = None
+        else:
+            cov_source = cov_source.split(os.pathsep)
+        if cov_config == os.pathsep:
+            cov_config = True
+
+        # Activate coverage for this process.
+        cov = _active_cov = coverage.Coverage(
+            source=cov_source,
+            branch=cov_branch,
+            data_suffix=True,
+            config_file=cov_config,
+            auto_data=True,
+            data_file=cov_datafile,
+        )
+        cov.load()
+        cov.start()
+        if cov_context:
+            cov.switch_context(cov_context)
+        cov._warn_no_data = False
+        cov._warn_unimported_source = False
+        return cov
+
+
+def _cleanup(cov):
+    if cov is not None:
+        cov.stop()
+        cov.save()
+        cov._auto_save = False  # prevent autosaving from cov._atexit in case the interpreter lacks atexit.unregister
+        try:
+            atexit.unregister(cov._atexit)
+        except Exception:  # noqa: S110
+            pass
+
+
+def cleanup():
+    global _active_cov
+    global _cleanup_in_progress
+    global _pending_signal
+
+    _cleanup_in_progress = True
+    _cleanup(_active_cov)
+    _active_cov = None
+    _cleanup_in_progress = False
+    if _pending_signal:
+        pending_signal = _pending_signal
+        _pending_signal = None
+        _signal_cleanup_handler(*pending_signal)
+
+
+_previous_handlers = {}
+_pending_signal = None
+_cleanup_in_progress = False
+
+
+def _signal_cleanup_handler(signum, frame):
+    global _pending_signal
+    if _cleanup_in_progress:
+        _pending_signal = signum, frame
+        return
+    cleanup()
+    _previous_handler = _previous_handlers.get(signum)
+    if _previous_handler == signal.SIG_IGN:
+        return
+    elif _previous_handler and _previous_handler is not _signal_cleanup_handler:
+        _previous_handler(signum, frame)
+    elif signum == signal.SIGTERM:
+        os._exit(128 + signum)
+    elif signum == signal.SIGINT:
+        raise KeyboardInterrupt
+
+
+def cleanup_on_signal(signum):
+    previous = signal.getsignal(signum)
+    if previous is not _signal_cleanup_handler:
+        _previous_handlers[signum] = previous
+        signal.signal(signum, _signal_cleanup_handler)
+
+
+def cleanup_on_sigterm():
+    cleanup_on_signal(signal.SIGTERM)
diff --git a/.venv/Lib/site-packages/pytest_cov/engine.py b/.venv/Lib/site-packages/pytest_cov/engine.py
new file mode 100644
index 00000000..650dbcc9
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov/engine.py
@@ -0,0 +1,454 @@
+"""Coverage controllers for use by pytest-cov and nose-cov."""
+
+import contextlib
+import copy
+import functools
+import os
+import random
+import socket
+import sys
+import warnings
+from io import StringIO
+from pathlib import Path
+
+import coverage
+from coverage.data import CoverageData
+from coverage.sqldata import filename_suffix
+
+from . import CentralCovContextWarning
+from . import DistCovError
+from .embed import cleanup
+
+
+class BrokenCovConfigError(Exception):
+    pass
+
+
+class _NullFile:
+    @staticmethod
+    def write(v):
+        pass
+
+
+@contextlib.contextmanager
+def _backup(obj, attr):
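+    # Temporarily replace `obj.attr` with a shallow copy so report calls can mutate the
+    # config without the changes leaking into later reports; restored on exit.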
+    backup = getattr(obj, attr)
+    try:
+        setattr(obj, attr, copy.copy(backup))
+        yield
+    finally:
+        setattr(obj, attr, backup)
+
+
+def _ensure_topdir(meth):
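+    # Decorator: run `meth` from the recorded project top directory so relative paths in
+    # the coverage config resolve consistently, restoring the original cwd afterwards.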
+    @functools.wraps(meth)
+    def ensure_topdir_wrapper(self, *args, **kwargs):
+        try:
+            original_cwd = Path.cwd()
+        except OSError:
+            # Looks like it's gone, this is non-ideal because a side-effect will
+            # be introduced in the tests here but we can't do anything about it.
+            original_cwd = None
+        os.chdir(self.topdir)
+        try:
+            return meth(self, *args, **kwargs)
+        finally:
+            if original_cwd is not None:
+                os.chdir(original_cwd)
+
+    return ensure_topdir_wrapper
+
+
+def _data_suffix(name):
+    return f'{filename_suffix(True)}.{name}'
+
+
+class CovController:
+    """Base class for different plugin implementations."""
+
+    def __init__(self, cov_source, cov_report, cov_config, cov_append, cov_branch, config=None, nodeid=None):
+        """Get some common config used by multiple derived classes."""
+        self.cov_source = cov_source
+        self.cov_report = cov_report
+        self.cov_config = cov_config
+        self.cov_append = cov_append
+        self.cov_branch = cov_branch
+        self.config = config
+        self.nodeid = nodeid
+
+        self.cov = None
+        self.combining_cov = None
+        self.data_file = None
+        self.node_descs = set()
+        self.failed_workers = []
+        self.topdir = os.fspath(Path.cwd())
+        self.is_collocated = None
+
+    @contextlib.contextmanager
+    def ensure_topdir(self):
+        original_cwd = Path.cwd()
+        os.chdir(self.topdir)
+        yield
+        os.chdir(original_cwd)
+
+    @_ensure_topdir
+    def pause(self):
+        self.cov.stop()
+        self.unset_env()
+
+    @_ensure_topdir
+    def resume(self):
+        self.cov.start()
+        self.set_env()
+
+    @_ensure_topdir
+    def set_env(self):
+        """Put info about coverage into the env so that subprocesses can activate coverage."""
+        if self.cov_source is None:
+            os.environ['COV_CORE_SOURCE'] = os.pathsep
+        else:
+            os.environ['COV_CORE_SOURCE'] = os.pathsep.join(self.cov_source)
+        config_file = Path(self.cov_config)
+        if config_file.exists():
+            os.environ['COV_CORE_CONFIG'] = os.fspath(config_file.resolve())
+        else:
+            os.environ['COV_CORE_CONFIG'] = os.pathsep
+        # this still uses the old abspath because apparently Python 3.9 on Windows has a buggy Path.resolve()
+        os.environ['COV_CORE_DATAFILE'] = os.path.abspath(self.cov.config.data_file)  # noqa: PTH100
+        if self.cov_branch:
+            os.environ['COV_CORE_BRANCH'] = 'enabled'
+
+    @staticmethod
+    def unset_env():
+        """Remove coverage info from env."""
+        os.environ.pop('COV_CORE_SOURCE', None)
+        os.environ.pop('COV_CORE_CONFIG', None)
+        os.environ.pop('COV_CORE_DATAFILE', None)
+        os.environ.pop('COV_CORE_BRANCH', None)
+        os.environ.pop('COV_CORE_CONTEXT', None)
+
+    @staticmethod
+    def get_node_desc(platform, version_info):
+        """Return a description of this node."""
+
+        return 'platform {}, python {}'.format(platform, '{}.{}.{}-{}-{}'.format(*version_info[:5]))
+
+    @staticmethod
+    def sep(stream, s, txt):
+        if hasattr(stream, 'sep'):
+            stream.sep(s, txt)
+        else:
+            sep_total = max((70 - 2 - len(txt)), 2)
+            sep_len = sep_total // 2
+            sep_extra = sep_total % 2
+            out = f'{s * sep_len} {txt} {s * (sep_len + sep_extra)}\n'
+            stream.write(out)
+
+    @_ensure_topdir
+    def summary(self, stream):
+        """Produce coverage reports."""
+        total = None
+
+        if not self.cov_report:
+            with _backup(self.cov, 'config'):
+                return self.cov.report(show_missing=True, ignore_errors=True, file=_NullFile)
+
+        # Output coverage section header.
+        if len(self.node_descs) == 1:
+            self.sep(stream, '-', f"coverage: {''.join(self.node_descs)}")
+        else:
+            self.sep(stream, '-', 'coverage')
+            for node_desc in sorted(self.node_descs):
+                self.sep(stream, ' ', f'{node_desc}')
+
+        # Report on any failed workers.
+        if self.failed_workers:
+            self.sep(stream, '-', 'coverage: failed workers')
+            stream.write('The following workers failed to return coverage data, ensure that pytest-cov is installed on these workers.\n')
+            for node in self.failed_workers:
+                stream.write(f'{node.gateway.id}\n')
+
+        # Produce terminal report if wanted.
+        if any(x in self.cov_report for x in ['term', 'term-missing']):
+            options = {
+                'show_missing': ('term-missing' in self.cov_report) or None,
+                'ignore_errors': True,
+                'file': stream,
+            }
+            skip_covered = isinstance(self.cov_report, dict) and 'skip-covered' in self.cov_report.values()
+            options.update({'skip_covered': skip_covered or None})
+            with _backup(self.cov, 'config'):
+                total = self.cov.report(**options)
+
+        # Produce annotated source code report if wanted.
+        if 'annotate' in self.cov_report:
+            annotate_dir = self.cov_report['annotate']
+
+            with _backup(self.cov, 'config'):
+                self.cov.annotate(ignore_errors=True, directory=annotate_dir)
+            # We need to call Coverage.report here just to get the total:
+            # Coverage.annotate doesn't return a total, and we need it for --cov-fail-under.
+
+            with _backup(self.cov, 'config'):
+                total = self.cov.report(ignore_errors=True, file=_NullFile)
+            if annotate_dir:
+                stream.write(f'Coverage annotated source written to dir {annotate_dir}\n')
+            else:
+                stream.write('Coverage annotated source written next to source\n')
+
+        # Produce html report if wanted.
+        if 'html' in self.cov_report:
+            output = self.cov_report['html']
+            with _backup(self.cov, 'config'):
+                total = self.cov.html_report(ignore_errors=True, directory=output)
+            stream.write(f'Coverage HTML written to dir {self.cov.config.html_dir if output is None else output}\n')
+
+        # Produce xml report if wanted.
+        if 'xml' in self.cov_report:
+            output = self.cov_report['xml']
+            with _backup(self.cov, 'config'):
+                total = self.cov.xml_report(ignore_errors=True, outfile=output)
+            stream.write(f'Coverage XML written to file {self.cov.config.xml_output if output is None else output}\n')
+
+        # Produce json report if wanted
+        if 'json' in self.cov_report:
+            output = self.cov_report['json']
+            with _backup(self.cov, 'config'):
+                total = self.cov.json_report(ignore_errors=True, outfile=output)
+            stream.write(f'Coverage JSON written to file {self.cov.config.json_output if output is None else output}\n')
+
+        # Produce lcov report if wanted.
+        if 'lcov' in self.cov_report:
+            output = self.cov_report['lcov']
+            with _backup(self.cov, 'config'):
+                self.cov.lcov_report(ignore_errors=True, outfile=output)
+
+                # We need to call Coverage.report here, just to get the total
+                # Coverage.lcov_report doesn't return any total and we need it for --cov-fail-under.
+                total = self.cov.report(ignore_errors=True, file=_NullFile)
+
+            stream.write(f'Coverage LCOV written to file {self.cov.config.lcov_output if output is None else output}\n')
+
+        return total
+
+
+class Central(CovController):
+    """Implementation for centralised operation."""
+
+    @_ensure_topdir
+    def start(self):
+        cleanup()
+
+        self.cov = coverage.Coverage(
+            source=self.cov_source,
+            branch=self.cov_branch,
+            data_suffix=_data_suffix('c'),
+            config_file=self.cov_config,
+        )
+        if self.cov.config.dynamic_context == 'test_function':
+            message = (
+                'Detected dynamic_context=test_function in coverage configuration. '
+                'This is unnecessary as this plugin provides the more complete --cov-context option.'
+            )
+            warnings.warn(CentralCovContextWarning(message), stacklevel=1)
+
+        self.combining_cov = coverage.Coverage(
+            source=self.cov_source,
+            branch=self.cov_branch,
+            data_suffix=_data_suffix('cc'),
+            data_file=os.path.abspath(self.cov.config.data_file),  # noqa: PTH100
+            config_file=self.cov_config,
+        )
+
+        # Erase or load any previous coverage data and start coverage.
+        if not self.cov_append:
+            self.cov.erase()
+        self.cov.start()
+        self.set_env()
+
+    @_ensure_topdir
+    def finish(self):
+        """Stop coverage, save data to file and set the list of coverage objects to report on."""
+
+        self.unset_env()
+        self.cov.stop()
+        self.cov.save()
+
+        self.cov = self.combining_cov
+        self.cov.load()
+        self.cov.combine()
+        self.cov.save()
+
+        node_desc = self.get_node_desc(sys.platform, sys.version_info)
+        self.node_descs.add(node_desc)
+
+
+class DistMaster(CovController):
+    """Implementation for distributed master."""
+
+    @_ensure_topdir
+    def start(self):
+        cleanup()
+
+        self.cov = coverage.Coverage(
+            source=self.cov_source,
+            branch=self.cov_branch,
+            data_suffix=_data_suffix('m'),
+            config_file=self.cov_config,
+        )
+        if self.cov.config.dynamic_context == 'test_function':
+            raise DistCovError(
+                'Detected dynamic_context=test_function in coverage configuration. '
+                'This is known to cause issues when using xdist, see: https://github.com/pytest-dev/pytest-cov/issues/604\n'
+                'It is recommended to use --cov-context instead.'
+            )
+        self.cov._warn_no_data = False
+        self.cov._warn_unimported_source = False
+        self.cov._warn_preimported_source = False
+        self.combining_cov = coverage.Coverage(
+            source=self.cov_source,
+            branch=self.cov_branch,
+            data_suffix=_data_suffix('mc'),
+            data_file=os.path.abspath(self.cov.config.data_file),  # noqa: PTH100
+            config_file=self.cov_config,
+        )
+        if not self.cov_append:
+            self.cov.erase()
+        self.cov.start()
+        self.cov.config.paths['source'] = [self.topdir]
+
+    def configure_node(self, node):
+        """Workers need to know if they are collocated and what files have moved."""
+
+        node.workerinput.update(
+            {
+                'cov_master_host': socket.gethostname(),
+                'cov_master_topdir': self.topdir,
+                'cov_master_rsync_roots': [str(root) for root in node.nodemanager.roots],
+            }
+        )
+
+    def testnodedown(self, node, error):
+        """Collect data file name from worker."""
+
+        # If the worker doesn't return any data then it is likely that this
+        # plugin didn't get activated on the worker side.
+        output = getattr(node, 'workeroutput', {})
+        if 'cov_worker_node_id' not in output:
+            self.failed_workers.append(node)
+            return
+
+        # If the worker is not collocated then we must save the data file
+        # that it returns to us.
+        if 'cov_worker_data' in output:
+            data_suffix = '%s.%s.%06d.%s' % (
+                socket.gethostname(),
+                os.getpid(),
+                random.randint(0, 999999),  # noqa: S311
+                output['cov_worker_node_id'],
+            )
+
+            cov = coverage.Coverage(source=self.cov_source, branch=self.cov_branch, data_suffix=data_suffix, config_file=self.cov_config)
+            cov.start()
+            if coverage.version_info < (5, 0):
+                data = CoverageData()
+                data.read_fileobj(StringIO(output['cov_worker_data']))
+                cov.data.update(data)
+            else:
+                data = CoverageData(no_disk=True, suffix='should-not-exist')
+                data.loads(output['cov_worker_data'])
+                cov.get_data().update(data)
+            cov.stop()
+            cov.save()
+            path = output['cov_worker_path']
+            self.cov.config.paths['source'].append(path)
+
+        # Record the worker types that contribute to the data file.
+        rinfo = node.gateway._rinfo()
+        node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info)
+        self.node_descs.add(node_desc)
+
+    @_ensure_topdir
+    def finish(self):
+        """Combines coverage data and sets the list of coverage objects to report on."""
+
+        # Combine all the suffix files into the data file.
+        self.cov.stop()
+        self.cov.save()
+        self.cov = self.combining_cov
+        self.cov.load()
+        self.cov.combine()
+        self.cov.save()
+
+
+class DistWorker(CovController):
+    """Implementation for distributed workers."""
+
+    @_ensure_topdir
+    def start(self):
+        cleanup()
+
+        # Determine whether we are collocated with master.
+        self.is_collocated = (
+            socket.gethostname() == self.config.workerinput['cov_master_host']
+            and self.topdir == self.config.workerinput['cov_master_topdir']
+        )
+
+        # If we are not collocated then rewrite master paths to worker paths.
+        if not self.is_collocated:
+            master_topdir = self.config.workerinput['cov_master_topdir']
+            worker_topdir = self.topdir
+            if self.cov_source is not None:
+                self.cov_source = [source.replace(master_topdir, worker_topdir) for source in self.cov_source]
+            self.cov_config = self.cov_config.replace(master_topdir, worker_topdir)
+
+        # Erase any previous data and start coverage.
+        self.cov = coverage.Coverage(
+            source=self.cov_source,
+            branch=self.cov_branch,
+            data_suffix=_data_suffix(f'w{self.nodeid}'),
+            config_file=self.cov_config,
+        )
+        self.cov.start()
+        self.set_env()
+
+    @_ensure_topdir
+    def finish(self):
+        """Stop coverage and send relevant info back to the master."""
+        self.unset_env()
+        self.cov.stop()
+
+        if self.is_collocated:
+            # We don't combine data if we're collocated - we can get
+            # race conditions in the .combine() call (it's not atomic)
+            # The data is going to be combined in the master.
+            self.cov.save()
+
+            # If we are collocated then just inform the master of our
+            # data file to indicate that we have finished.
+            self.config.workeroutput['cov_worker_node_id'] = self.nodeid
+        else:
+            self.cov.combine()
+            self.cov.save()
+            # If we are not collocated then add the current path
+            # and coverage data to the output so we can combine
+            # it on the master node.
+
+            # Send all the data to the master over the channel.
+            if coverage.version_info < (5, 0):
+                buff = StringIO()
+                self.cov.data.write_fileobj(buff)
+                data = buff.getvalue()
+            else:
+                data = self.cov.get_data().dumps()
+
+            self.config.workeroutput.update(
+                {
+                    'cov_worker_path': self.topdir,
+                    'cov_worker_node_id': self.nodeid,
+                    'cov_worker_data': data,
+                }
+            )
+
+    def summary(self, stream):
+        """Only the master reports so do nothing."""
diff --git a/.venv/Lib/site-packages/pytest_cov/plugin.py b/.venv/Lib/site-packages/pytest_cov/plugin.py
new file mode 100644
index 00000000..916efc8e
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_cov/plugin.py
@@ -0,0 +1,446 @@
+"""Coverage plugin for pytest."""
+
+import argparse
+import os
+import warnings
+from io import StringIO
+from pathlib import Path
+
+import coverage
+import pytest
+from coverage.results import display_covered
+from coverage.results import should_fail_under
+
+from . import CovDisabledWarning
+from . import CovFailUnderWarning
+from . import CovReportWarning
+from . import compat
+from . import embed
+
+
+def validate_report(arg):
+    file_choices = ['annotate', 'html', 'xml', 'json', 'lcov']
+    term_choices = ['term', 'term-missing']
+    term_modifier_choices = ['skip-covered']
+    all_choices = term_choices + file_choices
+    values = arg.split(':', 1)
+    report_type = values[0]
+    if report_type not in [*all_choices, '']:
+        msg = f'invalid choice: "{arg}" (choose from "{all_choices}")'
+        raise argparse.ArgumentTypeError(msg)
+
+    if report_type == 'lcov' and coverage.version_info <= (6, 3):
+        raise argparse.ArgumentTypeError('LCOV output is only supported with coverage.py >= 6.3')
+
+    if len(values) == 1:
+        return report_type, None
+
+    report_modifier = values[1]
+    if report_type in term_choices and report_modifier in term_modifier_choices:
+        return report_type, report_modifier
+
+    if report_type not in file_choices:
+        msg = f'output specifier not supported for: "{arg}" (choose from "{file_choices}")'
+        raise argparse.ArgumentTypeError(msg)
+
+    return values
+
+
+def validate_fail_under(num_str):
+    try:
+        value = int(num_str)
+    except ValueError:
+        try:
+            value = float(num_str)
+        except ValueError:
+            raise argparse.ArgumentTypeError('An integer or float value is required.') from None
+    if value > 100:
+        raise argparse.ArgumentTypeError(
+            'Your desire for over-achievement is admirable but misplaced. '
+            'The maximum value is 100. Perhaps write more integration tests?'
+        )
+    return value
+
+
+def validate_context(arg):
+    if coverage.version_info <= (5, 0):
+        raise argparse.ArgumentTypeError('Contexts are only supported with coverage.py >= 5.x')
+    if arg != 'test':
+        raise argparse.ArgumentTypeError('The only supported value is "test".')
+    return arg
+
+
+class StoreReport(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        report_type, file = values
+        namespace.cov_report[report_type] = file
+
+
+def pytest_addoption(parser):
+    """Add options to control coverage."""
+
+    group = parser.getgroup('cov', 'coverage reporting with distributed testing support')
+    group.addoption(
+        '--cov',
+        action='append',
+        default=[],
+        metavar='SOURCE',
+        nargs='?',
+        const=True,
+        dest='cov_source',
+        help='Path or package name to measure during execution (multi-allowed). '
+        'Use --cov= to not do any source filtering and record everything.',
+    )
+    group.addoption(
+        '--cov-reset',
+        action='store_const',
+        const=[],
+        dest='cov_source',
+        help='Reset cov sources accumulated in options so far. ',
+    )
+    group.addoption(
+        '--cov-report',
+        action=StoreReport,
+        default={},
+        metavar='TYPE',
+        type=validate_report,
+        help='Type of report to generate: term, term-missing, '
+        'annotate, html, xml, json, lcov (multi-allowed). '
+        'term, term-missing may be followed by ":skip-covered". '
+        'annotate, html, xml, json and lcov may be followed by ":DEST" '
+        'where DEST specifies the output location. '
+        'Use --cov-report= to not generate any output.',
+    )
+    group.addoption(
+        '--cov-config',
+        action='store',
+        default='.coveragerc',
+        metavar='PATH',
+        help='Config file for coverage. Default: .coveragerc',
+    )
+    group.addoption(
+        '--no-cov-on-fail',
+        action='store_true',
+        default=False,
+        help='Do not report coverage if test run fails. Default: False',
+    )
+    group.addoption(
+        '--no-cov',
+        action='store_true',
+        default=False,
+        help='Disable coverage report completely (useful for debuggers). Default: False',
+    )
+    group.addoption(
+        '--cov-fail-under',
+        action='store',
+        metavar='MIN',
+        type=validate_fail_under,
+        help='Fail if the total coverage is less than MIN.',
+    )
+    group.addoption(
+        '--cov-append',
+        action='store_true',
+        default=False,
+        help='Do not delete coverage but append to current. Default: False',
+    )
+    group.addoption(
+        '--cov-branch',
+        action='store_true',
+        default=None,
+        help='Enable branch coverage.',
+    )
+    group.addoption(
+        '--cov-precision',
+        type=int,
+        default=None,
+        help='Override the reporting precision.',
+    )
+    group.addoption(
+        '--cov-context',
+        action='store',
+        metavar='CONTEXT',
+        type=validate_context,
+        help='Dynamic contexts to use. "test" for now.',
+    )
+
+
+def _prepare_cov_source(cov_source):
+    """
+    Prepare cov_source so that:
+
+     --cov --cov=foobar is equivalent to --cov (cov_source=None)
+     --cov=foo --cov=bar is equivalent to cov_source=['foo', 'bar']
+    """
+    return None if True in cov_source else [path for path in cov_source if path is not True]
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_load_initial_conftests(early_config, parser, args):
+    options = early_config.known_args_namespace
+    no_cov = options.no_cov_should_warn = False
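+    # Warn only when --no-cov appears before a --cov option on the command line.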
+    for arg in args:
+        arg = str(arg)
+        if arg == '--no-cov':
+            no_cov = True
+        elif arg.startswith('--cov') and no_cov:
+            options.no_cov_should_warn = True
+            break
+
+    if early_config.known_args_namespace.cov_source:
+        plugin = CovPlugin(options, early_config.pluginmanager)
+        early_config.pluginmanager.register(plugin, '_cov')
+
+
+class CovPlugin:
+    """Use coverage package to produce code coverage reports.
+
+    Delegates all work to a particular implementation based on whether
+    this test process is centralised, a distributed master or a
+    distributed worker.
+    """
+
+    def __init__(self, options, pluginmanager, start=True, no_cov_should_warn=False):
+        """Creates a coverage pytest plugin.
+
+        We read the rc file that coverage uses to get the data file
+        name.  This is needed since we pass the data file name to
+        coverage through its API.
+        """
+
+        # Our implementation is unknown at this time.
+        self.pid = None
+        self.cov_controller = None
+        self.cov_report = StringIO()
+        self.cov_total = None
+        self.failed = False
+        self._started = False
+        self._start_path = None
+        self._disabled = False
+        self.options = options
+
+        is_dist = getattr(options, 'numprocesses', False) or getattr(options, 'distload', False) or getattr(options, 'dist', 'no') != 'no'
+        if getattr(options, 'no_cov', False):
+            self._disabled = True
+            return
+
+        if not self.options.cov_report:
+            self.options.cov_report = ['term']
+        elif len(self.options.cov_report) == 1 and '' in self.options.cov_report:
+            self.options.cov_report = {}
+        self.options.cov_source = _prepare_cov_source(self.options.cov_source)
+
+        # import engine lazily here to avoid importing
+        # it for unit tests that don't need it
+        from . import engine
+
+        if is_dist and start:
+            self.start(engine.DistMaster)
+        elif start:
+            self.start(engine.Central)
+
+        # worker is started in pytest hook
+
+    def start(self, controller_cls, config=None, nodeid=None):
+        if config is None:
+            # fake config option for engine
+            class Config:
+                option = self.options
+
+            config = Config()
+
+        self.cov_controller = controller_cls(
+            self.options.cov_source,
+            self.options.cov_report,
+            self.options.cov_config,
+            self.options.cov_append,
+            self.options.cov_branch,
+            config,
+            nodeid,
+        )
+        self.cov_controller.start()
+        self._started = True
+        self._start_path = Path.cwd()
+        cov_config = self.cov_controller.cov.config
+        if self.options.cov_fail_under is None and hasattr(cov_config, 'fail_under'):
+            self.options.cov_fail_under = cov_config.fail_under
+        if self.options.cov_precision is None:
+            self.options.cov_precision = getattr(cov_config, 'precision', 0)
+
+    def _is_worker(self, session):
+        return getattr(session.config, 'workerinput', None) is not None
+
+    def pytest_sessionstart(self, session):
+        """At session start determine our implementation and delegate to it."""
+
+        if self.options.no_cov:
+            # Coverage can be disabled because it does not cooperate with debuggers well.
+            self._disabled = True
+            return
+
+        # import engine lazily here to avoid importing
+        # it for unit tests that don't need it
+        from . import engine
+
+        self.pid = os.getpid()
+        if self._is_worker(session):
+            nodeid = session.config.workerinput.get('workerid', session.nodeid)
+            self.start(engine.DistWorker, session.config, nodeid)
+        elif not self._started:
+            self.start(engine.Central)
+
+        if self.options.cov_context == 'test':
+            session.config.pluginmanager.register(TestContextPlugin(self.cov_controller.cov), '_cov_contexts')
+
+    @pytest.hookimpl(optionalhook=True)
+    def pytest_configure_node(self, node):
+        """Delegate to our implementation.
+
+        Mark this hook as optional in case xdist is not installed.
+        """
+        if not self._disabled:
+            self.cov_controller.configure_node(node)
+
+    @pytest.hookimpl(optionalhook=True)
+    def pytest_testnodedown(self, node, error):
+        """Delegate to our implementation.
+
+        Mark this hook as optional in case xdist is not installed.
+        """
+        if not self._disabled:
+            self.cov_controller.testnodedown(node, error)
+
+    def _should_report(self):
+        needed = self.options.cov_report or self.options.cov_fail_under
+        return needed and not (self.failed and self.options.no_cov_on_fail)
+
+    # we need to wrap pytest_runtestloop. by the time pytest_sessionfinish
+    # runs, it's too late to set testsfailed
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_runtestloop(self, session):
+        yield
+
+        if self._disabled:
+            return
+
+        compat_session = compat.SessionWrapper(session)
+
+        self.failed = bool(compat_session.testsfailed)
+        if self.cov_controller is not None:
+            self.cov_controller.finish()
+
+        if not self._is_worker(session) and self._should_report():
+            # import coverage lazily here to avoid importing
+            # it for unit tests that don't need it
+            from coverage.misc import CoverageException
+
+            try:
+                self.cov_total = self.cov_controller.summary(self.cov_report)
+            except CoverageException as exc:
+                message = f'Failed to generate report: {exc}\n'
+                session.config.pluginmanager.getplugin('terminalreporter').write(f'\nWARNING: {message}\n', red=True, bold=True)
+                warnings.warn(CovReportWarning(message), stacklevel=1)
+                self.cov_total = 0
+            assert self.cov_total is not None, 'Test coverage should never be `None`'
+            cov_fail_under = self.options.cov_fail_under
+            cov_precision = self.options.cov_precision
+            if cov_fail_under is None or self.options.collectonly:
+                return
+            if should_fail_under(self.cov_total, cov_fail_under, cov_precision):
+                message = 'Coverage failure: total of {total} is less than fail-under={fail_under:.{p}f}'.format(
+                    total=display_covered(self.cov_total, cov_precision),
+                    fail_under=cov_fail_under,
+                    p=cov_precision,
+                )
+                session.config.pluginmanager.getplugin('terminalreporter').write(f'\nERROR: {message}\n', red=True, bold=True)
+                warnings.warn(CovFailUnderWarning(message), stacklevel=1)
+                # make sure we get the EXIT_TESTSFAILED exit code
+                compat_session.testsfailed += 1
+
+    def pytest_terminal_summary(self, terminalreporter):
+        if self._disabled:
+            if self.options.no_cov_should_warn:
+                message = 'Coverage disabled via --no-cov switch!'
+                terminalreporter.write(f'WARNING: {message}\n', red=True, bold=True)
+                warnings.warn(CovDisabledWarning(message), stacklevel=1)
+            return
+        if self.cov_controller is None:
+            return
+
+        if self.cov_total is None:
+            # we shouldn't report, or report generation failed (error raised above)
+            return
+
+        report = self.cov_report.getvalue()
+
+        # Avoid undesirable new lines when output is disabled with "--cov-report=".
+        if report:
+            terminalreporter.write('\n' + report + '\n')
+
+        if self.options.cov_fail_under is not None and self.options.cov_fail_under > 0:
+            failed = self.cov_total < self.options.cov_fail_under
+            markup = {'red': True, 'bold': True} if failed else {'green': True}
+            message = '{fail}Required test coverage of {required}% {reached}. ' 'Total coverage: {actual:.2f}%\n'.format(
+                required=self.options.cov_fail_under,
+                actual=self.cov_total,
+                fail='FAIL ' if failed else '',
+                reached='not reached' if failed else 'reached',
+            )
+            terminalreporter.write(message, **markup)
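+            # For example (illustrative values): with cov_fail_under=90 and
+            # a total of 84.21, the line reads
+            # "FAIL Required test coverage of 90% not reached. Total coverage: 84.21%"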
+
+    def pytest_runtest_setup(self, item):
+        if os.getpid() != self.pid:
+            # test is run in another process than the session; start
+            # coverage manually
+            embed.init()
+
+    def pytest_runtest_teardown(self, item):
+        embed.cleanup()
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_runtest_call(self, item):
+        if item.get_closest_marker('no_cover') or 'no_cover' in getattr(item, 'fixturenames', ()):
+            self.cov_controller.pause()
+            yield
+            self.cov_controller.resume()
+        else:
+            yield
+
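+# Illustrative usage of the ``no_cover`` handling above: either the marker or
+# the fixture of the same name pauses coverage for a test, e.g.
+#
+#     @pytest.mark.no_cover
+#     def test_not_measured():
+#         ...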
+
+class TestContextPlugin:
+    def __init__(self, cov):
+        self.cov = cov
+
+    def pytest_runtest_setup(self, item):
+        self.switch_context(item, 'setup')
+
+    def pytest_runtest_teardown(self, item):
+        self.switch_context(item, 'teardown')
+
+    def pytest_runtest_call(self, item):
+        self.switch_context(item, 'run')
+
+    def switch_context(self, item, when):
+        context = f'{item.nodeid}|{when}'
+        self.cov.switch_context(context)
+        os.environ['COV_CORE_CONTEXT'] = context
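+        # For example (illustrative nodeid): a test 'tests/test_x.py::test_y'
+        # records the contexts 'tests/test_x.py::test_y|setup', '...|run'
+        # and '...|teardown'.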
+
+
+@pytest.fixture
+def no_cover():  # noqa: PT004
+    """A pytest fixture to disable coverage."""
+
+
+@pytest.fixture
+def cov(request):
+    """A pytest fixture to provide access to the underlying coverage object."""
+
+    # Check with hasplugin to avoid getplugin exception in older pytest.
+    if request.config.pluginmanager.hasplugin('_cov'):
+        plugin = request.config.pluginmanager.getplugin('_cov')
+        if plugin.cov_controller:
+            return plugin.cov_controller.cov
+    return None
+
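+# A hedged usage sketch (not part of the plugin): a test can introspect the
+# live coverage object through the ``cov`` fixture above, e.g.
+#
+#     def test_coverage_active(cov):
+#         if cov is not None:  # None when coverage is disabled
+#             assert cov.config is not None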
+
+def pytest_configure(config):
+    config.addinivalue_line('markers', 'no_cover: disable coverage for this test.')
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/AUTHORS b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/AUTHORS
new file mode 100644
index 00000000..0d174c59
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/AUTHORS
@@ -0,0 +1,13 @@
+Ben Firshman  created the original version of pytest-django.
+
+This fork is currently maintained by Andreas Pelme .
+
+These people have provided bug fixes, new features, improved the documentation
+or just made pytest-django more awesome:
+
+Ruben Bakker
+Ralf Schmitt 
+Rob Berry 
+Floris Bruynooghe 
+Rafal Stozek
+Donald Stufft 
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/INSTALLER b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/LICENSE b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/LICENSE
new file mode 100644
index 00000000..fb14f1c1
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/LICENSE
@@ -0,0 +1,54 @@
+pytest_django is released under the BSD (3-clause) license
+----------------------------------------------------------
+Copyright (c) 2013, pytest_django authors (see AUTHORS file)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+This version of pytest_django is a fork of pytest_django created by Ben Firshman.
+---------------------------------------------------------------------------------
+Copyright (c) 2009, Ben Firshman
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/METADATA b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/METADATA
new file mode 100644
index 00000000..82d7e4bd
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/METADATA
@@ -0,0 +1,86 @@
+Metadata-Version: 2.1
+Name: pytest-django
+Version: 2.3.1
+Summary: A Django plugin for py.test.
+Home-page: http://pytest-django.readthedocs.org/
+Author: Andreas Pelme
+Author-email: andreas@pelme.se
+Maintainer: Andreas Pelme
+Maintainer-email: andreas@pelme.se
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Django
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Testing
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: pytest >=2.3.4
+
+.. image:: https://secure.travis-ci.org/pelme/pytest_django.png?branch=master
+   :alt: Build Status
+   :target: https://travis-ci.org/pelme/pytest_django
+
+pytest-django is a plugin for `pytest `_ that provides a set of useful tools for testing `Django `_ applications and projects.
+
+* Authors: Ben Firshman, Andreas Pelme and `contributors `_
+* Licence: BSD
+* Compatibility: Django 1.3, 1.4 and 1.5, python 2.5 - 2.7 and 3.2 - 3.3 or PyPy, pytest >= 2.3.4
+* Project URL: https://github.com/pelme/pytest_django
+* Documentation: http://pytest-django.rtfd.org/
+
+
+Quick Start
+===========
+1. ``pip install pytest-django``
+2. Make sure ``DJANGO_SETTINGS_MODULE`` is defined and run tests with the ``py.test`` command (a minimal test is sketched below).
+3. (Optionally) If you put your tests under a tests directory (the standard Django application layout), and your files are not named ``test_FOO.py``, `see the FAQ `_
+
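+A minimal sketch of such a test (``myapp`` and ``Item`` are illustrative
+names, not part of pytest-django)::
+
+    import pytest
+
+    @pytest.mark.django_db
+    def test_create_item():
+        from myapp.models import Item
+
+        Item.objects.create(name='apple')
+        assert Item.objects.count() == 1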
+
+Documentation
+==============
+
+`Documentation is available on Read the Docs. `_
+
+
+Why would I use this instead of Django's manage.py test command?
+================================================================
+
+Running the test suite with pytest offers some features that are not present in Django's standard test mechanism:
+
+ * `Smarter test discovery `_ (no need for ``from .foo import *`` in your test modules).
+ * Less boilerplate: no need to import unittest, create a subclass with methods. Just write tests as regular functions.
+ * `Injection of test dependencies with funcargs `_
+ * No need to run all tests, `it is easy to specify which tests to run `_.
+ * Database re-use: no need to re-create the test database for every test run.
+ * No hacks required to only run your apps, and not the 3rd party/contrib apps that are listed in your ``INSTALLED_APPS``.
+ * There are a lot of other nice plugins available for pytest.
+ * No pain of switching: Existing unittest-style tests will still work without any modifications.
+
+See the `pytest documentation `_ for more information on pytest.
+
+
+Contributing
+============
+
+Read the `contributing page `_ from the documentation.
+
+To run the project's tests::
+
+    make test
+
+To build the project's docs::
+
+    make docs
+
+
+Bugs? Feature suggestions?
+============================
+
+Report issues and feature requests at the `github issue tracker `_.
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/RECORD b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/RECORD
new file mode 100644
index 00000000..293fd4d2
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/RECORD
@@ -0,0 +1,27 @@
+pytest_django-2.3.1.dist-info/AUTHORS,sha256=hS0Qvss5mZVubqyDv3gGhKSH7NeBhOvmA4whKow9yU8,430
+pytest_django-2.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pytest_django-2.3.1.dist-info/LICENSE,sha256=D6fTvfbhqf8rNxgcnew9ZkPqode8nXulL3giliTI_QM,3214
+pytest_django-2.3.1.dist-info/METADATA,sha256=6yt85XvyXpCuu1iBzbWAGo05tn_jghY-Q4vU7hdf6PI,3876
+pytest_django-2.3.1.dist-info/RECORD,,
+pytest_django-2.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_django-2.3.1.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+pytest_django-2.3.1.dist-info/entry_points.txt,sha256=Nm-_lW2w8Di2hZiXUYmzC53nTHZPhG5lnIZSTgZ2LFw,41
+pytest_django-2.3.1.dist-info/top_level.txt,sha256=aw08WN6tmUlLEZ_GTFW2cK8WNzXhMUKI3TUT0jmkJSw,14
+pytest_django/__init__.py,sha256=0Ns5BhVD3XOEWhtSUJ76K319NapPiQfZKsWZRt-kZOk,22
+pytest_django/__pycache__/__init__.cpython-312.pyc,,
+pytest_django/__pycache__/client.cpython-312.pyc,,
+pytest_django/__pycache__/db_reuse.cpython-312.pyc,,
+pytest_django/__pycache__/deprecated.cpython-312.pyc,,
+pytest_django/__pycache__/django_compat.cpython-312.pyc,,
+pytest_django/__pycache__/fixtures.cpython-312.pyc,,
+pytest_django/__pycache__/lazy_django.cpython-312.pyc,,
+pytest_django/__pycache__/live_server_helper.cpython-312.pyc,,
+pytest_django/__pycache__/plugin.cpython-312.pyc,,
+pytest_django/client.py,sha256=jz8l4XTMqhZaukzV2l7tbMP2Z4KgqLFj8NaP3LHHqMY,1650
+pytest_django/db_reuse.py,sha256=iQ_rI5ClzrBz0FNLIiX5KaGDlIkbCoAMxZeVfWD4m8o,2674
+pytest_django/deprecated.py,sha256=_uNGch6ircnJKRRHiYmq4-To3JTm5w3BRDzmKW8S0ME,571
+pytest_django/django_compat.py,sha256=82O1IIR0ieVM0TcUxYHS9Kk-UxQPWDXUr0U6yIzPW2M,669
+pytest_django/fixtures.py,sha256=eF8I6fIOrZFfNuVgqtymmT5YKQ-szHk8Ckx6M0BnLkc,7578
+pytest_django/lazy_django.py,sha256=XP9dRHk61x56VfMwaPHEa5PmWgsJ5RnmYNR7T10ZuJ0,554
+pytest_django/live_server_helper.py,sha256=R5c4hT-QxrBlcQ5fVh3xXI_wbL-9YVRRlIEciHsc5gA,3239
+pytest_django/plugin.py,sha256=JWJ2uXw9w5eYW6LFeh2YXxrs1V3cwY6F35kvBeWU32g,8920
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/REQUESTED b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/WHEEL b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/WHEEL
new file mode 100644
index 00000000..9b78c445
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.3.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/entry_points.txt b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/entry_points.txt
new file mode 100644
index 00000000..a3f6478f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[pytest11]
+django = pytest_django.plugin
diff --git a/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/top_level.txt b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/top_level.txt
new file mode 100644
index 00000000..864d058f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-2.3.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pytest_django
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/AUTHORS b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/AUTHORS
new file mode 100644
index 00000000..1e01a483
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/AUTHORS
@@ -0,0 +1,23 @@
+Ben Firshman  created the original version of pytest-django.
+
+This project is currently maintained by Ran Benita .
+
+Previous maintainers are:
+
+Andreas Pelme 
+Daniel Hahler 
+
+These people have provided bug fixes, new features, improved the documentation
+or just made pytest-django more awesome:
+
+Ruben Bakker
+Ralf Schmitt 
+Rob Berry 
+Floris Bruynooghe 
+Rafal Stozek
+Donald Stufft 
+Nicolas Delaby 
+Hasan Ramezani 
+Michael Howitz
+Mark Gensler
+Pavel Taufer
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/INSTALLER b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/LICENSE b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/LICENSE
new file mode 100644
index 00000000..4e6787f6
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/LICENSE
@@ -0,0 +1,54 @@
+pytest-django is released under the BSD (3-clause) license
+----------------------------------------------------------
+Copyright (c) 2015-2018, pytest-django authors (see AUTHORS file)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+This version of pytest-django is a fork of pytest_django created by Ben Firshman.
+---------------------------------------------------------------------------------
+Copyright (c) 2009, Ben Firshman
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products
+   derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/METADATA b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/METADATA
new file mode 100644
index 00000000..a6b0bc74
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/METADATA
@@ -0,0 +1,161 @@
+Metadata-Version: 2.1
+Name: pytest-django
+Version: 4.9.0
+Summary: A Django plugin for pytest.
+Author-email: Andreas Pelme 
+Maintainer-email: Andreas Pelme 
+License: pytest-django is released under the BSD (3-clause) license
+        ----------------------------------------------------------
+        Copyright (c) 2015-2018, pytest-django authors (see AUTHORS file)
+        All rights reserved.
+        
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions are met:
+        
+         * Redistributions of source code must retain the above copyright notice, this
+           list of conditions and the following disclaimer.
+         * Redistributions in binary form must reproduce the above copyright notice,
+           this list of conditions and the following disclaimer in the documentation
+           and/or other materials provided with the distribution.
+         * The names of its contributors may not be used to endorse or promote products
+           derived from this software without specific prior written permission.
+        
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+        ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+        WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+        ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+        (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+        ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+        (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+        
+        
+        This version of pytest-django is a fork of pytest_django created by Ben Firshman.
+        ---------------------------------------------------------------------------------
+        Copyright (c) 2009, Ben Firshman
+        All rights reserved.
+        
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions are met:
+        
+         * Redistributions of source code must retain the above copyright notice, this
+           list of conditions and the following disclaimer.
+         * Redistributions in binary form must reproduce the above copyright notice,
+           this list of conditions and the following disclaimer in the documentation
+           and/or other materials provided with the distribution.
+         * The names of its contributors may not be used to endorse or promote products
+           derived from this software without specific prior written permission.
+        
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+        ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+        WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+        ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+        (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+        ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+        (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+        
+Project-URL: Documentation, https://pytest-django.readthedocs.io/
+Project-URL: Repository, https://github.com/pytest-dev/pytest-django
+Project-URL: Changelog, https://pytest-django.readthedocs.io/en/latest/changelog.html
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Django
+Classifier: Framework :: Django :: 4.2
+Classifier: Framework :: Django :: 5.0
+Classifier: Framework :: Django :: 5.1
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Testing
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: pytest >=7.0.0
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: sphinx-rtd-theme ; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: Django ; extra == 'testing'
+Requires-Dist: django-configurations >=2.0 ; extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/pytest-django.svg?style=flat
+    :alt: PyPI Version
+    :target: https://pypi.python.org/pypi/pytest-django
+
+.. image:: https://img.shields.io/pypi/pyversions/pytest-django.svg
+    :alt: Supported Python versions
+    :target: https://pypi.python.org/pypi/pytest-django
+
+.. image:: https://github.com/pytest-dev/pytest-django/workflows/main/badge.svg
+    :alt: Build Status
+    :target: https://github.com/pytest-dev/pytest-django/actions
+
+.. image:: https://img.shields.io/pypi/djversions/pytest-django.svg
+   :alt: Supported Django versions
+   :target: https://pypi.org/project/pytest-django/
+
+.. image:: https://img.shields.io/codecov/c/github/pytest-dev/pytest-django.svg?style=flat
+    :alt: Coverage
+    :target: https://codecov.io/gh/pytest-dev/pytest-django
+
+Welcome to pytest-django!
+=========================
+
+pytest-django allows you to test your Django project/applications with the
+`pytest testing tool `_.
+
+* `Quick start / tutorial
+  `_
+* `Changelog `_
+* Full documentation: https://pytest-django.readthedocs.io/en/latest/
+* `Contribution docs
+  `_
+* Version compatibility:
+
+  * Django: 4.2, 5.0, 5.1 and latest main branch (compatible at the time
+    of each release)
+  * Python: CPython>=3.8 or PyPy 3
+  * pytest: >=7.0
+
+  For compatibility with older versions, use previous pytest-django releases.
+
+* Licence: BSD
+* `All contributors `_
+* GitHub repository: https://github.com/pytest-dev/pytest-django
+* `Issue tracker `_
+* `Python Package Index (PyPI) `_
+
+Install pytest-django
+---------------------
+
+::
+
+    pip install pytest-django
+
+Why would I use this instead of Django's `manage.py test` command?
+------------------------------------------------------------------
+
+Running your test suite with pytest-django allows you to tap into the features
+that are already present in pytest. Here are some advantages:
+
+* `Manage test dependencies with pytest fixtures. `_
+* Less boilerplate tests: no need to import unittest, create a subclass with methods. Write tests as regular functions.
+* Database re-use: no need to re-create the test database for every test run.
+* Run tests in multiple processes for increased speed (with the pytest-xdist plugin).
+* Make use of other `pytest plugins `_.
+* Works with both worlds: existing unittest-style TestCases still work without any modifications; a short fixture example follows below.
+
+See the `pytest documentation `_ for more information on pytest itself.
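+
+For illustration, a hedged sketch of the fixture style described above
+(``admin_client`` is one of the plugin's fixtures; the URL is an
+assumption)::
+
+    def test_admin_index(admin_client):
+        response = admin_client.get('/admin/')
+        assert response.status_code == 200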
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/RECORD b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/RECORD
new file mode 100644
index 00000000..30193091
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/RECORD
@@ -0,0 +1,26 @@
+pytest_django-4.9.0.dist-info/AUTHORS,sha256=3G0FpewslIWJk_dh1QxbdcUe2lWBaHGKANEwysidC3I,647
+pytest_django-4.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pytest_django-4.9.0.dist-info/LICENSE,sha256=3tPAaed8rUd3y41qzNBqlVYM-xZ1NzFgWAEetSH7MKE,3219
+pytest_django-4.9.0.dist-info/METADATA,sha256=_Ebj9CXG2gPLNyxUqYNGZIG3hZj4LHlm9YfPctAL6BY,8208
+pytest_django-4.9.0.dist-info/RECORD,,
+pytest_django-4.9.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pytest_django-4.9.0.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
+pytest_django-4.9.0.dist-info/entry_points.txt,sha256=Nm-_lW2w8Di2hZiXUYmzC53nTHZPhG5lnIZSTgZ2LFw,41
+pytest_django-4.9.0.dist-info/top_level.txt,sha256=aw08WN6tmUlLEZ_GTFW2cK8WNzXhMUKI3TUT0jmkJSw,14
+pytest_django/__init__.py,sha256=Jipe9MUs5uy00AuoScKtN2uUZv5X04CB6On7E0hi3Fo,409
+pytest_django/__pycache__/__init__.cpython-312.pyc,,
+pytest_django/__pycache__/_version.cpython-312.pyc,,
+pytest_django/__pycache__/asserts.cpython-312.pyc,,
+pytest_django/__pycache__/django_compat.cpython-312.pyc,,
+pytest_django/__pycache__/fixtures.cpython-312.pyc,,
+pytest_django/__pycache__/lazy_django.cpython-312.pyc,,
+pytest_django/__pycache__/live_server_helper.cpython-312.pyc,,
+pytest_django/__pycache__/plugin.cpython-312.pyc,,
+pytest_django/_version.py,sha256=mrrQhqrp635GGcpjytZ0YQGcY0nKHpKaGOZP-dpUyCg,411
+pytest_django/asserts.py,sha256=V0ilzBkJVYUQwh4mGqiAUH6Ii0FWFMJjbOfULxr4E9E,5281
+pytest_django/django_compat.py,sha256=pbg5YZBz9ngcbtTACKlMXhfGYTEj0YmgoveiSeAeXsE,496
+pytest_django/fixtures.py,sha256=LhBSLdDIFYGvCqzrN9CabG1JUr1zeqb2_kpS898IQtY,21180
+pytest_django/lazy_django.py,sha256=GtT8wWvdikEQfK45LwYEqFWGm5G8s7CZFxtMBmSTHOc,1052
+pytest_django/live_server_helper.py,sha256=24lxEQqxhSvh8mXy_dF2NsWlRiAqHSTaZMw6sadLCZc,2868
+pytest_django/plugin.py,sha256=24j7xp_sXOd28F2QAXWRgCFt1WwqQ4oZd5SBsc91NPA,27070
+pytest_django/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/REQUESTED b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/REQUESTED
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/WHEEL b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/WHEEL
new file mode 100644
index 00000000..57e56b7e
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (74.0.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/entry_points.txt b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/entry_points.txt
new file mode 100644
index 00000000..a3f6478f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[pytest11]
+django = pytest_django.plugin
diff --git a/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/top_level.txt b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/top_level.txt
new file mode 100644
index 00000000..864d058f
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django-4.9.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pytest_django
diff --git a/.venv/Lib/site-packages/pytest_django/__init__.py b/.venv/Lib/site-packages/pytest_django/__init__.py
new file mode 100644
index 00000000..1c4ddd35
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/__init__.py
@@ -0,0 +1 @@
+__version__ = '2.3.1'
diff --git a/.venv/Lib/site-packages/pytest_django/_version.py b/.venv/Lib/site-packages/pytest_django/_version.py
new file mode 100644
index 00000000..cfa08dde
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/_version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '4.9.0'
+__version_tuple__ = version_tuple = (4, 9, 0)
diff --git a/.venv/Lib/site-packages/pytest_django/asserts.py b/.venv/Lib/site-packages/pytest_django/asserts.py
new file mode 100644
index 00000000..14741066
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/asserts.py
@@ -0,0 +1,221 @@
+"""
+Dynamically load all Django assertion cases and expose them for importing.
+"""
+
+from __future__ import annotations
+
+from functools import wraps
+from typing import TYPE_CHECKING, Any, Callable, Sequence
+
+from django import VERSION
+from django.test import LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase
+
+
+USE_CONTRIB_MESSAGES = VERSION >= (5, 0)
+
+if USE_CONTRIB_MESSAGES:
+    from django.contrib.messages import Message
+    from django.contrib.messages.test import MessagesTestMixin
+
+    class MessagesTestCase(MessagesTestMixin, TestCase):
+        pass
+
+    test_case = MessagesTestCase("run")
+else:
+    test_case = TestCase("run")
+
+
+def _wrapper(name: str):
+    func = getattr(test_case, name)
+
+    @wraps(func)
+    def assertion_func(*args, **kwargs):
+        return func(*args, **kwargs)
+
+    return assertion_func
+
+
+__all__ = []
+assertions_names: set[str] = set()
+assertions_names.update(
+    {attr for attr in vars(TestCase) if attr.startswith("assert")},
+    {attr for attr in vars(SimpleTestCase) if attr.startswith("assert")},
+    {attr for attr in vars(LiveServerTestCase) if attr.startswith("assert")},
+    {attr for attr in vars(TransactionTestCase) if attr.startswith("assert")},
+)
+
+if USE_CONTRIB_MESSAGES:
+    assertions_names.update(
+        {attr for attr in vars(MessagesTestMixin) if attr.startswith("assert")},
+    )
+
+for assert_func in assertions_names:
+    globals()[assert_func] = _wrapper(assert_func)
+    __all__.append(assert_func)  # noqa: PYI056
+
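+# Illustrative usage (``assertRedirects`` is one of the wrapped Django
+# assertions; typed stubs follow below):
+#
+#     from pytest_django.asserts import assertRedirects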
+
+if TYPE_CHECKING:
+    from django import forms
+    from django.http.response import HttpResponseBase
+
+    def assertRedirects(
+        response: HttpResponseBase,
+        expected_url: str,
+        status_code: int = ...,
+        target_status_code: int = ...,
+        msg_prefix: str = ...,
+        fetch_redirect_response: bool = ...,
+    ) -> None: ...
+
+    def assertURLEqual(
+        url1: str,
+        url2: str,
+        msg_prefix: str = ...,
+    ) -> None: ...
+
+    def assertContains(
+        response: HttpResponseBase,
+        text: object,
+        count: int | None = ...,
+        status_code: int = ...,
+        msg_prefix: str = ...,
+        html: bool = False,
+    ) -> None: ...
+
+    def assertNotContains(
+        response: HttpResponseBase,
+        text: object,
+        status_code: int = ...,
+        msg_prefix: str = ...,
+        html: bool = False,
+    ) -> None: ...
+
+    def assertFormError(
+        form: forms.BaseForm,
+        field: str | None,
+        errors: str | Sequence[str],
+        msg_prefix: str = ...,
+    ) -> None: ...
+
+    def assertFormSetError(
+        formset: forms.BaseFormSet,
+        form_index: int | None,
+        field: str | None,
+        errors: str | Sequence[str],
+        msg_prefix: str = ...,
+    ) -> None: ...
+
+    def assertTemplateUsed(
+        response: HttpResponseBase | str | None = ...,
+        template_name: str | None = ...,
+        msg_prefix: str = ...,
+        count: int | None = ...,
+    ): ...
+
+    def assertTemplateNotUsed(
+        response: HttpResponseBase | str | None = ...,
+        template_name: str | None = ...,
+        msg_prefix: str = ...,
+    ): ...
+
+    def assertRaisesMessage(
+        expected_exception: type[Exception],
+        expected_message: str,
+        *args,
+        **kwargs,
+    ): ...
+
+    def assertWarnsMessage(
+        expected_warning: Warning,
+        expected_message: str,
+        *args,
+        **kwargs,
+    ): ...
+
+    def assertFieldOutput(
+        fieldclass,
+        valid,
+        invalid,
+        field_args=...,
+        field_kwargs=...,
+        empty_value: str = ...,
+    ) -> None: ...
+
+    def assertHTMLEqual(
+        html1: str,
+        html2: str,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertHTMLNotEqual(
+        html1: str,
+        html2: str,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertInHTML(
+        needle: str,
+        haystack: str,
+        count: int | None = ...,
+        msg_prefix: str = ...,
+    ) -> None: ...
+
+    def assertJSONEqual(
+        raw: str,
+        expected_data: Any,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertJSONNotEqual(
+        raw: str,
+        expected_data: Any,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertXMLEqual(
+        xml1: str,
+        xml2: str,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertXMLNotEqual(
+        xml1: str,
+        xml2: str,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    # Removed in Django 5.1: use assertQuerySetEqual.
+    def assertQuerysetEqual(
+        qs,
+        values,
+        transform=...,
+        ordered: bool = ...,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertQuerySetEqual(
+        qs,
+        values,
+        transform=...,
+        ordered: bool = ...,
+        msg: str | None = ...,
+    ) -> None: ...
+
+    def assertNumQueries(
+        num: int,
+        func=...,
+        *args,
+        using: str = ...,
+        **kwargs,
+    ): ...
+
+    # Added in Django 5.0.
+    def assertMessages(
+        response: HttpResponseBase,
+        expected_messages: Sequence[Message],
+        *args,
+        ordered: bool = ...,
+    ) -> None: ...
+
+    # Fallback in case Django adds new asserts.
+    def __getattr__(name: str) -> Callable[..., Any]: ...
diff --git a/.venv/Lib/site-packages/pytest_django/client.py b/.venv/Lib/site-packages/pytest_django/client.py
new file mode 100644
index 00000000..b67b839b
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/client.py
@@ -0,0 +1,45 @@
+from django.core.handlers.wsgi import WSGIRequest
+from django.test.client import FakePayload
+from django.test.client import RequestFactory as VanillaRequestFactory
+
+
+class PytestDjangoRequestFactory(VanillaRequestFactory):
+    """
+    Based on Django 1.3's RequestFactory, but fixes an issue that causes an
+    error to be thrown when creating a WSGIRequest instance with a plain call
+    to RequestFactory.request().
+
+    This issue is fixed in Django 1.4, so this class will be unnecessary when
+    support for Django 1.3 is dropped.
+
+    https://code.djangoproject.com/ticket/15898
+
+    Incorporates code from https://code.djangoproject.com/changeset/16933.
+    """
+    def request(self, **request):
+        environ = {
+            'HTTP_COOKIE':       self.cookies.output(header='', sep='; '),
+            'PATH_INFO':         '/',
+            'REMOTE_ADDR':       '127.0.0.1',
+            'REQUEST_METHOD':    'GET',
+            'SCRIPT_NAME':       '',
+            'SERVER_NAME':       'testserver',
+            'SERVER_PORT':       '80',
+            'SERVER_PROTOCOL':   'HTTP/1.1',
+            'wsgi.version':      (1, 0),
+            'wsgi.url_scheme':   'http',
+            'wsgi.input':        FakePayload(''),
+            'wsgi.errors':       self.errors,
+            'wsgi.multiprocess': True,
+            'wsgi.multithread':  False,
+            'wsgi.run_once':     False,
+        }
+        environ.update(self.defaults)
+        environ.update(request)
+        return WSGIRequest(environ)
+
+try:
+    VanillaRequestFactory().request()
+    RequestFactory = VanillaRequestFactory
+except KeyError:
+    RequestFactory = PytestDjangoRequestFactory
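+
+# Illustrative usage: ``RequestFactory`` resolves to whichever class works on
+# the installed Django, so a bare request can be created with
+#
+#     request = RequestFactory().request()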
diff --git a/.venv/Lib/site-packages/pytest_django/db_reuse.py b/.venv/Lib/site-packages/pytest_django/db_reuse.py
new file mode 100644
index 00000000..e3e6a99e
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/db_reuse.py
@@ -0,0 +1,77 @@
+"""Functions to aid in preserving the test database between test runs.
+
+The code in this module is heavily inspired by django-nose:
+https://github.com/jbalogh/django-nose/
+"""
+import sys
+import types
+
+
+def can_support_db_reuse(connection):
+    """Return whether it makes any sense to use REUSE_DB with the backend of a
+    connection."""
+    # This is a SQLite in-memory DB. Those are created implicitly when
+    # you try to connect to them, so our test below doesn't work.
+    return connection.creation._get_test_db_name() != ':memory:'
+
+
+def test_database_exists_from_previous_run(connection):
+    # Check for sqlite memory databases
+    if not can_support_db_reuse(connection):
+        return False
+
+    # Try to open a cursor to the test database
+    orig_db_name = connection.settings_dict['NAME']
+    connection.settings_dict['NAME'] = connection.creation._get_test_db_name()
+
+    try:
+        connection.cursor()
+        return True
+    except Exception:  # TODO: Be more discerning but still DB agnostic.
+        return False
+    finally:
+        connection.close()
+        connection.settings_dict['NAME'] = orig_db_name
+
+
+def create_test_db(self, verbosity=1, autoclobber=False):
+    """
+    This method is a monkey-patched version of create_test_db that
+    will not actually create a new database, but just reuse the
+    existing one.
+    """
+    test_database_name = self._get_test_db_name()
+    self.connection.settings_dict['NAME'] = test_database_name
+
+    if verbosity >= 1:
+        test_db_repr = ''
+        if verbosity >= 2:
+            test_db_repr = " ('%s')" % test_database_name
+        print("Re-using existing test database for alias '%s'%s..." % (
+            self.connection.alias, test_db_repr))
+
+    # confirm() is not needed/available in Django >= 1.5
+    # See https://code.djangoproject.com/ticket/17760
+    if hasattr(self.connection.features, 'confirm'):
+        self.connection.features.confirm()
+
+    return test_database_name
+
+
+def monkey_patch_creation_for_db_reuse():
+    from django.db import connections
+
+    for alias in connections:
+        connection = connections[alias]
+        creation = connection.creation
+
+        if test_database_exists_from_previous_run(connection):
+            # Make sure our monkey patch is still valid in the future
+            assert hasattr(creation, 'create_test_db')
+
+            if sys.version_info < (3, 0):
+                creation.create_test_db = types.MethodType(
+                    create_test_db, creation, creation.__class__)
+            else:
+                creation.create_test_db = types.MethodType(create_test_db,
+                                                           creation)
diff --git a/.venv/Lib/site-packages/pytest_django/deprecated.py b/.venv/Lib/site-packages/pytest_django/deprecated.py
new file mode 100644
index 00000000..b64051b6
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/deprecated.py
@@ -0,0 +1,17 @@
+import pytest
+
+
+def transaction_test_case(*args, **kwargs):
+    raise pytest.UsageError('transaction_test_case has been deprecated: use '
+                            'pytest.mark.django_db(transaction=True) ')
+
+
+def pytest_namespace():
+    def load_fixture(*args, **kwargs):
+        raise pytest.UsageError('pytest.load_fixture has been deprecated')
+
+    def urls(*args, **kwargs):
+        raise pytest.UsageError('pytest.urls has been deprecated: use '
+                                'pytest.mark.urls instead')
+
+    return {'load_fixture': load_fixture, 'urls': urls}
diff --git a/.venv/Lib/site-packages/pytest_django/django_compat.py b/.venv/Lib/site-packages/pytest_django/django_compat.py
new file mode 100644
index 00000000..d7652046
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/django_compat.py
@@ -0,0 +1,19 @@
+# Note that all functions here assume django is available.  So ensure
+# this is the case before you call them.
+import sys
+
+
+def is_django_unittest(item):
+    """Returns True if the item is a Django test case, otherwise False"""
+    try:
+        from django.test import SimpleTestCase as TestCase
+    except ImportError:
+        from django.test import TestCase
+
+    if sys.version_info < (3, 0):
+        return (hasattr(item.obj, 'im_class') and
+                issubclass(item.obj.im_class, TestCase))
+
+    return (hasattr(item.obj, '__self__') and
+            hasattr(item.obj.__self__, '__class__') and
+            issubclass(item.obj.__self__.__class__, TestCase))
diff --git a/.venv/Lib/site-packages/pytest_django/fixtures.py b/.venv/Lib/site-packages/pytest_django/fixtures.py
new file mode 100644
index 00000000..cfd54fd1
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/fixtures.py
@@ -0,0 +1,227 @@
+"""All pytest-django fixtures"""
+
+from __future__ import with_statement
+
+import os
+
+import pytest
+
+from . import live_server_helper
+from .db_reuse import monkey_patch_creation_for_db_reuse
+from .django_compat import is_django_unittest
+from .lazy_django import skip_if_no_django
+
+__all__ = ['_django_db_setup', 'db', 'transactional_db',
+           'client', 'admin_client', 'rf', 'settings', 'live_server',
+           '_live_server_helper']
+
+
+################ Internal Fixtures ################
+
+
+@pytest.fixture(scope='session')
+def _django_db_setup(request, _django_runner, _django_cursor_wrapper):
+    """Session-wide database setup, internal to pytest-django"""
+    skip_if_no_django()
+
+    from django.core import management
+
+    # Disable south's syncdb command
+    commands = management.get_commands()
+    if commands['syncdb'] == 'south':
+        management._commands['syncdb'] = 'django.core'
+
+    with _django_cursor_wrapper:
+
+        # Monkey patch Django's setup code to support database re-use
+        if request.config.getvalue('reuse_db'):
+            if not request.config.getvalue('create_db'):
+                monkey_patch_creation_for_db_reuse()
+            _django_runner.teardown_databases = lambda db_cfg: None
+
+        # Create the database
+        db_cfg = _django_runner.setup_databases()
+
+    def teardown_database():
+        with _django_cursor_wrapper:
+            _django_runner.teardown_databases(db_cfg)
+
+    request.addfinalizer(teardown_database)
+
+
+################ User visible fixtures ################
+
+
+@pytest.fixture(scope='function')
+def db(request, _django_db_setup, _django_cursor_wrapper):
+    """Require a django test database
+
+    This database will be set up with the default fixtures and will
+    have the transaction management disabled.  At the end of the test
+    the transaction will be rolled back to undo any changes to the
+    database.  This is more limited than the ``transactional_db``
+    resource but faster.
+
+    If both this and ``transactional_db`` are requested then the
+    database setup will behave as if only ``transactional_db`` was
+    requested.
+    """
+    if ('transactional_db' not in request.funcargnames and
+            'live_server' not in request.funcargnames and
+            not is_django_unittest(request.node)):
+
+        from django.test import TestCase
+
+        _django_cursor_wrapper.enable()
+        case = TestCase(methodName='__init__')
+        case._pre_setup()
+        request.addfinalizer(case._post_teardown)
+        request.addfinalizer(_django_cursor_wrapper.disable)
+
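+# Illustrative usage: requesting ``db`` gives the test a database wrapped in
+# a transaction that is rolled back afterwards, e.g.
+#
+#     def test_create_user(db):
+#         from django.contrib.auth.models import User
+#
+#         User.objects.create(username='someone')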
+
+@pytest.fixture(scope='function')
+def transactional_db(request, _django_db_setup, _django_cursor_wrapper):
+    """Require a django test database with transaction support
+
+    This will re-initialise the django database for each test and is
+    thus slower than the normal ``db`` fixture.
+
+    If you want to use the database with transactions you must request
+    this resource.  If both this and ``db`` are requested then the
+    database setup will behave as if only ``transactional_db`` was
+    requested.
+    """
+    if not is_django_unittest(request.node):
+        _django_cursor_wrapper.enable()
+
+        def flushdb():
+            """Flush the database and close database connections"""
+            # Django does this by default *before* each test
+            # instead of after.
+            from django.db import connections
+            from django.core.management import call_command
+
+            for db in connections:
+                call_command('flush', verbosity=0,
+                             interactive=False, database=db)
+            for conn in connections.all():
+                conn.close()
+
+        request.addfinalizer(_django_cursor_wrapper.disable)
+        request.addfinalizer(flushdb)
+
+
+@pytest.fixture()
+def client():
+    """A Django test client instance"""
+    skip_if_no_django()
+
+    from django.test.client import Client
+
+    return Client()
+
+
+@pytest.fixture()
+def admin_client(db):
+    """A Django test client logged in as an admin user"""
+    try:
+        from django.contrib.auth import get_user_model
+        User = get_user_model()
+    except ImportError:
+        from django.contrib.auth.models import User
+    from django.test.client import Client
+
+    try:
+        User.objects.get(username='admin')
+    except User.DoesNotExist:
+        user = User.objects.create_user('admin', 'admin@example.com',
+                                        'password')
+        user.is_staff = True
+        user.is_superuser = True
+        user.save()
+
+    client = Client()
+    client.login(username='admin', password='password')
+    return client
+
+
+@pytest.fixture()
+def rf():
+    """RequestFactory instance"""
+    skip_if_no_django()
+
+    from django.test.client import RequestFactory
+
+    return RequestFactory()
+
+
+class MonkeyPatchWrapper(object):
+    def __init__(self, monkeypatch, wrapped_object):
+        super(MonkeyPatchWrapper, self).__setattr__('monkeypatch', monkeypatch)
+        super(MonkeyPatchWrapper, self).__setattr__('wrapped_object',
+                                                    wrapped_object)
+
+    def __getattr__(self, attr):
+        return getattr(self.wrapped_object, attr)
+
+    def __setattr__(self, attr, value):
+        self.monkeypatch.setattr(self.wrapped_object, attr, value,
+                                 raising=False)
+
+    def __delattr__(self, attr):
+        self.monkeypatch.delattr(self.wrapped_object, attr)
+
+
+@pytest.fixture()
+def settings(request, monkeypatch):
+    """A Django settings object which restores changes after the testrun"""
+    skip_if_no_django()
+
+    from django.conf import settings as django_settings
+    return MonkeyPatchWrapper(monkeypatch, django_settings)
+
+
+@pytest.fixture(scope='session')
+def live_server(request):
+    """Run a live Django server in the background during tests
+
+    The address the server is started from is taken from the
+    --liveserver command line option or, if this is not provided, from
+    the DJANGO_TEST_LIVE_SERVER_ADDRESS environment variable.  If
+    neither is provided, ``localhost:8081,8100-8200`` is used.  See
+    the Django documentation for its full syntax.
+
+    NOTE: If the live server needs database access to handle a request
+          your test will have to request database access.  Furthermore
+          when the tests want to see data added by the live-server (or
+          the other way around) transactional database access will be
+          needed as data inside a transaction is not shared between
+          the live server and test code.
+    """
+    skip_if_no_django()
+    addr = request.config.getvalue('liveserver')
+    if not addr:
+        addr = os.getenv('DJANGO_TEST_LIVE_SERVER_ADDRESS')
+    if not addr:
+        addr = 'localhost:8081,8100-8200'
+    server = live_server_helper.LiveServer(addr)
+    request.addfinalizer(server.stop)
+    return server
+
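+# Illustrative usage (Python 3; the requested path is an assumption):
+#
+#     def test_index(live_server):
+#         from urllib.request import urlopen
+#
+#         assert urlopen(live_server + '/').getcode() == 200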
+
+@pytest.fixture(autouse=True, scope='function')
+def _live_server_helper(request):
+    """Helper to make live_server work, internal to pytest-django
+
+    This helper will dynamically request the transactional_db fixture
+    for a test which uses the live_server fixture.  This allows the
+    server and test to access the database without having to mark
+    this explicitly, which is handy since it is usually required and
+    matches the Django behaviour.
+
+    The separate helper is required because live_server cannot request
+    transactional_db directly, since it is session-scoped instead of
+    function-scoped.
+    """
+    if 'live_server' in request.funcargnames:
+        request.getfuncargvalue('transactional_db')
diff --git a/.venv/Lib/site-packages/pytest_django/lazy_django.py b/.venv/Lib/site-packages/pytest_django/lazy_django.py
new file mode 100644
index 00000000..72b82c95
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/lazy_django.py
@@ -0,0 +1,21 @@
+"""
+Helpers to load Django lazily when Django settings can't be configured.
+"""
+
+import os
+import sys
+import pytest
+
+
+def skip_if_no_django():
+    """Raises a skip exception when no Django settings are available"""
+    if not django_settings_is_configured():
+        pytest.skip('Test skipped since no Django settings is present.')
+
+
+def django_settings_is_configured():
+    if 'django' in sys.modules or os.environ.get('DJANGO_SETTINGS_MODULE'):
+        from django.conf import settings
+        return settings.configured
+    else:
+        return False
diff --git a/.venv/Lib/site-packages/pytest_django/live_server_helper.py b/.venv/Lib/site-packages/pytest_django/live_server_helper.py
new file mode 100644
index 00000000..84d02943
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/live_server_helper.py
@@ -0,0 +1,96 @@
+import sys
+import pytest
+
+
+def supported():
+    import django.test.testcases
+
+    return hasattr(django.test.testcases, 'LiveServerThread')
+
+
+class LiveServer(object):
+    """The liveserver fixture
+
+    This is the object which is returned to the actual user when they
+    request the ``live_server`` fixture.  The fixture handles creation
+    and stopping however.
+    """
+
+    def __init__(self, addr):
+        try:
+            from django.test.testcases import LiveServerThread
+        except ImportError:
+            pytest.skip('live_server tests not supported in Django < 1.4')
+        from django.db import connections
+
+        connections_override = {}
+        for conn in connections.all():
+            # If using in-memory sqlite databases, pass the connections to
+            # the server thread.
+            if (conn.settings_dict['ENGINE'] == 'django.db.backends.sqlite3'
+                    and conn.settings_dict['NAME'] == ':memory:'):
+                # Explicitly enable thread-shareability for this connection
+                conn.allow_thread_sharing = True
+                connections_override[conn.alias] = conn
+
+        host, possible_ports = parse_addr(addr)
+        self.thread = LiveServerThread(host, possible_ports,
+                                       connections_override)
+        self.thread.daemon = True
+        self.thread.start()
+        self.thread.is_ready.wait()
+        if self.thread.error:
+            raise self.thread.error
+
+    def stop(self):
+        """Stop the server"""
+        self.thread.join()
+
+    @property
+    def url(self):
+        return 'http://%s:%s' % (self.thread.host, self.thread.port)
+
+    if sys.version_info < (3, 0):
+        def __unicode__(self):
+            return self.url
+
+        def __add__(self, other):
+            return unicode(self) + other
+    else:
+        def __str__(self):
+            return self.url
+
+        def __add__(self, other):
+            return str(self) + other
+
+    def __repr__(self):
+        return '<LiveServer listening at %s>' % self.url
+
+
+def parse_addr(specified_address):
+    """Parse the --liveserver argument into a host/IP address and port range"""
+    # This code is based on
+    # django.test.testcases.LiveServerTestCase.setUpClass
+
+    # The specified ports may be of the form '8000-8010,8080,9200-9300'
+    # i.e. a comma-separated list of ports or ranges of ports, so we break
+    # it down into a detailed list of all possible ports.
+    possible_ports = []
+    try:
+        host, port_ranges = specified_address.split(':')
+        for port_range in port_ranges.split(','):
+            # A port range can be of either form: '8000' or '8000-8010'.
+            extremes = list(map(int, port_range.split('-')))
+            assert len(extremes) in [1, 2]
+            if len(extremes) == 1:
+                # Port range of the form '8000'
+                possible_ports.append(extremes[0])
+            else:
+                # Port range of the form '8000-8010'
+                for port in range(extremes[0], extremes[1] + 1):
+                    possible_ports.append(port)
+    except Exception:
+        raise Exception(
+            'Invalid address ("%s") for live server.' % specified_address)
+
+    return (host, possible_ports)
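A worked example (doctest-style, illustrative):

    >>> parse_addr('localhost:8000-8002,8080')
    ('localhost', [8000, 8001, 8002, 8080])

An address without a colon, or with non-numeric ports, falls into the except branch and raises the Exception above.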
diff --git a/.venv/Lib/site-packages/pytest_django/plugin.py b/.venv/Lib/site-packages/pytest_django/plugin.py
new file mode 100644
index 00000000..eec01247
--- /dev/null
+++ b/.venv/Lib/site-packages/pytest_django/plugin.py
@@ -0,0 +1,267 @@
+"""A py.test plugin which helps testing Django applications
+
+This plugin handles creating and destroying the test environment and
+test database and provides some useful test fixtures.
+"""
+
+import os
+import sys
+
+import pytest
+
+from .django_compat import is_django_unittest
+from .fixtures import (_django_db_setup, db, transactional_db, client,
+                       admin_client, rf, settings, live_server,
+                       _live_server_helper)
+
+from .lazy_django import skip_if_no_django, django_settings_is_configured
+
+
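+# Referencing the imported fixtures once below keeps pyflakes from
+# flagging the imports as unused; the imports themselves are what
+# expose the fixtures to pytest through this plugin module.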
+(_django_db_setup, db, transactional_db, client, admin_client, rf,
+ settings, live_server, _live_server_helper)
+
+
+SETTINGS_MODULE_ENV = 'DJANGO_SETTINGS_MODULE'
+CONFIGURATION_ENV = 'DJANGO_CONFIGURATION'
+
+
+################ pytest hooks ################
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup('django')
+    group._addoption('--reuse-db',
+                     action='store_true', dest='reuse_db', default=False,
+                     help='Re-use the testing database if it already exists, '
+                          'and do not remove it when the test finishes. This '
+                          'option will be ignored when --no-db is given.')
+    group._addoption('--create-db',
+                     action='store_true', dest='create_db', default=False,
+                     help='Re-create the database, even if it exists. This '
+                          'option will be ignored unless --reuse-db is also '
+                          'given.')
+    group._addoption('--ds',
+                     action='store', type='string', dest='ds', default=None,
+                     help='Set DJANGO_SETTINGS_MODULE.')
+    group._addoption('--dc',
+                     action='store', type='string', dest='dc', default=None,
+                     help='Set DJANGO_CONFIGURATION.')
+    parser.addini(CONFIGURATION_ENV,
+                  'django-configurations class to be used by pytest-django.')
+    group._addoption('--liveserver', default=None,
+                     help='Address and port for the live_server fixture.')
+    parser.addini(SETTINGS_MODULE_ENV,
+                  'Django settings module to be used by pytest-django.')
+
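Taken together, a typical invocation using these options might look like this (module name and ports are illustrative):

    py.test --ds=myproject.settings --reuse-db --liveserver=localhost:8081-8090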
+
+def pytest_configure(config):
+    """Configure DJANGO_SETTINGS_MODULE and register our marks
+
+    The first specified value from the following will be used:
+
+    * --ds command line option
+    * DJANGO_SETTINGS_MODULE pytest.ini option
+    * DJANGO_SETTINGS_MODULE environment variable
+
+    If configured via django.conf.settings.configure(), those settings
+    will be used instead.
+    """
+    # Configure DJANGO_SETTINGS_MODULE
+    ds = (config.option.ds or
+          config.getini(SETTINGS_MODULE_ENV) or
+          os.environ.get(SETTINGS_MODULE_ENV))
+
+    # Configure DJANGO_CONFIGURATION
+    dc = (config.option.dc or
+          config.getini(CONFIGURATION_ENV) or
+          os.environ.get(CONFIGURATION_ENV))
+
+    if ds:
+        os.environ[SETTINGS_MODULE_ENV] = ds
+
+        if dc:
+            os.environ[CONFIGURATION_ENV] = dc
+
+            # Install the django-configurations importer
+            import configurations.importer
+            configurations.importer.install()
+
+        from django.conf import settings
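+        # Accessing any settings attribute forces the settings module
+        # to be imported, which is where an ImportError for a bad
+        # DJANGO_SETTINGS_MODULE would surface.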
+        try:
+            settings.DATABASES
+        except ImportError:
+            e = sys.exc_info()[1]
+            raise pytest.UsageError(*e.args)
+
+    # Register the marks
+    config.addinivalue_line(
+        'markers',
+        'django_db(transaction=False): Mark the test as using '
+        'the django test database.  The *transaction* argument allows '
+        "you to use real transactions in the test, like Django's "
+        'TransactionTestCase.')
+    config.addinivalue_line(
+        'markers',
+        'urls(modstr): Use a different URLconf for this test, similar to '
+        'the `urls` attribute of Django `TestCase` objects.  *modstr* is '
+        'a string specifying the module of a URL config, e.g. '
+        '"my_app.test_urls".')
+
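Equivalently, the ini options registered in pytest_addoption can carry the same configuration in pytest.ini (module and class names are illustrative):

    [pytest]
    DJANGO_SETTINGS_MODULE = myproject.settings
    DJANGO_CONFIGURATION = Dev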
+
+################ Autouse fixtures ################
+
+
+@pytest.fixture(autouse=True, scope='session')
+def _django_runner(request):
+    """Create the django runner, internal to pytest-django
+
+    This does important things like setting up the local memory email
+    backend etc.
+
+    XXX It is a little dodgy that this is an autouse fixture.  Perhaps
+        an email fixture should be requested in order to be able to
+        use the Django email machinery just like you need to request a
+        db fixture for access to the Django database, etc.  But
+        without duplicating a lot more of Django's test support code
+        we need to follow this model.
+    """
+    if django_settings_is_configured():
+        from django.test.simple import DjangoTestSuiteRunner
+
+        runner = DjangoTestSuiteRunner(interactive=False)
+        runner.setup_test_environment()
+        request.addfinalizer(runner.teardown_test_environment)
+        return runner
+
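One concrete effect of the test environment set up here is the in-memory email backend, so a sketch like the following works without sending real mail (addresses are illustrative; the _django_clear_outbox fixture below resets the outbox between tests):

    from django.core import mail

    def test_sends_greeting():
        mail.send_mail('greeting', 'hello', 'from@example.com',
                       ['to@example.com'])
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject == 'greeting'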
+
+@pytest.fixture(autouse=True, scope='session')
+def _django_cursor_wrapper(request):
+    """The django cursor wrapper, internal to pytest-django
+
+    This will globally disable all database access. The object
+    returned has a .enable() and a .disable() method which can be used
+    to temporarily enable database access.
+    """
+    if django_settings_is_configured():
+
+        import django.db.backends.util
+
+        manager = CursorManager(django.db.backends.util)
+        manager.disable()
+    else:
+        manager = CursorManager()
+    return manager
+
+
+@pytest.fixture(autouse=True)
+def _django_db_marker(request):
+    """Implement the django_db marker, internal to pytest-django
+
+    This will dynamically request the ``db`` or ``transactional_db``
+    fixtures as required by the django_db marker.
+    """
+    marker = request.keywords.get('django_db', None)
+    if marker:
+        validate_django_db(marker)
+        if marker.transaction:
+            request.getfuncargvalue('transactional_db')
+        else:
+            request.getfuncargvalue('db')
+
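A minimal sketch of the marker in both forms (the Item model is hypothetical):

    import pytest

    @pytest.mark.django_db
    def test_create_item():
        from myapp.models import Item  # hypothetical model
        Item.objects.create(name='spam')
        assert Item.objects.count() == 1

    @pytest.mark.django_db(transaction=True)
    def test_with_real_transactions():
        # resolved to the transactional_db fixture by the branch above
        from myapp.models import Item
        assert Item.objects.count() == 0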
+
+@pytest.fixture(autouse=True)
+def _django_setup_unittest(request, _django_cursor_wrapper):
+    """Setup a django unittest, internal to pytest-django"""
+    if django_settings_is_configured() and is_django_unittest(request.node):
+        request.getfuncargvalue('_django_runner')
+        request.getfuncargvalue('_django_db_setup')
+        _django_cursor_wrapper.enable()
+        request.addfinalizer(_django_cursor_wrapper.disable)
+
+
+@pytest.fixture(autouse=True, scope='function')
+def _django_clear_outbox(request):
+    """Clear the django outbox, internal to pytest-django"""
+    if django_settings_is_configured():
+        from django.core import mail
+        mail.outbox = []
+
+
+@pytest.fixture(autouse=True, scope='function')
+def _django_set_urlconf(request):
+    """Apply the @pytest.mark.urls marker, internal to pytest-django"""
+    marker = request.keywords.get('urls', None)
+    if marker:
+        skip_if_no_django()
+        import django.conf
+        from django.core.urlresolvers import clear_url_caches
+
+        validate_urls(marker)
+        original_urlconf = django.conf.settings.ROOT_URLCONF
+        django.conf.settings.ROOT_URLCONF = marker.urls
+        clear_url_caches()
+
+        def restore():
+            django.conf.settings.ROOT_URLCONF = original_urlconf
+
+        request.addfinalizer(restore)
+
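A usage sketch for the marker this fixture implements (the URLconf module path is illustrative):

    import pytest

    @pytest.mark.urls('my_app.test_urls')
    def test_custom_urlconf(client):
        # ROOT_URLCONF is swapped for the duration of this test only
        assert client.get('/only-in-test-urls/').status_code == 200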
+
+################ Helper Functions ################
+
+
+class CursorManager(object):
+    """Manager for django.db.backends.util.CursorWrapper
+
+    This is the object returned by _django_cursor_wrapper.
+
+    If created with ``dbutil=None``, i.e. without the
+    django.db.backends.util module, the object is a no-op.
+    """
+
+    def __init__(self, dbutil=None):
+        self._dbutil = dbutil
+        if dbutil:
+            self._orig_wrapper = dbutil.CursorWrapper
+
+    def _blocking_wrapper(*args, **kwargs):
+        __tracebackhide__ = True
+        __tracebackhide__  # Silence pyflakes
+        pytest.fail('Database access not allowed, '
+                    'use the "django_db" mark to enable')
+
+    def enable(self):
+        """Enable access to the django database"""
+        if self._dbutil:
+            self._dbutil.CursorWrapper = self._orig_wrapper
+
+    def disable(self):
+        if self._dbutil:
+            self._dbutil.CursorWrapper = self._blocking_wrapper
+
+    def __enter__(self):
+        self.enable()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.disable()
+
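Because __enter__ and __exit__ map to enable() and disable(), the manager doubles as a context manager, so a test can lift the database guard for a single block (a sketch, assuming the internal fixture is requested directly):

    def test_brief_db_access(_django_cursor_wrapper):
        with _django_cursor_wrapper:
            from django.contrib.auth.models import User
            User.objects.count()  # allowed only inside the with-block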
+
+def validate_django_db(marker):
+    """This function validates the django_db marker
+
+    It checks the signature and sets the `transaction` attribute on
+    the marker to the value resolved from the marker's arguments.
+    """
+    def apifun(transaction=False):
+        marker.transaction = transaction
+    apifun(*marker.args, **marker.kwargs)
+
+
+def validate_urls(marker):
+    """This function validates the urls marker
+
+    It checks the signature and sets the `urls` attribute on the
+    marker to the value resolved from the marker's arguments.
+    """
+    def apifun(urls):
+        marker.urls = urls
+    apifun(*marker.args, **marker.kwargs)
diff --git a/.venv/Lib/site-packages/pytest_django/py.typed b/.venv/Lib/site-packages/pytest_django/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/.venv/Scripts/coverage-3.12.exe b/.venv/Scripts/coverage-3.12.exe
new file mode 100644
index 0000000000000000000000000000000000000000..43e5e29cd03b6c2abcc65af9c4d5caa132a03055
GIT binary patch
literal 108427
[108427 bytes of base85-encoded binary delta omitted: the payload adds the .venv/Scripts/coverage-3.12.exe launcher and contains no reviewable content]
z{l96Ke7GeU{#P5P(1-)>pb!x>_limI(??L33;=E&UU`S^Xg(o6V~Xzp2+b869oyFB~+oK91m(zDG}-Ce|yro;clXhx0fm
zqA!a1;w8|CgOIS{tHtHPM)Qnv&@IQrVjZ>Cz6}8;hEX6s#`+#jXAT>_&8rE)U3h@u(3Rj2wHPF8HLr_+u|u2h!@v|soMqnSEk8Zd`9UErc
zRN_h>v@U-yBXM8Ej^Rk$+sR6^P!=M|4(TT&#@8NU-8`?Hjo1~wjxi#DFXslCbHj#H
zR5!NB>1Vtka3nsdw|a3-Y^?Qbif>?ajCQZ}h|~?V$4;Z2hvePt!VjWV5kP_Mdzd#2
z(Ya9OE~}OG95vq%MZN6^iVy-|(zl&p4c#oK!g~#g9ul0wCtz5||XBmlcb|@y+~5^oMA2
z%2&t|Z30b#v!su;P0>oP@n%l!68gTFk*t&4-cTiC(g?CTh0XM*M_NA`XrI~P!(S-N
zL`<-L&IbV?K2X3qpYwnLW)JqoQsvmwRaiiIOAWlUuFCW7CR}XuDqc-j>a`x<)1Wa~
zw1+(1-L|GuLWkn}HjH3W>Zkjq4e-!WA;hn0iSIXW`S*t~{JgUpYShtg%LoE=slzv~<=K*WA*ElMAxu<+e5ER>PXppG$|uZeA(Temu%&q(p;3AFN2!kq
zm=?vfxfpqDEN!LF)Xm0H1wg{HMEXo-l13}ryyuWqH$7J>Xgp69ORBMSo%EOR{GE@T
zp6`=69Ftb3=ONylwdwgfFVgK&D$mcnFSmVb{~?FB$0_H`z~O7eOlSLUCm#&_o;kIB
z^GO&pU!)Lg-zm3^a<;FL4;!T`wb1X9I%}R0*ioufT+j91NaBu?NMeOwVtj_4-Bj0@
z_j+s0>1Gh!;oi!cvc4Mg&8Yc4=Cmj3w59_z5~=-$9!bpUA~dL*qwByWnz05DbT{~4
z*jZ@K?vDlzYTtT-qUP-5@^1W$cjLZ1m)7`wc?;yk#>sw)Ni$-;5OH_f-AMb*3BElL
zTXVmwcEz1Nab&8Q-#V9uW2Z6VdwH||2KhpVBR4w8!{_^EvduYpj=@m1wadC|nCyj2
zt$A%;w3fp&nPJJ87ID86l?_lyq<-5M`#ZFGH^n*bFxrb{B4*!>glHD=IX
zaR4E?rmXV`e=Jb3r)umy9O_=}HG_<;wLag>;c-u)&Cx(xabWC&VP!^jmFM&Ib
z$EM)|j1Ueju0pu}b54-q=pis$~y&T*+xHtN5ij^Dv
z^%7mNlKsbrMJuxz??mDQn__!^I>*gYDhiq>gCh>6y-yP!!np!os_nT!v)geY)f(H$
zMdxVz82saUVjQ{l!Fyx32g`P8jl0P*QX^tlU_Sb?kt&IuWuyvXIfW6
zvj(<2h5p+D2H`EwSwH=TECv*ISR}=U4K0jI?@X;}rSnDnja37_hg1U|)xdV^hSx;N
zR_l)tW>JcPb8F@5C~uO{c@SQX_Wc-vx12+X_zdyQjX9DVg;djzhq7W0o
z))<;YTY1Kqwi$lJ9G%8d#&=Y2g-5J9EDiLvQu;DVkGayNG;o{qwO{JmzR6Uh$UG@x
zPCO=Jtf)bg*6_lp#3+w^Tg=a7c|p*fGtm(jE${gPmO7HD77SR?ytQ3_Bxr`(@-qAT
zWfSOxaSdnVed(w}=&i-FC`!Pi=?<=yrTgx#ws#DU@R`1IyXR+k0R7~IY6mXQnIYJ=|Dqf4+{O?83Q*D35
zm~q?{FH`;v)-R{BFDCMi3*t-k>{7fQ)8nw?9TyWqG3`Ursw{KR7s%pMMe3iM)dT*M`1?|}%AZgc@
zX30+IPfbP!7X!AEjBUyvWF0|-nESBQh0Mtj(=rdU9mNVG#;RgmWP&-P(zBuAracc-
zp+(j}^q7=iuyEi?+-C&NiI3TU^)U0@n#|Xx-UoNc*6NmU3HqR;Wl%dL
zkIaY`kZ}eU*h+@_w{SA-$LNPRs?I`9&yRXRk~$gghBqUHqL4xmtMtVD2F!n`DBU&Y
zA@L!Y3w6XoW)F{rN=O!R5%FX>|1Ypcy+BCeYqX6PttY}QV(d8A+D=AhCvAj2I9Ci+
zE_xz1LN~*Y8IN@_s1s-}DbcJjI5vpO#CDDjrv=T!AxN@1Y#t5bfti^9CyoyfXpL_T
z2V8Sei{e7KzA*ct9Fu(Nld9;CL
z?d=gOO0=h4Y+4Jb!Gh3(cScOi?2L8L!@
zXRz-XiI$JM!z1>gk%aITI}Ha2`#~+lD$VpAZrrCeDp|VeRi;hXLX+MU&wulyCi{V@
zp~_QZXJ}92zB_-Nbp#$k+W_m_M`OPZC+5?&W-o>zKXw6;Mw
zPZVMo6>O;(y{(rJ))j>Jj--v{g0^&C9d>R#xu`p+I!;{+20Fvd@~tlHPH#Z}#D#80
zwJKsBYO=M&SD3rt(@+KWTkw{8Sk2`v+CyWht11NA9@xI&HVQx{ji8>XzDsLtBV)te
zncQFSH2RmvZZP^+XpO58RW`&kpI(%5tDHnrJ71E)Kc>S>es<7(F(N@%94gfc
zt}u%Qr8lQ*gBzd@RpP2l;SukoBN6k<1H@t7b$bS(TH|}1=7p2j`DH3Rgr=l(6PIL>
zoLb8o5hMoHL6p-P+JoNWY5<8%Jy_)&dQZbMH@;n1k5gZVSDG59CRwN@mS3YieR+R+
zBAkSWPvs4(spUN{Y+l|!Sg;6&bFUYtQyI6H=HmrUtM0Jb+GO9GuVy+uB51tb7Yv*T
zYFD3tL}TJ3oc#GNW=rR=aO>o4-~yYIy{l>KgSZEC^?)4Dv_{}AeTN7(PtHQSsCppR
z-O&ueZ%;ojbgn0xqy?c1=D}`fMTVQ+(Hf7#GMidk%E4&NTj|ys)55Ur?JSdKcj|Q#
z@lkkIq~gI09sUQhXE1Oi`1G%+0*FVX$zZ^K;H)*Biv-5nT~_VsJQLwR!63B8U?hW)?=-Hdlqq`a)%WG*cKqMfqu&U6`6B@bTa*hHb`MGTvKIJRjs3NL+*6oUu`f
zPz-+a;yzVqgUnl|_Ft%7(MqVuf;hXE{lHCF2ZJV3dw8A0ZK9=1GTeu=CHDQBU?IYD
zYb`v2rzovi+{2bQ@h4?87jd5uw$%IJMg@8LZ1vzM6o{&c7{V%n5d_#@0$C223kja0
zjv%e6ch#8!Yiyzet6(Ps>o6M6;8nan=LVmWkAUisOgL8(UDj`QAml+b0wtTWQz}))
zSJ`rn{zz=D(Z4h{djmEwSX!(^ZPaMhTGKdHXyg77DUCNG*u3gne57pNGR1|dUZ|DD
zUz|F?3wuqfM>2#Z)dh{pi{q#ASe1LBs*PR_05B!hk@A>Ki}d9}v5yvdfiOihrQ8wUSumgQPT
z^#CeUufkXX@5DLrvx5#hRD)I=NS3K=5*W_V>qWl{rNnBGEPPs!nOv=RtGrjq3z|oz
z%TQ`338%qxgAOAc(jbx<>pSsBsbK8L>)Xq6SeSZ@BwFdhWMPA9H$=OVZ%8pZ3SwOU
zve7>|_N5K7hM2X<8_siH#wcItPcL%K1u0ta&UGs3R;U
zDFUi^?@j0u_Vu&Ua)bjE8WCg%lxXp`R{m?P8%2g!!Sm&i8ysliZz-Pe)W~iKi$2@-
z%_3*UuodHBQkRe`Gg%(oKyxZiY$9Kkf}%9HjO|Gs??vP=@Th3JlaO^YUi*R06`J)L
zM<&jp6-PabbnTBvoEC@yMN~q%Hte32CG^+Hq!Y-3#Bck`o&Ye^n)8gAcjrS3G3;f#
ztlv78_U$6c{iV}g2vq6cNn)6j5UD?NVll)n<{W@3DD~vmQD0afGzl}{o*aCRADki_
z=2bm;e{nE5XBgAp9!e}Kj3yT4)qV7PJvnnErUkw1#M->mWvgOe+8O_dh*2zSE)^88
zHm|BVM?!u%g)5yXB(SvQ%{h1(*lmIK`cKw|O268HNamNIhp(p3)}H)Y
zPDp#QH5Ayq^3-4%J5cMD$!OkkaoPKe-}-JTT@VzuHovho{+xMvA)b$wYN|zTDK{_A
z!=;ipwz8(>5Q?(SiryT8!!Lqar~p8UnO`j=uM&6I*a>7SB%*^ANS&jk`adDWz7Sx2zfof8}0FuZtes9;}u
zB+1-Zal>$baBaxDuX&9iE1ln=o-T=^!RCgr5bsJ~CbW6gB=GQPFj?(4`p2#G(oAxe
zKV8Tn{kWAQX$9i_OdFVjLG*L=sG>-tI9wRH1Q$&*H~5=?sf
z00n0WnNK)qk3fD%dRC{TQE?y+baCD^r9)P~=SLLO6W>vFO;58*F`ox*%F>k6!x3eP
zc{T1$&hc9d;0GDo(7-vRvd2`T@-mUcE?7|-H>ONK0Yq}-H>J~aChwpa{&C^2T`ni|
zz*%QM45LVV0&)-tQ>Q{NTp92^7BAbrnT{X=
z{9VAVs&sD53A%Sg-2258V;u3+r`FgO<8l;^HMYd#YmI#r=S~9KckScO`lDlr5YJ*H
zTi?`7<`$KC)kJX=7tUgxcLwDBKwjd8!cf(cQor`?hg6AB>D0=FrBh?)RW8VhP1ByN
z)SlFH0!LQ*%68G_C6fTCp&&2fem+vRBmRkKB$Xxc=k(;|r)@Y%0}Wnp#Qlu=W?q%I
zCiOVHU(Drsu?a?sn+Gsw=b_S!Z^?s&q(`@$B9FqBJoJ#Xr)3nW#N~ydM4dP7PTb(t
zlMfWb={ATW2Afk+3ssZm9Am&uE$q-@f_UMx1Dod;oX)$GpGoCu2*2&EynoQJ>*{3a
zoZ^Vt6|5|YO|SfVPV8Lm$x+&q!JI(%%5kuSFHH)rbqC$g2l1>Ux5m8#4#{F8PY=8VI@V4ed8Ja-K;lqb{X!#!&;aj>ZKK?0ZXiqsqd&(KwQ!=z@*^8i?
z#a%onx%!-sH_EUGHPGr3#5%U+M#`Q?w}Uk52@(;DP87;v74K_x_RR*0!>X&5ktlO#
zmEzeP1rG74R6Zc)k)ZLcZFSRy+?rG@s)+duS#@ktn@C|03e3*a8spHy20vtI^`9bT
z_u`f)O#Ei@b@NBgI_(O!s3JdE!u(*Tcut&)y=WsL6Nwiyyej-%DU2D=c!%rQ?BN9R
zn<^_3*dgnGGaw`s2nTI<@3*@soU1iqFLm{L9%O65oe^%}+Em03Ncf~gPHAW7B|LXy
z0XAoQ6Q0}EOJTxui@bz$6>16rPWHPuQ*dpY}NlQP&(W~Yj6k}hp_|woF2JBV+Dt3<`-hr%Ezr=pxxW7j1
zQwQya#XN8`!r~?-DhW$G7|LP$7=SE~H0T%rEt}55mQ81YbJ9bhyDkeI2OSDJDZ<&H
zfCpc7z{})0@Nt=f179eoSpdWVRPk$8P4*5(N=#E;;=Ie`upgiM9uKzS
z@x}&0gFt?wmMqhh0#=h0PTsd*lS2lcL+|pf>WYJ00cC2+LrF&Ku@*@=<3Z4k@6y#!
z1HMbnm)Yt|r(a~xO`^ssNf!ar*|t-Y`Oe|QKy0%RQc&v8h?=9KfjzMc^aKlRn{_^f
zPOx^2NbYUce~}0pm&&~$NzXK7ifEu4c5>-SK}EYd6hM6C<_M=<>z^`Oj3k*G7N#-`
zxyvde%Z#-Cp}s%T3I@_;8$>*}*5a{_4bhZ5PS`}wwZ3Xg`+J=Nw~gilc5$!BBVGAY
zD&t7Tcn~`6DR*<+%e&|>X3_gVDM4CAw(lkKjiS9|fHYi7ehib9a)?dYa0xv1kYhY|
zK1s8QHID&!cPqsnt$usgt_PNiBC$i=EUeC-oJTG8+^^rP-j9@t9;JJwN>$
z4<-AaP5#qrU)yC(0;$ZBDYK-ka?;jB*)PXZ=Ze?K%?i!Ktb-ew40db_8Q7VV*EtTO
zdUh6LWukK?5E%5p%-dPvF~TA|IkI*G{jrh8Wn3>JB}N<@nAM*td3w9`L)w-lniZ-u
zc$M{GEz?Alj4g%}{#i}WSxk1qGl~wxM_gCa>p1@eM+n3+@v-S<(TCEr%<+pqQ7xQ?
zGQ;jyC|j5B74kB3+(IwtKkA%G?O`f>Qqfnj3f7$OTvI!j;|gTIK$q6|JB8Jn9_vO0
z_@W-;zA>)&S=##f=tfTy!#_^$B-!k5xF6oc-c@rjBk6M~M|wHubj3;$=AMofQ<_AOs>}JJ5>u%(%)41kNIq1IvFKc1K))za8*eVg&hY`m|wpzYQxnde<~
z0>F0FV=72u2bV~!IPY^z3hyaE&K20W0xTUoB(F?-BcLgo=QC)WAQ$vR`^$PY!pZ4@cA({mL4nip57
zdCG^p;&{{ayb!lpWN|AY_dYVga-|DRmxFPw@mJ2*&FX8R`r5DPFlu7wmpdZSrh4hXG*R{@B@?OJgoIBda|NU)=bHI
zoUCH*`Sx;vs`
zPpS@9wL>DBnYNtN0#XtqD+Z<19QA2O#!3`2H>av3C%Z1K->_Y=GO9r|_0?TF(ug(M
zsfVgD>2Z;^IabF9Wh7QDV{@_5e`@_9uF=vT!SfDZzgBP77YHt~taOO48%DIb^uUh$
z`infoEYMh5Eqxxb9)of#dL0(3HGTkLB(HK?r`|5C7LpMKO)@-WK;T8j%OIznZiwbB>UnP8=V#ywX^
z#w%pd#G^D3+yFp;7Y+X%**j9Ug~Lnk%jW3BS_}vJqIQ=_yHuY?brm}Bto2{Fs__T8
z>m`%(QzwTF&)35W3APj?m@{JQo40Vp&ghxSY@oCQu1}i%Y^G~yrc>?!%GwSUbZPtE
z`JSM$UpOC{HJjhnCYC-NJ=cy1Hhb%;Dq^GT&FVg(_S`i`KL)?`?}%Bdy1Myqr4=Ft
z)m|;AP?7ZW#NlI?Tw^Wh|f_hvJC4dygPAxw|6lgr!oKdcOn%DRBs|th9xAZWd^SbKBpPvt@oi4p4n^m-7BH#T&!dE0YfwmPv
zJvr9_xZ&mt8a@SddBG5X^FI&lR@2vs84pvpH}Kr*=JYUg(t6T3t2Vv*z-nBnO6}NE
zd7O;h6zmPVa$?uX!^?4*Sy;-w*#D+hP*|`1P)`;;LRIC&r<+@dCU=5$4=m8#=W_95
z9$r6TS8#2ZQPdPShq=FYud1yz-Ugeq!-aNd#NHAyp792bt!@mP??z0FA2Vkw_-1e$
zFc%5V;5y)fhG@XskZJ;5K~{qJfOyyR?QP)%$eys(X!`_~u7!y9`0aNY8C#Pqn;O9)
zHV(3XM>dH7)_*;5Za{8E&zB~v(*;JqJMNKpY=6-}Hh^_{2F%S6Fae{5=^|BJ@5~Db
z;0P59g7!1|nqyvOS9?e&k39|Qw|(EGD!0KUe^x5=>4YiXF%YJxZn}qQ55!Upy%(K@
z<~L{lgng+3LFW)>Wk^rl5&0K-bTpl5L`;>+E#Q^(V$QsaqM_u^Eyz6-cq3@0gW47Q
zgMs~Vq_Bar7K}V#VNjuQ?ySq&@jlx>);I}-OG)PvYaoGb&st}{GXTOlRh~YW`8{XK
zCi!O&8%jRv05ItdVe*_@YgZf(29C$6{J#S6FL59%7jaI(AhDDH&{8WCD?)$#0*U1U
zif=ejaG`mbg5nn$D88S>9m1==H>n7{S
z-m<4;{-#Kz1XZOyO--#9yrgMw?PQ#+F}XR?6Uq7(IU_p
z*UZ@^jji`;M$ZZU{z^LEm{a1HU~O|wvH0%FS+3Y}66jWgl5kevkUa$Fb1ZQfV^SBg
z)~s7uhAeXr{66iM`zERZg8MVJTQ8v1(eKDRRM39wpb=*f=Yuiz3j0JdaH)}79jJ^bPd-8#dQb7oZ4CAoR2{*B&Yq;uo2y@+8FZ|
z&34nQ-JV*`uQN$pq=D`8L=KVU&RjtdF$wI!^$qlh=Qw+LyDFS2pxOY(1!G1jS^{~Dde#<9}X
zTh;FEOqiNIfN*GhA@?=5i`;6IJ_CnLzdCeZm;2I%{XJa@R#BtYy#(Fi08_?wT%6?G
zN8}q53FEtj9)%%X@jGF|;@92I{Rlhb&r_+EN)QjC6Sr;n9EP5^1?f3rtY%N+B&s8Q?}lkqvyO=}aXDxXS++z+i%7g{o)&7W4e~2kZ8xiz11ICtT@a)-*m*yU3z*{=Nj2(#97}
ziWm#jI2HEQwIMUdP)B#a3U7HsY_^}U<6QPH`N6RFKJh_Az5^He)_fo?j;zw
zh@gUt2+okp1-!bth#+0e5xU$yV6&)&Ps#-YBe`H;R`bHC_W$92fq$`YA~b*Ib^&%F
zE>!r`?E){8MTpQlJRni6ajSa4eYlkuxm}>fdS;i%iRaJzu`
zVoHGjGV8n4Qnw3;Kxs9QN|dA@uvYS-CyNe3N`qGm&={u?;>Uo9I@p-VH65YTZICi}
zv%tkpyYUL^T;4+5EO0h%kkdNyRjEnVspJk^EHGRpP8A3?|BsqLp_1yMJD&4*Matnt
zEF})9GZ#)x%iJsQC@{dU(;I~T8|sCze8
zyG1AOj?}ipd5hImMY>ma&++yK-CC@WV^ufTU+RxU-Cfa&ZQMofY!^9?!vuk08i8-X
z!H3;e0@8Arm(o~<@<_EKL~0Rf_nJq|Lj*lNz@F4CYw!}rE4LjkRbiCiR@v?34oJWG
zQpoHQk>Cdit{Gem*+P}w0L6@Rhf`1;E(NGG$tfH&5ybcVbQndp_T|1j6XbW!L{L
z5{)Z8}}E{XmeqjG2}{hcnqYd6KY8b0_hg
z==3`dGPXA}I?Psdn8MBJeAdt7-HbEn^~c8I9Jv$g4tHbS&8T1>TH}X8vj{AB8kt=EsIb%i8orF&A`kcVoopxh&F_8Wyi|68R+Du~Bt(
zb?es2VHdX>%N@iYi|=tk^C42IYA$M>dxn28V4+DGYHJ2m)ms_?Q`QmPV9OA-g=r$63(u%WQjm72$7
ze0Ht*G8#Mw+($ej>mYBcEOevu~(tx*WziE6D$ESpc{vf+36xm6@}2>cse
zIlMZgm2b_sODzAo8N^7&sr4?a^S{NB;0ipkzgCP?*q_f)!xi4F-BV2~rw=afrTkX>
zMyc>4D#&IrLlOydA|~`vLP_yH{^J=CSHj2YcmO0l7;c>Yn&|Iv?+l
z>vkfjt)1;H{nm_c#XZ`_yGx4JJg6=*iBF(6Z_Ec&+{x-f=vUE9TBt1{aBB9|UhPTc
zPM6TqWAG(!HF}DT*5ct;lo+>qhujjDJ^YmQ4HGKH`Pw_5EA~aH8T?~>3-sDHt~}`s
z_dt|(V$s{e^~YItTQS?&iArlGFPV!AwhUv_ve~YhALlLLS&Po88ISOe#h9QEBIf@3
z0M`O@!p0Spjmg(R%Tr-_{P2I?6
zE)41(~C3dM|P)!0etmm?S)~ig9%2R3(F^1wW{Mn8njlaS1+%r9>fqN3|z(K
z{=R=hJz-d{-7od_&M_O+kYKyz)!77>&jwoxgh)c=(0e0?hOV{I^5MZtIXFTc6&riw
zw|NGeM`r5;xl}diekGFpYEC%0xG&TkDjyzhJP^A%TYv_tXdreCUTrna1=(!s==Nr+
z^h=ehU<3NY`Pq-uxm4;*qRzO%I!=WnRFyiHW~T*j^4D-fM1-5JtoF9gen2=YQAFTa
zubuxI(M-*&d8bgITl>y8c*QKbdo?S@{T7|}%k0Xa8??rY_y{z)TH`}VQ_NRUu;I%E
zVp=Kp=A}IiOUk{+BDK$8)R8}k=I+oFVM_(da~(Hk<03&1#-SPGwZ`}5{nBS*Mar2J
zqflxGImm35Zg+7SuwrZ^8P1VQ5DC}WlAC^j!+_MUD8k4TNHQ`+y9F{dCsvzAGGm;e
z#u(=gkngQl`$%2Y{jbGtVq8b=v+bdS(qrQr?q5(4J3Z7qIotBu@Pg*h^x^41gumG~
zLO#bm9qxj383g0>q;AW-ZYj=ae5BQ1(P~VS74Lb3SK7isHX69o(!N#5GDx#Z2Ju+!
z;43#hTyUX=A2Roa%ie9ce=#0PyTPnjw;JVq8-LAScSGDubE!Wwcy+pv){LWh4~_-8
z`co)iZ`Pi4&#L^pYxy-?9`v^Mj?mr6@zd()%APv0vU4At(j
zlsp@LJ8IrJH(2)iZVPwX8nZ(rQU08rcoxcEdcl^v<(t9}dPH=#eLW;#(FgD=6>zsf
zIDvL^Q4b2+%x~KEl^H~G;ZtYW{dQt?xt{t@$~5iSD2p>zgd_f`|0_W*Rs?y=AVG4t
z%HK8XhbGS_vo08TCdL7=8yzxNC@&@Q3Us*`VdbO{=6DE`KPprlAI|5z)PK>f(B?mR
zX0er_&Akq7f^qc0Ex8%ueBeGsk|S;3$M?#c*7PF^K%kCr0}ai)_p?MAP@}7>n!lI7
zdO=|4+Av(oSqDO@Yr`)ONmgZNw0U0nrRk_paq&R?IB`{@)0Z$+dgo@@3t)h5>$|r=
zTY^A(e{mIo3DVQ4>B4N@X33L)Qjh{&FV?;#!cF?jY)`@;2I#sF-*HgtpwJ<0CQ!(r
zCh$qj8$mw%=D#z&$4+AIcnuGmuiL)VD#)|n6Q5xHmBSKeC$hTKE1cSu3SyTv`tOYA
znQx^32l{xHPpNas#I7*jdXyA<%&Nhv(|=2ObuHwAfkV6-uFu@zi&%j9K{m?4T@p<{
zDBIin-1uqOvNv8yYZb2&czwn|v#CwMQt_(njX&otF!Qc=WpCs_0}^;IYWB$`tI_1l
z6=V|_hAi+lcTDE>u^^*V8{WZjl>Hmc~
zud4Qj{MbT9;iS(A8eio8K7#Ij)>>6V0jP_R@5p5JLX8(S|R^)bin<3&Qf2Q-fdM;3B
zw|UX(z7!dZ8;RvQ^HOdplAFr5@OL~{6k5CSHg&GO+N5IX1s-JNK|#jR1+l7Cqko|#
z8Q)Yv(Y7l+#lF(J3MahWW>{jb_GDYyt8Ln9O~y)rxE9YF?oQ|0EL|rSp781D7ulSM
zx@KVJE7fbc&mV907pvDkYj3xjm=@zQECfxjKKNb+r~yl|V>ud-TmRo;y1(qibYB=;
zJ0zrgB;B%g(R2J1iRd2X*q#4;ne{PijDW7)|A%mHWz)&}hbyr!`G?YS>T@pKEgOmH
z>1g3m!MSi#7aUD2{VJY&xk!ymv8psU0p0NDB{<#kSTGRF9VNAp|L0lZA7gh`7jv*A0o~-iX{SMpf8n=K!@o0r=sbuuu`oJEe|29ViRx#awqL9&lx8u_+
z@!Yj4o;zRoQGeXIi`3{}r8TwFP|I1APS3TwFd@mG$H9KYK0?Iyc76Aev>!wW0@k!E
ze5MQRt`L7kCm+3^Qisd7v+L=p`)DT{)O}zesC$VM)QyI6@4~!mh@_fZ9!y?yn2`8u
z(pP5#xewf19UhTJHg;kbtv{WcK^UYUo;1B%{6j;x6$VrC2PFkTPUyBduQZwo+P32P
zLLY@I24c6*S5qskaR29)fq?C?PQZ4t${P}}t2&wPgk`pVIM41Y*2O-h)C~|XSs)#>ramEx4ajCWvW0r@?
zme6R~dlbpWX){LLlK$+s`iXI78+uHIHOn%e%O{D`4wd??3y`I#f>bf<52
z4x;$**dbn0)ln)#D3V@-my3;s=YC4t$DD5SPBmf>P&mty~Xa~TEJa`D33TGJJrR1s&Z
z_V1c?L*r~ka1bY=zdj^L{aLA>bxoYD2pEG>_M&#^BND6RcWLZwewT@v;P}e;ql%TM
z9|<;8E{hkiHA=cL-3(_aPJfGEzq&>$xK{Rz1KNy>yCkG(g6kFvTN|L83hX(Ot6G8mRfCXYg@Ff(rQ~?S8!`sgy0Ie;ZjYlZJ!vmu~op0{J-bk
z=b21Gu=ag_{q^(y{vEhE=ehemcR%;sa~WJG3uH(gFOV^Gq`*~lOM&Q4@c?B8DwJ03
z^E~v7o{p^5r?NCU4B22Yb6441;okU+RW3_dY|64Xj)v8u*Gzi8M>!<(SESc-@M_mV
z+jm)kQTEeDaavkCyd7
zcv*PIk9h4jBY0cePdGc}9;KX&9d}2j_*L`%%+uBrKZV?~qEEJdrX%T#f3_~|^BKsH
zQV}5)#C$R<7*~#pKO~Jr#z4;bWzeO`-$S@|jy#?gxeMg?IOlfW1F~Q5t1EH4zcAZ{>yl
zn!Do*d3B%=tMID>F(0rYOw}909JXxPlvXx-9~{;XHOO9%?u>)z2w<-_*!s!+;Z5=V
zpd@TId-oBN?HBrAjja{z@;FKM*v@W`?Tb++FFIgPyuTW3Z5a(G+DOFj2*%c!I6gm&sPu)rv`%3$%p8J;WdZ_xb#PsWZ%U97u#ii?3=^c9SA|t1)zbi1=
zR^vw6lx8C(oErmNGnh9hBVC$heh%Td?&{Hy~(g(7P
z8mdwFWBuQZSWDA|mt;46eN?WafeJ?JQQEO6R*2L+!KbW-h*{wX@CWN9fnspe^&
zRJUt)wh5y_vN-|E*1B6{0Z`#tf0^t{v<|1qFnJhi-a&`c;TV{342w&{bAMY3u03^G
z&2aV@={iOUoKQQM{YG|E)r&unHz=}gWmfIq5lvQ%P%<)Qi&VsjV%Z9_E}1aa-q{^(
zyPU=vsV54_PIQc(K$q15N<-_hby=n8*ksv%(@YT
z`^ywm-NQ`d>}6~PRc0SUpRayGHsLu<<+89@y+-s?!Nsf?yHxfyLf)^pU+HXY-dTN-
z_MM&ZXLzQO3aXwRX;akGP)Cbpp3RC-QWb}isyJ5S70^JnZKBf%Da}qtN9cQ;J*{Gi
z;B0#SJ({Zeil(Z}W1e|DJ`xyP-J7DSZkr#J9`vH9iree9rm7dTG9Z6gRh6g=)2gbn
z*Z-OJ&t6a_;_QqG=n~+Ag9_ACWp9|!_VH(7Jyqx0daAxp9cCUiYN|Z*j?(-6J+xFk
z{vuI0TB^$MuD3vd;ma1=P
zPcKAz(&N%`TB^30#)O8d_E<9(%Ba}(?x&0d-L+LMZTr+%Mrx~CYP415X>C<`+q|?a
zsZPBQ>P=gf-pssg&1R#+u+gQh3iVduUC<&p#-!bgwkkVx4539>@kFYs3cIPQdI(tp
zVVCt#RaL0h(pDWilrB|O!u4I%K2ZY>OJy2u9}~`~PTr`ik{!^m@6}T`Jt=Gb!Bv-Q
zbyb(>ZPj+6gPqyMB%qrnc`!<-Bmi;BZphQHfB`{vL`T=La-#J}PMN@&uEm?JwQ4$^
zB6MA~?~pnBOI29)Cj@iQdkJlEV4@AmC`Rfhv%febwtc_=!O)Q0_9qZgVRc9>aPo+j
zs$NxCJ%o=Fs<8S2ju9%XHp*u?bTCS(zA2w<%I!}Xow}>Ax*VG(pV#=F&xd5%=$({_
zQj0gOGW#E+!b)=~tY&sM(5&q_hI6BBimj{O+UNp1>Z=g(^E4t|tU|{)Yw>F#jqcj3
z{B5j=S-a>hj=$|`omEkX)vNX@z1v|SC=@i>tCqCM5lnc~gH|kO(^Dtj{u%96i;2|T
zevw4oK9|3)_AIHFI9M{Gy=tnXx~f75<7{}|HYGEQieza@v>`1RCd))kj4stxM}=w#
zsrF&j78jg#ycVmS{w^(6i`GhKz5PU5tgP>F=3=i{&%a4(v@<*Xu3alFDHqJ@ygTo2yml~HLyoN
zi`qP4NBeo%JU|@U`-m$U#u|4IzHmkPN+?rb4zm^~w@>OpvOs|-EHhf}gz
zVR>kJ5Cm<`uy(rWkvHKW?JZ`&@x_imzSujX5WtEk_LEMrO~l0BmQCN{9-HT3WUA!l
zn1jKO{D^#Ur>(O^;^oMCeRPs=HaFl82l+K3mKgzOurL9Q@horcg_$yhIQ#Isxp
zle>zYDHmUguVSBeTdmXpNL@+6XqXZI93pA@MAEIZ{^duL_x(md=SX3igA4Y&y^N2zwh!*J33~
ziMY+t82jA)*pPFs297w$X+3=NF@XgV!EG{zp;Er7+7+1OFaAK&LS)UKe@4g=C!ye$
z!oqw>ri>52ujQgIlABaW$@`mz&yl!-4-m1|Pf3(_ApVipIPMD4;qjrpv87L$JEw*+
zS-s1~cHI}uYoxZU{f#258cG^O&aHVSMmKodVKQvjKT>+(Ge}`ibf%m`1);yqTqMj}
zK4T;YveJBJqy~>T$OjYlV&yNkq?F}P3yC_Ul$<%DCWfiD#Tqg~8WFd$xb5@DuL(~1
z^#Sd1XQ4J9fyanAOAL(WDuY|}V&^7XKfI>16UEp^Sn5%7Bmo-dBqN|nn~+=h(%<|c
z*SZY-AjX9HRjDz-aiJ{lEHCQC11Ymc3FtR#w1Bu-D(eRb_FI49+~XM{lkO)pkT}pC
zKu_mB&?WjnQ};|G!{3cITyWwR?46IxSc$y9Tq;6>i7C$?+O%2POX#T?Gq{h~bbYgY
z@!o}8@_Wzu=H=!X+@nR9SoYa6S>}a&Zdd_mALaw;%-CR3USqBsb!wk$Fd?$c(z*ZgJO4CKn1LyvCd
zE9lu1~A_lJqhsi*}FsNpRhl#m^Aa2vrXxGMQ6#e}ra*+570)b|b_`z@SL`P^QwqFoi
zU8V{Y$Qa=!bX~*{L2XiF&sz6NP%}i-b`23%jn;G215qjF~p89@W=ICI5n5pk)Jv7>LOEX)$
zki~kaGY5aXoV_u6L!7^Jujiqu;_{sJQm&pI2KMxTYgWVIz%X_Xzs{;V<_+}WZ{Oe@
z5=q}Z=ONMoPvq&Thar=v;g95^E|c@ay3D>o9!uNR{-L&)wV~V$;dP&xVag&`kP$
z_QWlv43cHmF747h0`quh**()6IB#a(z#Is2mgfof3VxwZC#B$#o{eO9moB^nwCT{E
zfD;7SC3czy2<%-V)nU>>kWZ)6HV8X?$%RW%WATY@#
zgvUbDp9A9=t(>>9Trv0TWoUb4PwYncChS);7D;;>F$&-Q##yfk4;6t?D2uLk7}N4b
zlwa?i;HJY4bxxTcm#uYifH@l`u>OtoXMR|_)L+cGu^*K~wHKil|3iP~ff}ayr>t>L
z;@?a;8F@{-AsdcYPbc=-)e2(G)&*^xHIl6OsPg9Q#t|Oy_Gr4SP=W3y8(H1xPrNqB
z;(e%vdTC&i^)%?76gtFI%$cz)EA^y&IE=j~lWGP6iUQO92R_p)p={nyL30CEX?oJ_
zOzB6o%#2jzMbg19KmyU89ep|m9bAI3G}UXPityU#g$26XC&=a9pVo@7%13(s{2BIK
zHE73y+4NSv%qT}uD;yClb`E6}I!o@z$lN8>?B#CTw*rK1npFqrU9X6ql$lUjzea|;
z+=N^56~mcZc>YlA-M5e)V@kbr|-c!U+6=&ZF_U9RBW=FR=671
z9?IIVc8R}nZAVVSvjKPG+M~XQliTC68%vL7Z)9x9KV&^JR~n{g{i(3}waCT#j$rbU
zJt`}XA!J6*p+Iy_{1>6;jQ$MR*s9q#W*({j_BWW
z*U8zFY*btD&oOWvAo3VEJJiuWH0$slcfd`OiX`9ni2!9*J8~Hvq5MLgL2C9rP8IR?
zRdQgW{23#EhRPpL{U=$$hMdff&?}x>c5?n7I)HZC&`a%coQ<_dgF19Xj+6|+v?ogovVvn4w9_vgQoKGHGtTB|qdh>e}B%|#|&{rSa#^c6@@d6V~_LoKT
zJllS5)g7{4BMwU6+L`hWR;=}YX?+W;y()>)wBPQ_d@|U_SND8YdtXuU5CiJ=hZePl
z60AXWgwz>+jXk8vuq~#}Tk|>bM5XB7Fy_6}V&bM*zSpSBc{hsx*
z49{tR#q|rCny=yGKrob$gF=j_I<4^t>NMuGNUaXF`jEkO8R9#TPewX9fozitWN52u
zTJ)mH!}7+pFIql!oDgKl^7^$eo)k>xVnz%8zndlJDxHDd#4gjc^;9d24J__AL3I{J
zlZ8j5M{ienU;npYQYh!pn4Q6xgb&-J5;~~#oiz73vt*SSIF;=bU^HJ*x;tb6M)4J+
z^j0fI1xI9W$XU`pWV^g+XSbMmZs06wkCEZV^kjs+XhS|8pUV!dZEjrK;#vPwu|PtP
zvNn&|L5wQP(;#Akg4PA9IrdpEOi6vWp+=C*KV6mVtN%Ras)_uKY_0zn>GhUb$C#XgCs79%uo<^bz9l^Fg+6P0
zkzCA@`~*kpv>BDG^tbF3Qb<9_rMF{F)&>~Y_F0rZu!@pzK|h&4)t8
znnHOR{%$OFt#?c}1q+_jCK|6GhUD7!xD+jvkXyW)u-rh5ZONIi+sZsuw;49LvgnF#
z&B=W4y4Tv#WxlrAZu7+n*&9naF_1Ryt9$1`PHihPR$HW4OMwAJ^|yYtp<*SF4w>HypQ?1Xw6K*2b{e%eZ(gGp%9@*K#HV|)tS9v38
z6?#p5M|NCC1S!lD|lnbb=G&6jm9m2FO
z|1J4Hi0IFlx*AaeiTaCu510{lIxBQ*GfpBn4s+^x>$~C)sY&~WX9J%sWt|(I
z`O(AQXphbd{hr&M8Dp=T$(1-6>m=aUbS#|#9c6xGlv&-QJmbrwr)avT&b;tHG?u8DGWYjHP3}*Pi2Vsu(+#OQ@>`a~W0csd14u&hrowoz1X4+WRq3
zleJf@EnEf(wTLd-$C35yd@_^JYxa5`-qW7tFPd>+=#
z$Mg-{RW#$c<&Ek7`Z(CQdZ+XX*|W}=DJ7@*i@0HSi4;;R=HpEsvsrT9vJUT;e)~OS
zni0MsSORjdIUxE55;=Z8*e=0IM63T0*6Q|e>AhI}K9_$+QVFX&dLe6Bn|IQs>wJ-|
zBotP(xeKGU&>Rd56gi-N*)SN!(YXULh!u=7d%Hr}#+K>PArA>v$u1f?S&g^KiAn5o
zIWf7cHD^Zgpx_wUlK1gE1OcM6GfI!@3lkmoA%Z+hlDhBNvOp%jXDb@>}V@1N_D7B(R?s
zdU<|rg)86f-V+^Gk0$Gi}*&?0`6a2LTD
zJI}x4-DL0?;FE296!;Kh9p7*`xE-d7i_XR0WBTtG`tRrZ?`Qh&r~2yHO~#8%uPK1HsL%_q6bS${OZwaRKaA&}0M`Jw0AF+etMWz42&;qb&|
zAE{LkPg^VWqTnk`!Tm>ITv2co4(6SioSWHlHIH(eLdW~Vgwkby^HIC(!a$UHo&iwp
zjdsdkEMuk|bp-l3<=>SI=izl3bSfir6Fy=^e=-CRHJ*W)p`2=RM8;v@a2N}ZiNTm!
zOOUeYt+begR$1P3&}{+ye^Atu?V5*E8p#(`m9y<
zb;&1akruWdkk}f=%1SC5Rzx#UJ7+W8
zWRbxP9OV!KG~Exr1w7AiJJa~w%%`X*dl`4H)&cJVs0qWhQ%12|Oi_Q6urY=k4K4ZstiwB^m>oh`)LT*Z%PWU>!~~LzRg8X%B}UY>>}ZP(USyDH
zc-Od#!V+6$3(r@!#>sM<8`HbAz82EZ35W)lzl$XbT;%5&$#BjO)Y0eSWpzDUBFqad
zjF(lI*Wc)C%@Z{)q3n3>IWL6kA$nbW9atU>zDQyt+rGgl92wsx&LZWpw3-LE5ux&=
z#>9J4v*WY;>vq)fO*UXrwuz5zS$yY(5>0w}o?U%0GXLkrCre_feC8&LU8>l5#V(C(
zWr=;O*jr+6GKK;OY&*pEXz*9L>nuqD=@S8-ddZ~GB(t5$Jih$UU{h{1igCJEkiT=E
zQ%Aaj{Pk^75tXDX2)meYB{>yT&{aY8ZEm5dCY&o6uAn$mK^*dgllY4DlO2ClDA7T}
zQbDQIMY2>7gd1d%@gdCEKlqZa9v1iA%d6{$+4E{sKh%X(OSqa${p^USpFBG~q3=br=F%riMN739XU|CiOzBh-&#iTr
zmeq48*KJ+%HR=5qBwODwNUBw45U+K)LDH;?4U%rtyF`QSssIASbYpqZGCZxPJEU1kw!v7Gs`mg2EpGj_$I;k8(hX0Yq!BS3%7<|9r)doK#c!|MV1z%!tOYl5{cL<(k@S}oH
zGq`Yrtu%wX1s`s3{Qyj|!BfRP#^7GTk1i1+m?vf4Gq`@yrPbgW;^#$!%fj1gF}U1;
zwH`CLJP2cLHF&k)KR5U)!EZBoo!~bbe1qV12Hzxjz~HwDUS{wz!Iv6*i{J$Y-zs>v
z!M6#XVen?bPd9jr;9i687krSxHw*4I_#weRU#!dCDtL#%Ey3S0c!%JJ41QGbXABO<
zR9VdimuI`J2MnGp_!fhw3Vyr6y@GEtc$(l122U4!mBBLvuP`{QSY;I&+%Nb-gBJ+y
zH~134XBxav@N|Qh2|m`~)q#8tO_fHx-Y=jmH!d)QimkV-sy`(y(zG
zn-3RBu`l2S!K7n1=xn}aY%;L<$k;q-j?C1ieG>kSq|d7-Cd4K!?{Yxc%Leb3$*yqKHjM77v|WJerfgMZ%CwH-dc
zX;9zg>)!74EMNEOQP0&+vj|3sBTZyy@OQb7INRsE=!5?H4hn|mx~V&J*Y67KZTI+x
zvEe(^xeLytta8{ek7tuS#@;XwlMS}Dio_aWRp#ELByibxJkiatelP`ak)V~`YSWy3NOkh&|yL|$KJD&j$KjJV1E{YqKx(^^OzN!8*cc6d$
zX9M8|1H0p*>bEuoQ~p
zj8IY|M?0Yd@EE+I*mdC1Etv<_p2nk!T2u24n+brBN{gG97m>yHhLV=xsr?1(RnC8M
z8)L?jvp8~g5`x>mbK^PlEsjIKCuxPAM@MjbY=~<}FJ->P!&PLtFIo1iPo)XvHR}9k
zzU9$u$?Qg*%eF6M19?>Mfc>7?`~A`TQ2|)fU;JD|-i1}v96U+$jG8WH8hyDYSKOvcxr9gL-+`{B
zrr}5Rk^b`&iM26S6l0;`t20F|H~HbfH}T?H%6-PMSUbKcFR
z81cflrNl=)>t7PGG$sAaFZ9dT^pfu7Y51;mt)`S~aL}c>LozH5*XTaSUGu-5u6_8m
z4>)+S*Ai)G$|~_FchR3W?#W^I<=TCTohiwVzZDWsV{9s(&}|)x^$5}rqz?!>{o^Dwa$C!grV3o9vo=$Lgp%IBNkB(u
z%IP|(R#C|{QxZC>^JM|BSK;yb^eb?3@h3yG`C#LJOf0_67x5Bzm^%VUW1|%yg#(^Y
z(mIJV^ZCFu-pvw$G5nm0T(4m~j>JQm?O|YN%7eBC_R#YB7=A)YBI4Yc@*~?NnQI5I
znNW15z0gjY9ahiv48usxvYph53A*~8(9C(zhxUuAG_s-p91ME#!0Q$JSe%fv0pf`Iy`k-vUY&tiPqL?X
zvbdHFYS-%QRTNw0a;_E}ofZE#A@+KUZ!$4dp*1|c4o(ssj&>wkjNm~aX$iNMcV14@ZI|{H
zteO#9yn&@U{r+j|$KTficN6^epS51~xY&fSu_`(9-m4Oc$sEe1%lMrkgUjW+tc!5e
zgK{8^X`#jX1dbAKLcU~WI1ZN@hgR(%0-TSU^Zzg(+AFW7aED6TPGE$v?$2xWANhN3
zW^=8_`jB8w;_b6g-wYRiU%+k67$s$3wB$Xs=d4%s)FPu#V6f=L>+hd{RBmFN6nK~Q
zA^ONfNwq$`Yr+CA|pKr0h>E5yX|AZ((`Y_fSPl*yW&O<`6hpr$o84=fePl5_C
zaAEblI|_9p=={%tjKW&}Qy)B05hJb3$n&TS>r9<>y=?g_8$~(U+kv0F5JIzmL=C|Y
zZ)J4f@p-JT{x2itfeVp|Ey%yJbBS+bz>^`fePLGA;jI0~kn)bwvfi#>U*yiT&fXvT
z4rhDNs-1*Z?WeU??I8oHfTyh&-;zr7G(5#-l0>GH$oZj|R=mf_>Gl0sTV>q8Vl3wn
zdnv2JW@#f$u?hH`amgUb2{IfW&n>$;Q@%~zNn~pY1t+^N;^&?Q*%BichZ7V)-sAVM
z`bpKsGH=pT&i!vuH0x=%)GL8)31qNbEr*FT7eaVPc5%>
zpSU6JKHQejp@j%9+xp|%wukSC2Lw+t^xt&FptzLtz_Eqqf~G!ooqABDH)4e{92UxX
zMrX>|0LWzQKOtB?ny+XZb^=4+M+5=f4>c;9Ej
z7tu5vdBuH+=f+sr}mV#cafb!(7!3=m#mFD
z_fnX*eH*epc{IzneS5Rx3ZQ|aZ|1dqqFdH!WBEMP_8uSFwjBftUrA^ogl_n>2W*^$!WUD&UoL(n6bH?yJyA+6E+Oy7Cl-d
z*t+q5LmxrcebPxks(H>oiW7E!(|QSy3YqK)OrF`)cT>_IS*7|zi958qAz7j8nwEO^
z`gOEPNKGP&=L73boh(8E8x%Eb4b
zzCsCqKgN_WpON=OB|MFS^ekbfl(0Vzx?I)bW1CPw`Y4B_T@^LCdx;WhZE~8UMWaMK
z%03I?P-P1wuh|pXqop@jPoOUXq#rLL1;pD$P4W*WphWe+QQnqt>cn*J%P0?e1f6Rp^+8hqunvz;&Sx6HQKa3hu^Pxm{_Jlp?Umh)V2_!_b2+z(u
zcHOpiR_segNsE@x6z*V}0y7Ty&>(SrGz8JD28qn_-zOuCpD~#2Ct1kRYrW2tIXVZ7^q;c=qU}w6z5VCR3nEV6wuJZbuMb_Fh^uaF_0jc?m?bbGyY)f%N3*m#X-rb81yl(n$b5OyH4h^jj
z?;S>*F8#NTsyxwu`zS6w^xr;oqkHS{Nd33A(yL}}@yzu+)X;Z7uD%@>8n5(9>nI8;
zWWMo*T3Et*8j8u8h>G9nHgK8^|8CpAX~WxX*gzIUq%yV^w8t3upxNUace9#R_-3US>Dy7DPR
zH-)(8{clrsI!>Z{|SY-y7{zE
zl2~;tT?%o}JK8P^aRFh4xZp84q4Rh&3#GaLe^7{f&ql_}6Dq_-9x>@zw!oTrkqU9s
zhtdxIM+$LoB3j;6PL+6iQ;54@oX!^J)DhX;)xaF))?PH
z#uF>V{p6=%Li-~X;(l_LPRdb;YgD_+(m1RU_xThA%r=hJ8gZwykYvIM#QW-x#-WCr
zrP-G&$h~>GS!8~hg4|gsU@Z$w;;*A1cN5oL-cM+6tUJ4cI~AQfkN}=GnIX}UEB2_!we3-nJ4x(IQ1C9W+|zKfKvd)o
z7Kn=6egaXE+eaX(9OYh;s5dHBKPasgRLU>A}1PDexrbo}5QDqzeS^fby<-qp+v|cr^tiSI#wx0<1w^RUtBPDx8gX9O_ES7s
zPhJ*YIbNG>tH}N4;mG?&EYL;JRWuG~upaoiA1cE%;+@V$9agpqUSN2^Q-L6iU
zbJBmXKT0Ncwkei{jHg-6x4{Sz-MCj}&dMaM+RARaakH`NZGR*eT+%3S#Qtc2eh0L$EcL`h|cCwTyo7meir45qW_ypeM~7y_JZ
z!o4-OO5no44Mw7whm8*g&6N^i6-SLi^G4f7iHoo3`o5hAKhi0$yDG)Hg>ww&z#wln
z-Dp=k3PBe!lIOQtcTY99OMLa;9Hcz!g{{VA#ti*NEh@III$w@_28a+m&$Pf=7e4g2
zzD+Ychgi++4r?lC-P)rnq~tnE_!fw4nd>A+^}7o%mwhrZr4v)|RLez(rprgOeS6d=
zO?WMLNMwkL2;H`bZ@5+L_4@3MX8XmI5|qfxsj}$AfKM?%H|l})Yttw(<>zSf^}rqQ^MA}coYYVK(Q7>GhiUuc
z${xCjvd`w&MIU}pfKRhb;XMsMXINmy2i-}^sUw=|1pn$$98FRi2rB9+R;a;6~fxl?~TJ;rMl$xRda5T${3Oy
zd3HcHr@kNhl%wU)@8x_Z#hQLecs%;xTy`Fx5_w)|6e>%MdX`6KVIhaWG3nCOEP4Zc
zd-0UnYP0|^pHUX&4^3ZECd?_G@4IEMKXdwgzJgU;s0@9;twqtX(*89#du}e1&FB~W
zxU)H|w`<`#p%2|cPDbPn;=b1QYjjo68JYvb{1g7l*k-L~rzh%nWP=ro;f$?0Xia_J
z-#8hPuJSide|3d)9@zT7Aa5Lph|XG?eXhijZ9Vz`F*e5TE`nKf_5H%GU%lG8>pso5
zueQ!u;?O`358-y-b@osD&mp!Lj`!Y@q{lS*-PTEUI?{PM<>mmKq%`PIU@{W)YAs0C
z$Jc33XWO2BVmwWd&(H_br*8Cz`s7b|&mTILd*BOsAgwyT7?G^zK+Y3F`h3yTwO=aW
zy#Hbv=Bh?;sNA5NJ!4v#r{NBKfF^>lzq
zb$pN|ZU^7_g)Bk$*;kFFs=e0BnN0oS?Gody?T2{karT%c2aoy=41CE?U`<+E@hn+O
zlbdqBhBeV6f+J~4DPrg4v@DAOSKpi)vqz59DP*iZW$o<_9b-s=3?DLb$R**>0pE6R
zH?fFs=9V4@q$r^4b<9J@lzrO!?$l0sSMxj<5-Zb>m|=n?NT2|_D0xvAH7I0QtdNQO
zJ(_tKvOPELAeGLPRQL_P-^s+nJ=g@#ux^GYXpUE{ZwY%4mtMy`
zdD-kT#=b{X9jwOZtT&0DvoK!6%*}kuA9^XrlfM`1d(0Ud7u{|%Ik|RN`|DOdG1q6r
z1{16?I=LhQ`+2%b^zuJvamYnhSH{cONPldZdayI)YQEYRt-cIG5jmdDW*H}iH2NvA
zXgf!$iFMgbydF8^ABJ4ZTij0d*P{@5ob|{8DVHQnpw}3AsEltK@!{1nR%n)CuKi>d2T@PY-k9ymfU~yL<&J9ht@~pg
zsbzbf*zY^=DK|Z`I8|Q)#5N!|KM<`AqzObvgjXQiA^fxJ@?7pZ4#J-1X1&T-$G6IG
zwWs&6zh2u%wWs3C<-V>x*>NWm*ksh9a3>h2b<*&_(vjDOHIGxx3MDOMLMqg4%m2u<
zG{pMJd}m0u7SG_YTUf2_@uAq!aCI78P`uu`56<9JF*em1t$8(4-nZr^QMU)K7yX6e
z$OG3;c^em`w#}qp_VU1WdywMw^1$`3MHICA1J`3eavIco(vn!eGQfG;himmbayZOd
zF+21mmL+5T*2{mEFA5+U{qO65&=u9G-(S%t(!U9u$k=_u#4Agc&UD^
zGa+fiXkX27H
zll;60td$0~ShuqcVcI}V-QM<8lXBOjVC{hjqV&=bm-9K2MXRc$TmK#(B`Ad84-00!
zBIKOUPopJ*M<^S2;j|FIWpNa_G4`${Qu5t?qnCl{`BrVg&HY3nNT5$=N+?!)N!!&q
z&I0Wm_pbgc>~fOi&LgRM{h@bR*%w$JOb}s2b~jwpjC9GeUhL@tStLxM^@#0~9vNmk
z!=bWPtm!2>Ct{ZaWhL_dg=sbxtI`?UY(s{cWdi36hm`YjV#_nu1YR2SRS^
z!Fzhk4da8dp7>^OPI}yycYu#0iI%6cHuUPGL#>Q(>QOw_6w1nva1Rr@{_#58*rSS#BR!2%5`H^JUW8LYM5t6CBi-t*er=)B!pCRzmQ8EXmAzy>l%Hj7up{f%TBR9RMK}mW|MUBQmIAG3NCQ{u
z0~@L-=DVK_(`hN3LD;F!`p258yoJnVXF-f+t5AL#Gh)z(``7@hIuwzYQrmR
zc)bmOXu~vFnD85H!#*~A?<`~gk?l`SGvA3e9BadwHoVY=SJ-fa4R5#MRvSKL!#8dC
zfenw@aKLnv&M7v$(1wLJth8Z+4R5yLW*gpX!-s6R(}pkF@NFA**zi*u#-C}@_1f@s
z8=hms`8NEz4XbUq!G@b`xY>sH+VBY*9d$J8PZ0NV)*KN4UhBw&odp7*J
z4Ii-K9vi-9!)bOs>dNKMGj=^bWWz&Fy*eIF05^{lrEW?MDl)L}pn=caZD7w}?$3;U
z-6_4hNBVaqeXvZvWhs-7X+5lf9K$B+5tt0KOO70fdIn~UFN*aWqGWIRR0(`9SQqm;?N
zf}WCJu0`s6O4%h}PJRrmb5
z_^R#UZ!!5O(IxNhvJl^;5x(=Gab-l<1-N(rmV7wrDq5MOr<93bz9l{>hr}cKmhh~6
z{AaIRd3J5ML6z`3-J8$PE68eo_##~X9U$&QBAml&o8Rf
zpQNiuOA)`st%y_N!&DM}wIVKwN6jr=rU;`J6a|7cB{=Y#TT^ah(4{O`Qycz*UZo|K
zr4bejgXSy0s#5z}5VT=YK;n_`5=P-q;YZ;vNhnuTbWCiYICtOpgv6wNp5*=m1`bLY
zJS27KNyCPZIC-RZ)aWr|$DJ}h?bOpIoIY{Vz5Z6Eh{c5UB05M{E90pR#sM3f1{>0
z5WMQ@RjaT0=9;zFUZ>_%)#R)y4;0i?6_-lwuB0s$Q};Erf>Je!mQ1^kQj$ap5>jf{=b
z56da_3cf0J|1H;JTV!0~UQU|jxL5G^8rz@ro_O86O#I@n1ovX?Ek%|D6Jgeb?QlKSvM87ZZSbtSekQhK$|E6Kmfdw^aorI%W)CB_Qvr%Ely
zPU4d~bxJ1VQx}~kYC5eXZ5dN#%<-x;W`ttCYSgKGEhoN8zNO5PC$W*1AoP?H9Z#uB
zokwXwW)6_@Nehb%nXU6Aqp9R;lCE88PfmSL3DqbeZN0_i)ooDPv6H7R
z`c6@2h2wMb^VRC}YSQXG#op`G&|wOrhLiuVo}Tn9>9hZx^rnZ?tEP>bHgFYj)extw
zIx3*r@jc1un_U!h@;@yc-&fE7<>Xw}N~=gWKpz$gIbYHuom%Wl&8hD*)QoU?z14RW
zwJP;xMndV|ReH3LQL~gWQbw&(9fQ-39B9gOMvwL+xsn)Vd@y5MC@_T%IE1|lKfkF|&gSBdxJJjbsld
zzrtj*-;$G6{j?eC%Xx7YqY$^PD&X#8`vLjSVtZ@HWyzm5ds&J_Ut+hTu@w7*;9jl0+WuC~8N
z+23_;()`k9?#x3GPbjc&-~JeK}L)U`k?&MDuWdjps?}#aHhxMYIGmf
zCn`B6CnqOXe$&&5OFVir3YNsV)miE3iwoeNd%e1exeLn*`6;!kdKEu6K6rV-?FP8{
zC!hcMK>_b^|I!!-&A;Q_j<@ksGhgz_+~wSSQ@T(7$RMZxp=D*v4D
z-v6|L>tB@XtNnArAK#+?S(|^<10RkcF}imB>egLf-?09MZ*6GY7`n0Prf+Zh&duMw
z<<{?g|F$3e@JF}*_$NQze8-(X`}r^Kx_iqne|68jzy8f{xBl0C_doF9Ll1A;{>Y<`
zJ^sY+ns@Bnwfo6Edt3HB_4G5(KKK0o0|#Gt@uinvIrQplufOs8H{WXg!`pv+=TCqB
zi`DjS`+M(y@YjwH|MvHfK0bWp=qI0k_BpC+{>KcO6Ek4G5`*U7UH*S}`u}74|04$3
ziQP4W?B8AfSk8mxfZq9y;9F$LoF6iZ-M*Xnj$BLJ)Z?4mzunw7_4wuvcsKW(dwhSl
z$G1FL8JV6uYZ>`1(kHT}ZpO$-{CTAguW@mCWl7c53j#%fa`>UxFRCrAnYZkU(&9jF
z*`q0Mc+_&!}WE8Vq;m+tzW+$!l$R#71V7|Zk0AZqhN6z
z>opd21qB-j>P@TLP)8`mvaYPG%X6^@^t?zN?XK!meeS#+g*)&@!_eR(BCFW1F#!gsk>1p~c#u=CgD4_bbS
zzeUuG!zXcg%f-};a3_RUA-hr8K?uJ?ILLQ+pNIj<;)4aPup!stnXrRd~ya
zDoZL#YrH+n*;RilN&{41dB9s-RZ{A$TJEiOc=Zy~B+^}laek9&Kegm&GVMTeF&Q`6
z)jPkORn>Gb(=trW6Yt8E6X0`$Usb$wOqb8}>qxrm+(r5?Db-CO(vLS-D}-6JaPCBN
zVjSsTr#yblcyEzi3TZ`=p-JI*|D(o3+KP&*t0iIy-J>}eq8%5mdyV!;rI&PyYE}fL
z!fU;0rB^Xhl`r>}uB;BMKJ_1`w~VG{4`M}Rw77`Y;524wu-=uWE351y!O?b49IZ!G
z>4#o*ydC_r1=$O3T{GeF-?yBX^Mk`lj~;vLYw0eEI_K=AGC$QWy_iP0dMW2+GEvno
ztu0?!T~T_uGY&5;DX$GI4V*b`Qgw+Lhz*%e_*dfYKhUiPmL#fy(-PFc`JVkr%?Z_S
z%rWu;cY2k25|bqY{rsNtD)lDD`R;#Gj5=w`;OdmZLFp1k;@dY$slQ{sW`}VNjaNeh
zNopu*3|*L@hEC(VCZ&1k#H8sXcYD;ZKtDC4B#HDBm1k;vO`q17{ZYcqSi>9$aK*={
zc*5XP?MiT|1WM)_6t4zN^Qb{nk~{jfChm`Kc2~z0_9^HuY3(MB0I;MlX}Q(V`6>II
zytSOJ)E_VbCvUv(5kq|ahsUbnvs0T*NtAN@Z|uz2brSq&?pKBo0k!)_k5e?W6`fh#p$rBZLH)LSZbkUC%6
zSN9*(M-3`*QwMQU2fDpTxpHSJwFDC`SDz@=XMWU|){ErtGH%9vgn7r#PZaF4AsFYo
zHyRe7%Xu-zNvnVVKB_-?>_0_XaD1Udt9!DPdLHxFFGz@AU)`Sis`&YR!uj6j<4k?F
zQbRvC(1o6)L|1?1@+K;8Nq^;Cn5?|e#alDHMYWcpDQj(#kqc@`;E{~o8&%x%-G@%@t4
zZify%esd{8`b!yWoIFS!)kLKa9qA@b_Tn{N{Ym@RUni3*Pi
z*Oe%BD`usgrpcG-A5I&c%QB(>v%&UL3NH6Iw?yW13TrdLxd&{Xi
z1Z14Bavf_KCLDG^j2bX4Ne#F;p}?j4qutMj$D2B&Zim-&)t^JF*RMb`(3L2N?VgA9
zp%WA6D;KF@3k&Ek^VBfc`O4HhnOVblL8e^86V&iPD(zzk?PIVS?i!#>uf$D{iS%#k
zb13y`_wVNZCuldnLJs9*1ZA9dWBNP&yu=<)=cjZ;_V?v1xqgNDi=FR@;JYwG>^|U1
zajO)@mK4U86xveCl>W{AkGI?J(BWq=>i>Y5;)K`vC+!l(*@fY8w%OGq|1KF{Ih1e>
zaWlsERYMj6skoRm1Nj|E>M^dzzD~6AKg4<7vbFWlUo18OFRcY|4-h
zLpxLF(oeRs6M7rtJ|-~{mmaGaqsUL{G`C8fV)sQU7jaO=Rx`VGjSWBk9%BQhD-Oa@
zC#lp)Ds&-^>Y?cgYUH%L)JWIus{3q1qSW>N7}6djeX}2ZGl{;Ls0Q7fT&-!bFrG1h
zaey(v_+j26e}l;1p!v2R>d?curTyss>el_Wuh5P$$*F_ITTyR_DWDDny2i$Lh+95aM;2Ttu*(=%LpIGl%Y{gmgvglZ>USHCFLZ%Vv)(e0)u>`AZ3pI2%J
zM%s$N{zKwvgRC_e2Zqca*x|GWhenGIDD_9oqc)99AB$K=F#kGzOyb;gkn!mSrCxPt
zdNO1E%?Yi2_s2EIR>u@Z7eu8CO}l8(HNOu%GeM1;_KoOquI16awJGl~^7|$2_6My>
zJ&keN?TO~TEB~O>Z!yl?XWDWJZTV}xw&fPatuIS=`}<10k8#pVm~)T#81>lyP;k5VVO8qHdferUe&1l`l!_)F}g66srs
z^UeCuH8N3+4D?qcOOol+{nW^=G2dS6bQ?cfSp%IYudR~Tp;Hso=s>A!bV-S8^t58v
zXxGz7)@6QM
zrV8#-&5pb~Ulw+oqq_XqUN!iSe7vE{f8^s09sak;$B%SHii0+};JeN-{GmK{)Qi=G
zm<6T6AS@^flr2`*@)gOgg?nc>xN3`{{{b*X*tc{w}+L*u_QVfw@&R
z3t%)y6x>0Nv!l^KXP`BFU4aekD>Pi!;#1xt_TfT*hog?g9rEU?5EC__%Kb0~_J{PX8
zE>)T0I;X0#wyL6ZPN1g3#8RU!)%L-f8ki>83
zj#*S$rkg}b&Z=TWzX=Zkh*YWjrJN^pj*8B$%`ROQT(P3Grl6*@7GkJVV&(@bE-t5%
ziYgXW!nb0-Gg9pGs;aIGR?mf1E(wrnVG5;+%bcQWO89(N@`42punm8KtTHlJ;YI8{#E8#scxLDh2n=VTL+@7t?@rvs7y&4dY@6qz+O86{UfmROHZWK}9L@
z{F9^e=HwSu(~4eHm
z>RPTqEG#FTT1inb^=*565sSsj7oAsCRFYS|tcEKOl=?N@2IiLO_3<~_LlMN!&ee&RkDtBlgoV
z^39a1zd26P-%M*d%zWE^femGLk@zpcNZKrZb-0y4FNUc}4acy+)cKcki2pi_M`QpfRX$lAEPCLe`0^%0hIjx93$!7jS+tjW28*aVZ{9vjJT&l6rqn8q07Ja
zmwdvXN!NSA-@i6r|F>d4vGASA!HI>x{%_^*U!Tqin}9t_pRfsd|MhwMH>B{tyh#+~
znDv({Dn<_=`)vOY;s5zN-?{T7^`|?nJ2~j=@e9X)?HxMAMNB9cz4rCjyz27Tu6S)q
z58sT(FC2Qa^%JGexYmS3RaWPm2w#5t-buC%vurrih8Z@TX2WzFrrFSI!&Do(ZFsbg
zq4Rq-Y_;JVHauj*7j3xThR@ir#fH0W*lfecY`D#a57=<44Y%0vHXGh(!v-5V@vpJJ
z12(L%VWAC|*wAmo3>&7~@N^q`ZRob)(O6UNzD)S82s(Gz_LdD>ZFtCr`)$}_!)6<9
zwc%zPZnEJj8y4EIz=jz%Ot)d04ZSu@wPCUi-8NJ67^?HGPnht$A)*?=`K|O{LVnuoY>z2TssI^0Ps5CKFk~7
z&j6E9R9ctjQiFiYFk8mDR0%L`2)ujz2%N`-=uO}Sz@=>5mx2pCG*YPtzy-dIkvNr?
z^BzpW7?<(_zrZX6SED%3!bn;HVC-n(#NG|e!PJqi==^LH96vV#Cyp_AI&kh-(!#$V
z*ou*~1b%OvDeq<=dcbs8fp=rX&lX_9cw?UkoMq!J!23@{R~d0W0PMtkB>6c_snalu
z{G1LfJ{=x`&;*z;k>Y_T0#C&hh#%nBXaq~ZmjZWUq%6CE?_wkm9|6xzM=lThEZ{dW
zLgzKWUt`42R^Z4plzNPp8@<4DFcNWNV
zux2J@!A}4;->+am1XP&M*H9i5q}Ku
zo3qhD1il7%6GrmC3HTbDjxy{;R_WCo@+mlQyB`@O@W+4y&nHgsrNA{92`lh+8yEOC
zM)IaEpqerJ@t+R#V-A5A058J40bU3!!nA^y0H^06j|-jwtipT*UJZ=TC;!x4B9Lo1
zDj+X#0x!l$9+m+AhLL*z2v`SmOz0`F`cmq0Jn;ZeTS`9#KOOiOW+Ax1GcKp!flmVt
zDB_F}96fnzCPw0~SfPi2)u3u>axM>fUYuQ9|L?9lY#vkz?5=hp9-90<9=Ys#%~1v4wH@lX5c3np~L6E
zd#*6}y}-;0+8cfXz#n2H4=uoPRkSzoG~ksO$$tQNH%9zy0bT<$@m}yXz)vwP;GYAp
zt2KBXFg9RtH*gb1>Pz6+LFyO(Gl36cWc=I)jJe7#FR%mSK9xAd?rPc!xWKqorXIb(
zKC7uC?A^dTjFeH}6cji}|C$C|^G(WvAAvu_NdLMW*ol#{h`iJYjFiy}T#MO^|E<7d
zn62PyEn4NTC7csuorkQM#|U%Z2AS?*lz+pd6%J23o!p~L)!x2w=fd_2H-x7ghel;ddJ2E
zKJZK9U*J2xGGnR0`|mYl<^#ZA{Tf=4*1f>ZzcF))z(W|RFM-LwHMqcCm{$B3Y^7Y7
z_rPxf&fEt7cmiz(*l#=I2zWAZHb&~S8u&a$^0{B|M`<(o*$?dVn2FyDy!CNTeX-vR
z{1Zm{y9J#5gu%0b7N!nA0`J=a9~}Gv;Q2eD8+ab@SGy=L_`Sf>c2j=vEMQI>x7rku!F9D8!#o%ec
zGK}~an0d&w!A)nZ<0X~Kidx0O@_)*|RpHd&#F9hzx$e8d9Fzz$z2zzv)s?#tM
zR_^J@y`#@*O9JJdkKh93uFO`(B7t%bM(hRdwsE-&Blk_jUZC775&r^*es1gqiVVK^
z5h(W^1Q#fG8w3|9_YedZ_%j=qy9jcRK4*h{2a#nJvb@yloP3GDZuz`pea_8lj%S3(5)7nyGI3GBTmuut#BUii0J*caT%
z*bRKgB%m^W!5Bk+obSTB7)#w<-|pWs#!(55d-VgjkL&tQeT{D_*>P`v7yrcVe5d`D
zZ_4C+Z{picB|G1@{f%)UBK^aHnl-a#
zzkBLbOdYr^kfPG`+e@cTojH>gie!Ijf$YD6$M53W9T!D_#r4#wygprk*Z%8k+!f`H
zKSfd+r=L%qS?vBDfaM+M_0$6Q62iyDp?#(_*56+c7gvD(^|fw4E^Y$uqfQ#zFU4~v
z)Y0_`&oto*u1nE>!8`gIov{5v_OH_w^p77GDFA+GEiR7ul@|O|h^x!2pz)tu`qci?
z{g>GWuYY@BLdL0^}AQ1GI+eZQ{D=Yabzq^Fbs{B~WxVE>2Nf8AeFT9Cez*Wi!*IwZfO
zR6k~Y@b}D$648$}|JlT8$79ylzrAqO@mRO7abx3bZ-F!I)R(H%YiHW=YUx4ga7SHo
zHGFnM)}nEZlFwPs6^`vxmeX)w(v=%>HRPJeb&$m5p_lfHHp{Nlb
zhzdP|ZMS2h1{I1*MILfO)bMXbjXExBSe-O2)Sk&8ZzpdVT?>+Uw2Op?|2M?+v
zM~;;{{ISlA
z#+m>^lB||WhT0_S)Dbyo?5Vm7@ZQmw|C8Xm
zFTuJq59e+GKc*PIfYeh@0e%JG*8+Yg;12=*qzimYzz0XeUrd6ZxCH)T9{fVU?=DvA
zAmEPz{!74r4fr#FF9Q5!7kK}cnBPJidl+0x=Pklo2kWomx1fZF>M1$eUdb1Ol$=af
zayCOr@j4}!50>HYe;44r0pAWjOE17j0e(2(;{iXto|329D_J>6$!p0`yVAA+g-4q*r?-BL*pXG+D`W8A>*_H1(p@rglqJC^I
z>f#_#my<8sN_MS7AB*u3{m;(L>)VL6~4S_
zusV|K&Ct-^A-zMw^sND{eOom5_HNm?tU^fd$cTuru*lG$ppY+*W!F085J5H8HDD;Lqmdkhgk1pg%&Nmn>wFIL!y{(-syKTTy*JO}<^gb#_t^SkOd
z>F>mCKx?7^cxzBd)cui>;gQxxY@nfMg9d-O{r1}%4dBP_Cs|ShHKWIoK*D|-L3Zh&IjaGL>SDK^SQN~zelZG-Q4Q+1BOH~Dl!OyboF^i*Us%a)~e=y
zX9OOG_J%BX>J%D!^|{sBJ7i#wIyGx_K@Xu;wjL37wSQ|UeU%&4UG!w0a+`1jOQlx^QhM?Dg=CtaCuDrhv^!)-?d|C52%XkW8FT+
zH!=)@7!lsNdxgg??PH)LLb{iShJKV!QRNzkeU+VQC3Vw-j>EawiJb1pf5x9nLmL&d
zROZX2tDp-0-1Yqws?AfzNefZ&r+BZ0PqEMYUaYCIuc~YG_cTRox-FNE8ABwbZaOu(|_3N*{s*Ee5
z*1=ae_=}63^>2!at}7-w=;e}8nCPZsqFaoK?qyX^HmdfrLk*I>Dp?Mz4EbEGlT(<#
zug){d9}{Au{p_(;GH2YfQ%p8)(yz^@1Vdw?%2n`i#(r+|I`^;7;&
z`YDZ+BQ+O=x=VI9!4#XZ#;<0(Lz;2Qn=HNMNInV-MEpAYay|E>M}n)~@R
z_0#kaW)~@x2ZCZZ9vdrYM=4Be$$os}@#33YLMKf(wnGFGIcq
z*>D^m|09X~VY^t}R4I|S9>^4b%c@nY{)TyHTTxNbH{XB%{kNEVPMkk~{uIi>ix)2z
zVO@Ii^UptD_wKvzK8^E2em#5kgyd_QFlp#^u)5OY_DA@~vF5~lXUmo?o1rAHxJ_M^
z~q_D71u>KZ42MvNX_!p;7pFRUx
zzdCg2&AGI8O!)^=uoCXe~-FUU=h8(^=ZV@XK~qAge!
zRUQ9^;CBIZuoK(p+O=y-;s?Am{`>ar6YP-+@RO}uw`#nxha}&8^Nk!ha6qu9pz9F^
zJ_7Hb1k9>+>(E;@VmtYA;KjL!uH1@h^qpK3fYJ{ot9&GF;M
z<+IN|)7PZQWcWP#Gi2`K4?p}+0=fGIZJz%5=bz=rAAh{?(MKP>{`J>i7oxA92m?C*
z1%2xb92ODwkKzx1O3+tXCjKUquO@R$OeX&16-)D(*UW|vmc++o;4CI%%*AAL&y+WV
z|66apwGiXz4VjovT6XW=t!bcqV82N?2H0oP@JDMv
zfB$_=1IG~hFB}K#)pHBCEaCbW+%M&G=x_(nP*weHc=_^W!8D}hKOi8WCFPizV}N}#
z*|u$)V9W(Rv({mb0mlUUYC045y?gfxb>-;Mqxv&xpbenBQBOEV#BKBUBC*Frf<6@K
z^qz?S+ai9uMB45Yd0?MN&MA>gmoFcNu2BE0k~IE1cI?P#-n@Bd`0$HhF9qYFX`s#F
z7?7`|hhst9`I+PY<(FUT%+KUG`AvK&3#5g3lmC>_tnWm^@EhX6heUc{3%)C21|9c;
zhCL#!K|{+oMZDh-S@bz#1Er-as)9V~X#8R8#bn}dvKb3g-i)454=68x`%>kOoMS|}q5RYCV$G%z8VLORf99Z}H@=723%GU%4cJeXB+y0t;}458
z-|kS0Dq#=)W6ls`|0(~NkA&Hb2TS5_IKu_uSD(#
z4Uv%lKHxWL2s|Lt?E?o5?Vt~VOMbtCemdfa$yt|Hx
zkt>$uKc916AS~^SYZ`hII!YS85QzW{;h-V!ylahXe*OVZmR*CGhCluElVF}Hmq~-k
zhK|a{#L(hO$1lJ9A_rezDKE{8lx4|5^6bQ5jo3rqhz$8!WYBSuDAMq$Q=cI-J&iuo
zCPANRlUhO#sLyR6Pt@t(5NE4|28}<)dm;8NykXbplNQ=TmWBqFlzYl`MLLXpQPwF-
zh5I+kD^q()_T)f$A+?vLfdo8!M&v;!4bg=n1AtRM&`_q&q`|4rZE2HWQ(M3$H3bb`
zTOGC)&CIDlgT~+6+q=yEV~*!M|GV*^{&GGr<3WAkwb2ve54$HjpBpW<={@D;sezIO
z8dib^rWn}I5ujlhZ2VtOh>!-O&!oZVGi_4)cft3!L|Tys@Za*f$UxXBqz@~@ANF3$
zKXh#o{UPd{p~KL^G2ndQIwpn|mc*ZSa?_j;Sv#YL&hO^D}5LK8DW@2XHy=!~WA=82Rtsy?aZJ
zg~`-c;!WI$yP<`)pZ1>X8rn_HRh-+HcFYZs*FnRk+1({?W)HE0hKl<9Z`h=agb-Oc
zzPBuh50!aiLM3fLBqybpaHr-eI^amXWFFaVOL4R64EfPw>&WxG{l9;
z?9t(x2I5H{gY&cTF*;*@L;9Wk(`IlErXRt)X3ZM0*=$0;)MPUrEU%|0*q4<9FT}{M
zd0l1u9MAw=*Z>+{bLul~Qs(4dvNENYEKLfMXA*;D@q`e08ZtAS)hTbqL1-+
z=myg7><{sWENJL&kK-kPJO0LGWz@?Y!Yc$?xf+LkfTN8
zL*&VL2Mt72`xx-I2OzJ6z4}e${R*8S*{z1;6VBS~g6$L^A0M$=t%9=|LccCGHB}}}
znj{MsF4TIBEgx-n=nGXw3wh7Efwqdi5dBfE2bieO1usvQ(x0*3$eyC-XICHNd8a-v
zfi5srhOD`Vy^6Z<_5X@%jenaqZMxuSPa=M*{T%#gJq}q}Su$|oK*8A}`Nu#0A&4o-
zGtWGu@!|TO{3Z>Y3ph_uu9&%oAq}*rlta#iP
zXNQl$`PnurRJQzUjC}U~X1RRn(qW8umG>WTmQv==pML>olw|JQxtAV#=pmUfVS?s6
z)|Fb8NCWkP`#!{tbX25;dk~z5Dfje|s4Lu$$j5q;_vj;Wo**riB(UJ8FF9U+bpMBK
zL3dYUkFs#aj2XWXPn;XonL2G~Xx+NC;Ov?n2hJIkdHOzv4&qDvDf6V0YXg?l2kHp*
z!Hfw@${>9cGxu=q&Gj?nofi%#rVsBZ74I8HK
za|}p>$)v%od%32hk4?EJo-8Rtq|L||@6)HE&kbAhPwYK)!8yalTtnOp{*-%W$`bdR
zNfUA7o-@wgY5ffi4VB>FV8Q7WJq~(2kO)wEyeZua~uJ*XlX&
zwbx$LaNHvRpH9QJJ&ZYWCUD=4drtsq#yzSUziCK(>HpCFLmwCMn)=3)xNzKQ7a*tF
zmq46E+g9khp5JI|DBqMh;!i!GUUEz*CmaLrXYsj7{LFSaMjQ{$=b-l`;7z;9IAK+j
z#^1;(OIH~t4JH#e(nT4fErn0~
zOXN9q2sUFW@VFV=%kX!Vf7%Gw@u+M}NEi3mM~oODIG3yC+wk1AWIf6fWu5mq2M~7?
zAHf=F<)6S^0(@tnX<+5op;{R
zc9%S$zEa+aKlK3e_#(RhvohmKK|}J>?0>>elZ)q^$FrbvPE3dGeVwQr00y
zzrja1jj`gq$}<*!WNspUH%fP+0?#1)zs&4|XCHV@<4-3^$K;Yfb!w@qDZd3xoq8PK
z&nb44_@uA@b7eJWdv4AW-+n=VNPn_4?V0vn@J?s*xZde3kN=r6&55TtN;hX&%~{rT
zmUW$_m$M9VmTAsX9Hkpep3|}*2P5C)Ij!mdrGLvzdA_Evh>@Oyj9qZKjO?0t&4^0H
z{XjHVX<}cVSPQnpoF5q*8#@U8ei-)rLvX(xGJWp5k;~+iZGTZBG6k`oejhtxY}^ZG
zOrZl}J0q|+{PB+(rR(89S{v7>QKKu?cFEA`652EPoR?s4&tNY24tD7n{PQgAlZ}H9
z9ESRM6N~bOzfm*>F%HJbxEC6X{aePo881F^L8Jh?a`-+H)Ay)GS^vyLtgO_b}|{8*G>{Q^p$^=VE+?d*X}_eRNUe0BEm5tk;Wn8v6<{
z*n1jE9Oyf7?aBF(cAaw(Z8d#a`YC+I&rHJ+t76QPu{Oqdcz%H~D#mvh_h*ca=~~3&
zz8BX3Sc4{Fy)cgRFXuI`bJ;KbX!68KzxJIREjs=(UiJ|7ZW-@mtZZYk>(fdt&`{7I@&m-P-))6-y3>+Bu?K9?U
z7O|K9XK%V
z$@saJN8rF%8)H0_N5-fa>tvkpLFfq5Ugwy2aURFMOCr91H;!{Nv%!Hr2YEu@dhP5`
z9qVI!hWm(2?3-wZB<
z;Jz~V0<*E7$(S#`ODA)nW3D(*9*0(tN1Wf+I>ECvl*bBjIxqH*cuEZR6^6Rv!M-Wy
zESVT@V~m9HLdK~!KbLgFnBm!{#tU)K@`$*riG8y@*Zye(
znCaUvyW&AyIR3ky2olD-)&mFbX>xCzF&4@r<6MldFxIGJT+aSyjqby)v`!d#37SEs;zAy3AjH^d1MTq
zd(%cI7-wRvk8!dzCl2f^y3i5FAA!CHz@-s^QB=X%Cv7+Zp$B{=u3={D;x;=xRQ5RZyuL;N^z(ROfMisri@)4#i3^5Pm4
z{>NFy5*e4k_e_QRuf!oaIa%|a_JH#s+cq-5zGxSWu40}jMOj5axa~64eAH+G<#PZ1KI_`5f}1;cAGYnc;@HlFGk08
zZqzxRYyC(3HU{6L8_K@fCdZjL;5`9?_X^1U_usE^BM!{0Lmgl}$_@P_+6$gb#9nwN
z+T~iIDoNX4+8>;wAIJ6zu_wU*3E`RB_ozg*7lAO=u`HYVoe~^&jN=TO
z{;_LkjHluJGxu44K^w;*e;28TTpLqf*J2Ga7=9hkH3ngAwH)g}q%Ecm)}>6-hco_+
z(J9(8E7nb1GjR{$5ZszhIM<0}{A0!V0sEq^(JnDlUJ1(-gXIa=z*`(HnZD=za|z{U
z+9v7|ai#A-**>uSMah`x@C`>f*Kb@ua{bD+0@o_-uvU5=crn(>H92vo{IlOzXGdrs
z=K|JX%Yif7;98t(FYZNf-N$uj0M?z{qoz+qylJAT|b1<>o)8Re>
z*KFh;$BAA56Aap
z%I5Syy`H4+PQRV&2CkiE#rD;G3@#K|gLN&w=PhYzY5I%^<7ymB%8TK@u8cnicI9cm
ze-_q0&O~B2k$;#MwB2G%j^_smYh-TS>|S#A_+iaYuAv@_*Zp5K@ja
zvSfWT{^XtUy9zN+vVYD;Tqm$?+GWiBX|C|J)5M$YkQaEa^@(#f_mc?AbvEa2#A3?9
zvYpvA?I9k>s+uz^l-J6p4(l`5WnDLHWLhj9D68u?u;E>n9*!%%f9)1LV;y(>_O)tw
z=D6a!*Q(=V8~D
z;BOy%W_q)F5qv{8{f`ETpI8R*AyRex16t)$9K1+3v^NRAmKCoO)fluEk6+Y^MazjQ
zPEADZ-nvdZ6@cy1ZpfYR9vUF8BH@m>sZi8Oz}sD7P=7kGiNRH@?qe`o;uE%J!TW1s
z)MV5dg?GorqBn~gqEhuIR{lG_+Ui!;cNp%cq9$=l0&el>Be6v4ETms2JeM
zzL`4Um%gsHKUDXd>e6m|ylI`eoi535ZVtNxhJ2W$>nEbc2`&^)!ZM%}>c{Fn@m_4z
z4D|=#dW^0~&Ln{+q+$%Ho`g1kf6oGEIuu{5v|tpf&aVKC+dyaA9691<#*SBxQK*Px
zFkAw>=@M`g)o5^O6tJrLm~r7lK9+~C$j!=l$<1vyNN)^!HdtPjFmA%r-S{h>QjStV
zOAIi(I_|Zq=T+|+lbRZvFml3l%d~_EiK&6!lT#AAr;ZvMn-G)QCLw-Q%EZ)(ag*AN
zlwa?i;HJY4bxxTcm#uYifH@l`u>OtoXMR|_)L+cGu^*K~wHKil|3iP~ff}ayr>t>L
z;@?a;8F@{-AsdcYPbc=-)e2(G)&*^xHIl6OsPg9Q#t|Oy_Gr4SP=W3y8(H1xPrNqB
z;(e%vdTC&i^)%?76gtFI%$cz)EA^y&IE=j~lWGP6iUQO92R_p)p={nyL30CEX?oJ_
zOzB6o%#2jzMbg19KmyU89ep|m9bAI3G}UXPityU#g$26XC&=a9pVo@7%13(s{2BIK
zHE73y+4NSv%qT}uD;yClb`E6}I!o@z$lN8>?B#CTw*rK1npFqrU9X6ql$lUjzea|;
z+=N^56~mcZc>YlA-M5e)V@kbr|-c!U+6=&ZF_U9RBW=FR=671
z9?IIVc8R}nZAVVSvjKPG+M~XQliTC68%vL7Z)9x9KV&^JR~n{g{i(3}waCT#j$rbU
zJt`}XA!J6*p+Iy_{1>6;jQ$MR*s9q#W*({j_BWW
z*U8zFY*btD&oOWvAo3VEJJiuWH0$slcfd`OiX`9ni2!9*J8~Hvq5MLgL2C9rP8IR?
zRdQgW{23#EhRPpL{U=$$hMdff&?}x>c5?n7I)HZC&`a%coQ<_dgF19Xj+6|+v?ogovVvn4w9_vgQoKGHGtTB|qdh>e}B%|#|&{rSa#^c6@@d6V~_LoKT
zJllS5)g7{4BMwU6+L`hWR;=}YX?+W;y()>)wBPQ_d@|U_SND8YdtXuU5CiJ=hZePl
z60AXWgwz>+jXk8vuq~#}Tk|>bM5XB7Fy_6}V&bM*zSpSBc{hsx*
z49{tR#q|rCny=yGKrob$gF=j_I<4^t>NMuGNUaXF`jEkO8R9#TPewX9fozitWN52u
zTJ)mH!}7+pFIql!oDgKl^7^$eo)k>xVnz%8zndlJDxHDd#4gjc^;9d24J__AL3I{J
zlZ8j5M{ienU;npYQYh!pn4Q6xgb&-J5;~~#oiz73vt*SSIF;=bU^HJ*x;tb6M)4J+
z^j0fI1xI9W$XU`pWV^g+XSbMmZs06wkCEZV^kjs+XhS|8pUV!dZEjrK;#vPwu|PtP
zvNn&|L5wQP(;#Akg4PA9IrdpEOi6vWp+=C*KV6mVtN%Ras)_uKY_0zn>GhUb$C#XgCs79%uo<^bz9l^Fg+6P0
zkzCA@`~*kpv>BDG^tbF3Qb<9_rMF{F)&>~Y_F0rZu!@pzK|h&4)t8
znnHOR{%$OFt#?c}1q+_jCK|6GhUD7!xD+jvkXyW)u-rh5ZONIi+sZsuw;49LvgnF#
z&B=W4y4Tv#WxlrAZu7+n*&9naF_1Ryt9$1`PHihPR$HW4OMwAJ^|yYtp<*SF4w>HypQ?1Xw6K*2b{e%eZ(gGp%9@*K#HV|)tS9v38
z6?#p5M|NCC1S!lD|lnbb=G&6jm9m2FO
z|1J4Hi0IFlx*AaeiTaCu510{lIxBQ*GfpBn4s+^x>$~C)sY&~WX9J%sWt|(I
z`O(AQXphbd{hr&M8Dp=T$(1-6>m=aUbS#|#9c6xGlv&-QJmbrwr)avT&b;tHG?u8DGWYjHP3}*Pi2Vsu(+#OQ@>`a~W0csd14u&hrowoz1X4+WRq3
zleJf@EnEf(wTLd-$C35yd@_^JYxa5`-qW7tFPd>+=#
z$Mg-{RW#$c<&Ek7`Z(CQdZ+XX*|W}=DJ7@*i@0HSi4;;R=HpEsvsrT9vJUT;e)~OS
zni0MsSORjdIUxE55;=Z8*e=0IM63T0*6Q|e>AhI}K9_$+QVFX&dLe6Bn|IQs>wJ-|
zBotP(xeKGU&>Rd56gi-N*)SN!(YXULh!u=7d%Hr}#+K>PArA>v$u1f?S&g^KiAn5o
zIWf7cHD^Zgpx_wUlK1gE1OcM6GfI!@3lkmoA%Z+hlDhBNvOp%jXDb@>}V@1N_D7B(R?s
zdU<|rg)86f-V+^Gk0$Gi}*&?0`6a2LTD
zJI}x4-DL0?;FE296!;Kh9p7*`xE-d7i_XR0WBTtG`tRrZ?`Qh&r~2yHO~#8%uPK1HsL%_q6bS${OZwaRKaA&}0M`Jw0AF+etMWz42&;qb&|
zAE{LkPg^VWqTnk`!Tm>ITv2co4(6SioSWHlHIH(eLdW~Vgwkby^HIC(!a$UHo&iwp
zjdsdkEMuk|bp-l3<=>SI=izl3bSfir6Fy=^e=-CRHJ*W)p`2=RM8;v@a2N}ZiNTm!
zOOUeYt+begR$1P3&}{+ye^Atu?V5*E8p#(`m9y<
zb;&1akruWdkk}f=%1SC5Rzx#UJ7+W8
zWRbxP9OV!KG~Exr1w7AiJJa~w%%`X*dl`4H)&cJVs0qWhQ%12|Oi_Q6urY=k4K4ZstiwB^m>oh`)LT*Z%PWU>!~~LzRg8X%B}UY>>}ZP(USyDH
zc-Od#!V+6$3(r@!#>sM<8`HbAz82EZ35W)lzl$XbT;%5&$#BjO)Y0eSWpzDUBFqad
zjF(lI*Wc)C%@Z{)q3n3>IWL6kA$nbW9atU>zDQyt+rGgl92wsx&LZWpw3-LE5ux&=
z#>9J4v*WY;>vq)fO*UXrwuz5zS$yY(5>0w}o?U%0GXLkrCre_feC8&LU8>l5#V(C(
zWr=;O*jr+6GKK;OY&*pEXz*9L>nuqD=@S8-ddZ~GB(t5$Jih$UU{h{1igCJEkiT=E
zQ%Aaj{Pk^75tXDX2)meYB{>yT&{aY8ZEm5dCY&o6uAn$mK^*dgllY4DlO2ClDA7T}
zQbDQIMY2>7gd1d%@gdCEKlqZa9v1iA%d6{$+4E{sKh%X(OSqa${p^USpFBG~q3=br=F%riMN739XU|CiOzBh-&#iTr
zmeq48*KJ+%HR=5qBwODwNUBw45U+K)LDH;?4U%rtyF`QSssIASbYpqZGCZxPJEU1kw!v7Gs`mg2EpGj_$I;k8(hX0Yq!BS3%7<|9r)doK#c!|MV1z%!tOYl5{cL<(k@S}oH
zGq`Yrtu%wX1s`s3{Qyj|!BfRP#^7GTk1i1+m?vf4Gq`@yrPbgW;^#$!%fj1gF}U1;
zwH`CLJP2cLHF&k)KR5U)!EZBoo!~bbe1qV12Hzxjz~HwDUS{wz!Iv6*i{J$Y-zs>v
z!M6#XVen?bPd9jr;9i687krSxHw*4I_#weRU#!dCDtL#%Ey3S0c!%JJ41QGbXABO<
zR9VdimuI`J2MnGp_!fhw3Vyr6y@GEtc$(l122U4!mBBLvuP`{QSY;I&+%Nb-gBJ+y
zH~134XBxav@N|Qh2|m`~)q#8tO_fHx-Y=jmH!d)QimkV-sy`(y(zG
zn-3RBu`l2S!K7n1=xn}aY%;L<$k;q-j?C1ieG>kSq|d7-Cd4K!?{Yxc%Leb3$*yqKHjM77v|WJerfgMZ%CwH-dc
zX;9zg>)!74EMNEOQP0&+vj|3sBTZyy@OQb7INRsE=!5?H4hn|mx~V&J*Y67KZTI+x
zvEe(^xeLytta8{ek7tuS#@;XwlMS}Dio_aWRp#ELByibxJkiatelP`ak)V~`YSWy3NOkh&|yL|$KJD&j$KjJV1E{YqKx(^^OzN!8*cc6d$
zX9M8|1H0p*>bEuoQ~p
zj8IY|M?0Yd@EE+I*mdC1Etv<_p2nk!T2u24n+brBN{gG97m>yHhLV=xsr?1(RnC8M
z8)L?jvp8~g5`x>mbK^PlEsjIKCuxPAM@MjbY=~<}FJ->P!&PLtFIo1iPo)XvHR}9k
zzU9$u$?Qg*%eF6M19?>Mfc>7?`~A`TQ2|)fU;JD|-i1}v96U+$jG8WH8hyDYSKOvcxr9gL-+`{B
zrr}5Rk^b`&iM26S6l0;`t20F|H~HbfH}T?H%6-PMSUbKcFR
z81cflrNl=)>t7PGG$sAaFZ9dT^pfu7Y51;mt)`S~aL}c>LozH5*XTaSUGu-5u6_8m
z4>)+S*Ai)G$|~_FchR3W?#W^I<=TCTohiwVzZDWsV{9s(&}|)x^$5}rqz?!>{o^Dwa$C!grV3o9vo=$Lgp%IBNkB(u
z%IP|(R#C|{QxZC>^JM|BSK;yb^eb?3@h3yG`C#LJOf0_67x5Bzm^%VUW1|%yg#(^Y
z(mIJV^ZCFu-pvw$G5nm0T(4m~j>JQm?O|YN%7eBC_R#YB7=A)YBI4Yc@*~?NnQI5I
znNW15z0gjY9ahiv48usxvYph53A*~8(9C(zhxUuAG_s-p91ME#!0Q$JSe%fv0pf`Iy`k-vUY&tiPqL?X
zvbdHFYS-%QRTNw0a;_E}ofZE#A@+KUZ!$4dp*1|c4o(ssj&>wkjNm~aX$iNMcV14@ZI|{H
zteO#9yn&@U{r+j|$KTficN6^epS51~xY&fSu_`(9-m4Oc$sEe1%lMrkgUjW+tc!5e
zgK{8^X`#jX1dbAKLcU~WI1ZN@hgR(%0-TSU^Zzg(+AFW7aED6TPGE$v?$2xWANhN3
zW^=8_`jB8w;_b6g-wYRiU%+k67$s$3wB$Xs=d4%s)FPu#V6f=L>+hd{RBmFN6nK~Q
zA^ONfNwq$`Yr+CA|pKr0h>E5yX|AZ((`Y_fSPl*yW&O<`6hpr$o84=fePl5_C
zaAEblI|_9p=={%tjKW&}Qy)B05hJb3$n&TS>r9<>y=?g_8$~(U+kv0F5JIzmL=C|Y
zZ)J4f@p-JT{x2itfeVp|Ey%yJbBS+bz>^`fePLGA;jI0~kn)bwvfi#>U*yiT&fXvT
z4rhDNs-1*Z?WeU??I8oHfTyh&-;zr7G(5#-l0>GH$oZj|R=mf_>Gl0sTV>q8Vl3wn
zdnv2JW@#f$u?hH`amgUb2{IfW&n>$;Q@%~zNn~pY1t+^N;^&?Q*%BichZ7V)-sAVM
z`bpKsGH=pT&i!vuH0x=%)GL8)31qNbEr*FT7eaVPc5%>
zpSU6JKHQejp@j%9+xp|%wukSC2Lw+t^xt&FptzLtz_Eqqf~G!ooqABDH)4e{92UxX
zMrX>|0LWzQKOtB?ny+XZb^=4+M+5=f4>c;9Ej
z7tu5vdBuH+=f+sr}mV#cafb!(7!3=m#mFD
z_fnX*eH*epc{IzneS5Rx3ZQ|aZ|1dqqFdH!WBEMP_8uSFwjBftUrA^ogl_n>2W*^$!WUD&UoL(n6bH?yJyA+6E+Oy7Cl-d
z*t+q5LmxrcebPxks(H>oiW7E!(|QSy3YqK)OrF`)cT>_IS*7|zi958qAz7j8nwEO^
z`gOEPNKGP&=L73boh(8E8x%Eb4b
zzCsCqKgN_WpON=OB|MFS^ekbfl(0Vzx?I)bW1CPw`Y4B_T@^LCdx;WhZE~8UMWaMK
z%03I?P-P1wuh|pXqop@jPoOUXq#rLL1;pD$P4W*WphWe+QQnqt>cn*J%P0?e1f6Rp^+8hqunvz;&Sx6HQKa3hu^Pxm{_Jlp?Umh)V2_!_b2+z(u
zcHOpiR_segNsE@x6z*V}0y7Ty&>(SrGz8JD28qn_-zOuCpD~#2Ct1kRYrW2tIXVZ7^q;c=qU}w6z5VCR3nEV6wuJZbuMb_Fh^uaF_0jc?m?bbGyY)f%N3*m#X-rb81yl(n$b5OyH4h^jj
z?;S>*F8#NTsyxwu`zS6w^xr;oqkHS{Nd33A(yL}}@yzu+)X;Z7uD%@>8n5(9>nI8;
zWWMo*T3Et*8j8u8h>G9nHgK8^|8CpAX~WxX*gzIUq%yV^w8t3upxNUace9#R_-3US>Dy7DPR
zH-)(8{clrsI!>Z{|SY-y7{zE
zl2~;tT?%o}JK8P^aRFh4xZp84q4Rh&3#GaLe^7{f&ql_}6Dq_-9x>@zw!oTrkqU9s
zhtdxIM+$LoB3j;6PL+6iQ;54@oX!^J)DhX;)xaF))?PH
z#uF>V{p6=%Li-~X;(l_LPRdb;YgD_+(m1RU_xThA%r=hJ8gZwykYvIM#QW-x#-WCr
zrP-G&$h~>GS!8~hg4|gsU@Z$w;;*A1cN5oL-cM+6tUJ4cI~AQfkN}=GnIX}UEB2_!we3-nJ4x(IQ1C9W+|zKfKvd)o
z7Kn=6egaXE+eaX(9OYh;s5dHBKPasgRLU>A}1PDexrbo}5QDqzeS^fby<-qp+v|cr^tiSI#wx0<1w^RUtBPDx8gX9O_ES7s
zPhJ*YIbNG>tH}N4;mG?&EYL;JRWuG~upaoiA1cE%;+@V$9agpqUSN2^Q-L6iU
zbJBmXKT0Ncwkei{jHg-6x4{Sz-MCj}&dMaM+RARaakH`NZGR*eT+%3S#Qtc2eh0L$EcL`h|cCwTyo7meir45qW_ypeM~7y_JZ
z!o4-OO5no44Mw7whm8*g&6N^i6-SLi^G4f7iHoo3`o5hAKhi0$yDG)Hg>ww&z#wln
z-Dp=k3PBe!lIOQtcTY99OMLa;9Hcz!g{{VA#ti*NEh@III$w@_28a+m&$Pf=7e4g2
zzD+Ychgi++4r?lC-P)rnq~tnE_!fw4nd>A+^}7o%mwhrZr4v)|RLez(rprgOeS6d=
zO?WMLNMwkL2;H`bZ@5+L_4@3MX8XmI5|qfxsj}$AfKM?%H|l})Yttw(<>zSf^}rqQ^MA}coYYVK(Q7>GhiUuc
z${xCjvd`w&MIU}pfKRhb;XMsMXINmy2i-}^sUw=|1pn$$98FRi2rB9+R;a;6~fxl?~TJ;rMl$xRda5T${3Oy
zd3HcHr@kNhl%wU)@8x_Z#hQLecs%;xTy`Fx5_w)|6e>%MdX`6KVIhaWG3nCOEP4Zc
zd-0UnYP0|^pHUX&4^3ZECd?_G@4IEMKXdwgzJgU;s0@9;twqtX(*89#du}e1&FB~W
zxU)H|w`<`#p%2|cPDbPn;=b1QYjjo68JYvb{1g7l*k-L~rzh%nWP=ro;f$?0Xia_J
z-#8hPuJSide|3d)9@zT7Aa5Lph|XG?eXhijZ9Vz`F*e5TE`nKf_5H%GU%lG8>pso5
zueQ!u;?O`358-y-b@osD&mp!Lj`!Y@q{lS*-PTEUI?{PM<>mmKq%`PIU@{W)YAs0C
z$Jc33XWO2BVmwWd&(H_br*8Cz`s7b|&mTILd*BOsAgwyT7?G^zK+Y3F`h3yTwO=aW
zy#Hbv=Bh?;sNA5NJ!4v#r{NBKfF^>lzq
zb$pN|ZU^7_g)Bk$*;kFFs=e0BnN0oS?Gody?T2{karT%c2aoy=41CE?U`<+E@hn+O
zlbdqBhBeV6f+J~4DPrg4v@DAOSKpi)vqz59DP*iZW$o<_9b-s=3?DLb$R**>0pE6R
zH?fFs=9V4@q$r^4b<9J@lzrO!?$l0sSMxj<5-Zb>m|=n?NT2|_D0xvAH7I0QtdNQO
zJ(_tKvOPELAeGLPRQL_P-^s+nJ=g@#ux^GYXpUE{ZwY%4mtMy`
zdD-kT#=b{X9jwOZtT&0DvoK!6%*}kuA9^XrlfM`1d(0Ud7u{|%Ik|RN`|DOdG1q6r
z1{16?I=LhQ`+2%b^zuJvamYnhSH{cONPldZdayI)YQEYRt-cIG5jmdDW*H}iH2NvA
zXgf!$iFMgbydF8^ABJ4ZTij0d*P{@5ob|{8DVHQnpw}3AsEltK@!{1nR%n)CuKi>d2T@PY-k9ymfU~yL<&J9ht@~pg
zsbzbf*zY^=DK|Z`I8|Q)#5N!|KM<`AqzObvgjXQiA^fxJ@?7pZ4#J-1X1&T-$G6IG
zwWs&6zh2u%wWs3C<-V>x*>NWm*ksh9a3>h2b<*&_(vjDOHIGxx3MDOMLMqg4%m2u<
zG{pMJd}m0u7SG_YTUf2_@uAq!aCI78P`uu`56<9JF*em1t$8(4-nZr^QMU)K7yX6e
z$OG3;c^em`w#}qp_VU1WdywMw^1$`3MHICA1J`3eavIco(vn!eGQfG;himmbayZOd
zF+21mmL+5T*2{mEFA5+U{qO65&=u9G-(S%t(!U9u$k=_u#4Agc&UD^
zGa+fiXkX27H
zll;60td$0~ShuqcVcI}V-QM<8lXBOjVC{hjqV&=bm-9K2MXRc$TmK#(B`Ad84-00!
zBIKOUPopJ*M<^S2;j|FIWpNa_G4`${Qu5t?qnCl{`BrVg&HY3nNT5$=N+?!)N!!&q
z&I0Wm_pbgc>~fOi&LgRM{h@bR*%w$JOb}s2b~jwpjC9GeUhL@tStLxM^@#0~9vNmk
z!=bWPtm!2>Ct{ZaWhL_dg=sbxtI`?UY(s{cWdi36hm`YjV#_nu1YR2SRS^
z!Fzhk4da8dp7>^OPI}yycYu#0iI%6cHuUPGL#>Q(>QOw_6w1nva1Rr@{_#58*rSS#BR!2%5`H^JUW8LYM5t6CBi-t*er=)B!pCRzmQ8EXmAzy>l%Hj7up{f%TBR9RMK}mW|MUBQmIAG3NCQ{u
z0~@L-=DVK_(`hN3LD;F!`p258yoJnVXF-f+t5AL#Gh)z(``7@hIuwzYQrmR
zc)bmOXu~vFnD85H!#*~A?<`~gk?l`SGvA3e9BadwHoVY=SJ-fa4R5#MRvSKL!#8dC
zfenw@aKLnv&M7v$(1wLJth8Z+4R5yLW*gpX!-s6R(}pkF@NFA**zi*u#-C}@_1f@s
z8=hms`8NEz4XbUq!G@b`xY>sH+VBY*9d$J8PZ0NV)*KN4UhBw&odp7*J
z4Ii-K9vi-9!)bOs>dNKMGj=^bWWz&Fy*eIF05^{lrEW?MDl)L}pn=caZD7w}?$3;U
z-6_4hNBVaqeXvZvWhs-7X+5lf9K$B+5tt0KOO70fdIn~UFN*aWqGWIRR0(`9SQqm;?N
zf}WCJu0`s6O4%h}PJRrmb5
z_^R#UZ!!5O(IxNhvJl^;5x(=Gab-l<1-N(rmV7wrDq5MOr<93bz9l{>hr}cKmhh~6
z{AaIRd3J5ML6z`3-J8$PE68eo_##~X9U$&QBAml&o8Rf
zpQNiuOA)`st%y_N!&DM}wIVKwN6jr=rU;`J6a|7cB{=Y#TT^ah(4{O`Qycz*UZo|K
zr4bejgXSy0s#5z}5VT=YK;n_`5=P-q;YZ;vNhnuTbWCiYICtOpgv6wNp5*=m1`bLY
zJS27KNyCPZIC-RZ)aWr|$DJ}h?bOpIoIY{Vz5Z6Eh{c5UB05M{E90pR#sM3f1{>0
z5WMQ@RjaT0=9;zFUZ>_%)#R)y4;0i?6_-lwuB0s$Q};Erf>Je!mQ1^kQj$ap5>jf{=b
z56da_3cf0J|1H;JTV!0~UQU|jxL5G^8rz@ro_O86O#I@n1ovX?Ek%|D6Jgeb?QlKSvM87ZZSbtSekQhK$|E6Kmfdw^aorI%W)CB_Qvr%Ely
zPU4d~bxJ1VQx}~kYC5eXZ5dN#%<-x;W`ttCYSgKGEhoN8zNO5PC$W*1AoP?H9Z#uB
zokwXwW)6_@Nehb%nXU6Aqp9R;lCE88PfmSL3DqbeZN0_i)ooDPv6H7R
z`c6@2h2wMb^VRC}YSQXG#op`G&|wOrhLiuVo}Tn9>9hZx^rnZ?tEP>bHgFYj)extw
zIx3*r@jc1un_U!h@;@yc-&fE7<>Xw}N~=gWKpz$gIbYHuom%Wl&8hD*)QoU?z14RW
zwJP;xMndV|ReH3LQL~gWQbw&(9fQ-39B9gOMvwL+xsn)Vd@y5MC@_T%IE1|lKfkF|&gSBdxJJjbsld
zzrtj*-;$G6{j?eC%Xx7YqY$^PD&X#8`vLjSVtZ@HWyzm5ds&J_Ut+hTu@w7*;9jl0+WuC~8N
z+23_;()`k9?#x3GPbjc&-~JeK}L)U`k?&MDuWdjps?}#aHhxMYIGmf
zCn`B6CnqOXe$&&5OFVir3YNsV)miE3iwoeNd%e1exeLn*`6;!kdKEu6K6rV-?FP8{
zC!hcMK>_b^|I!!-&A;Q_j<@ksGhgz_+~wSSQ@T(7$RMZxp=D*v4D
z-v6|L>tB@XtNnArAK#+?S(|^<10RkcF}imB>egLf-?09MZ*6GY7`n0Prf+Zh&duMw
z<<{?g|F$3e@JF}*_$NQze8-(X`}r^Kx_iqne|68jzy8f{xBl0C_doF9Ll1A;{>Y<`
zJ^sY+ns@Bnwfo6Edt3HB_4G5(KKK0o0|#Gt@uinvIrQplufOs8H{WXg!`pv+=TCqB
zi`DjS`+M(y@YjwH|MvHfK0bWp=qI0k_BpC+{>KcO6Ek4G5`*U7UH*S}`u}74|04$3
ziQP4W?B8AfSk8mxfZq9y;9F$LoF6iZ-M*Xnj$BLJ)Z?4mzunw7_4wuvcsKW(dwhSl
z$G1FL8JV6uYZ>`1(kHT}ZpO$-{CTAguW@mCWl7c53j#%fa`>UxFRCrAnYZkU(&9jF
z*`q0Mc+_&!}WE8Vq;m+tzW+$!l$R#71V7|Zk0AZqhN6z
z>opd21qB-j>P@TLP)8`mvaYPG%X6^@^t?zN?XK!meeS#+g*)&@!_eR(BCFW1F#!gsk>1p~c#u=CgD4_bbS
zzeUuG!zXcg%f-};a3_RUA-hr8K?uJ?ILLQ+pNIj<;)4aPup!stnXrRd~ya
zDoZL#YrH+n*;RilN&{41dB9s-RZ{A$TJEiOc=Zy~B+^}laek9&Kegm&GVMTeF&Q`6
z)jPkORn>Gb(=trW6Yt8E6X0`$Usb$wOqb8}>qxrm+(r5?Db-CO(vLS-D}-6JaPCBN
zVjSsTr#yblcyEzi3TZ`=p-JI*|D(o3+KP&*t0iIy-J>}eq8%5mdyV!;rI&PyYE}fL
z!fU;0rB^Xhl`r>}uB;BMKJ_1`w~VG{4`M}Rw77`Y;524wu-=uWE351y!O?b49IZ!G
z>4#o*ydC_r1=$O3T{GeF-?yBX^Mk`lj~;vLYw0eEI_K=AGC$QWy_iP0dMW2+GEvno
ztu0?!T~T_uGY&5;DX$GI4V*b`Qgw+Lhz*%e_*dfYKhUiPmL#fy(-PFc`JVkr%?Z_S
z%rWu;cY2k25|bqY{rsNtD)lDD`R;#Gj5=w`;OdmZLFp1k;@dY$slQ{sW`}VNjaNeh
zNopu*3|*L@hEC(VCZ&1k#H8sXcYD;ZKtDC4B#HDBm1k;vO`q17{ZYcqSi>9$aK*={
zc*5XP?MiT|1WM)_6t4zN^Qb{nk~{jfChm`Kc2~z0_9^HuY3(MB0I;MlX}Q(V`6>II
zytSOJ)E_VbCvUv(5kq|ahsUbnvs0T*NtAN@Z|uz2brSq&?pKBo0k!)_k5e?W6`fh#p$rBZLH)LSZbkUC%6
zSN9*(M-3`*QwMQU2fDpTxpHSJwFDC`SDz@=XMWU|){ErtGH%9vgn7r#PZaF4AsFYo
zHyRe7%Xu-zNvnVVKB_-?>_0_XaD1Udt9!DPdLHxFFGz@AU)`Sis`&YR!uj6j<4k?F
zQbRvC(1o6)L|1?1@+K;8Nq^;Cn5?|e#alDHMYWcpDQj(#kqc@`;E{~o8&%x%-G@%@t4
zZify%esd{8`b!yWoIFS!)kLKa9qA@b_Tn{N{Ym@RUni3*Pi
z*Oe%BD`usgrpcG-A5I&c%QB(>v%&UL3NH6Iw?yW13TrdLxd&{Xi
z1Z14Bavf_KCLDG^j2bX4Ne#F;p}?j4qutMj$D2B&Zim-&)t^JF*RMb`(3L2N?VgA9
zp%WA6D;KF@3k&Ek^VBfc`O4HhnOVblL8e^86V&iPD(zzk?PIVS?i!#>uf$D{iS%#k
zb13y`_wVNZCuldnLJs9*1ZA9dWBNP&yu=<)=cjZ;_V?v1xqgNDi=FR@;JYwG>^|U1
zajO)@mK4U86xveCl>W{AkGI?J(BWq=>i>Y5;)K`vC+!l(*@fY8w%OGq|1KF{Ih1e>
zaWlsERYMj6skoRm1Nj|E>M^dzzD~6AKg4<7vbFWlUo18OFRcY|4-h
zLpxLF(oeRs6M7rtJ|-~{mmaGaqsUL{G`C8fV)sQU7jaO=Rx`VGjSWBk9%BQhD-Oa@
zC#lp)Ds&-^>Y?cgYUH%L)JWIus{3q1qSW>N7}6djeX}2ZGl{;Ls0Q7fT&-!bFrG1h
zaey(v_+j26e}l;1p!v2R>d?curTyss>el_Wuh5P$$*F_ITTyR_DWDDny2i$Lh+95aM;2Ttu*(=%LpIGl%Y{gmgvglZ>USHCFLZ%Vv)(e0)u>`AZ3pI2%J
zM%s$N{zKwvgRC_e2Zqca*x|GWhenGIDD_9oqc)99AB$K=F#kGzOyb;gkn!mSrCxPt
zdNO1E%?Yi2_s2EIR>u@Z7eu8CO}l8(HNOu%GeM1;_KoOquI16awJGl~^7|$2_6My>
zJ&keN?TO~TEB~O>Z!yl?XWDWJZTV}xw&fPatuIS=`}<10k8#pVm~)T#81>lyP;k5VVO8qHdferUe&1l`l!_)F}g66srs
z^UeCuH8N3+4D?qcOOol+{nW^=G2dS6bQ?cfSp%IYudR~Tp;Hso=s>A!bV-S8^t58v
zXxGz7)@6QM
zrV8#-&5pb~Ulw+oqq_XqUN!iSe7vE{f8^s09sak;$B%SHii0+};JeN-{GmK{)Qi=G
zm<6T6AS@^flr2`*@)gOgg?nc>xN3`{{{b*X*tc{w}+L*u_QVfw@&R
z3t%)y6x>0Nv!l^KXP`BFU4aekD>Pi!;#1xt_TfT*hog?g9rEU?5EC__%Kb0~_J{PX8
zE>)T0I;X0#wyL6ZPN1g3#8RU!)%L-f8ki>83
zj#*S$rkg}b&Z=TWzX=Zkh*YWjrJN^pj*8B$%`ROQT(P3Grl6*@7GkJVV&(@bE-t5%
ziYgXW!nb0-Gg9pGs;aIGR?mf1E(wrnVG5;+%bcQWO89(N@`42punm8KtTHlJ;YI8{#E8#scxLDh2n=VTL+@7t?@rvs7y&4dY@6qz+O86{UfmROHZWK}9L@
z{F9^e=HwSu(~4eHm
z>RPTqEG#FTT1inb^=*565sSsj7oAsCRFYS|tcEKOl=?N@2IiLO_3<~_LlMN!&ee&RkDtBlgoV
z^39a1zd26P-%M*d%zWE^femGLk@zpcNZKrZb-0y4FNUc}4acy+)cKcki2pi_M`QpfRX$lAEPCLe`0^%0hIjx93$!7jS+tjW28*aVZ{9vjJT&l6rqn8q07Ja
zmwdvXN!NSA-@i6r|F>d4vGASA!HI>x{%_^*U!Tqin}9t_pRfsd|MhwMH>B{tyh#+~
znDv({Dn<_=`)vOY;s5zN-?{T7^`|?nJ2~j=@e9X)?HxMAMNB9cz4rCjyz27Tu6S)q
z58sT(FC2Qa^%JGexYmS3RaWPm2w#5t-buC%vurrih8Z@TX2WzFrrFSI!&Do(ZFsbg
zq4Rq-Y_;JVHauj*7j3xThR@ir#fH0W*lfecY`D#a57=<44Y%0vHXGh(!v-5V@vpJJ
z12(L%VWAC|*wAmo3>&7~@N^q`ZRob)(O6UNzD)S82s(Gz_LdD>ZFtCr`)$}_!)6<9
zwc%zPZnEJj8y4EIz=jz%Ot)d04ZSu@wPCUi-8NJ67^?HGPnht$A)*?=`K|O{LVnuoY>z2TssI^0Ps5CKFk~7
z&j6E9R9ctjQiFiYFk8mDR0%L`2)ujz2%N`-=uO}Sz@=>5mx2pCG*YPtzy-dIkvNr?
z^BzpW7?<(_zrZX6SED%3!bn;HVC-n(#NG|e!PJqi==^LH96vV#Cyp_AI&kh-(!#$V
z*ou*~1b%OvDeq<=dcbs8fp=rX&lX_9cw?UkoMq!J!23@{R~d0W0PMtkB>6c_snalu
z{G1LfJ{=x`&;*z;k>Y_T0#C&hh#%nBXaq~ZmjZWUq%6CE?_wkm9|6xzM=lThEZ{dW
zLgzKWUt`42R^Z4plzNPp8@<4DFcNWNV
zux2J@!A}4;->+am1XP&M*H9i5q}Ku
zo3qhD1il7%6GrmC3HTbDjxy{;R_WCo@+mlQyB`@O@W+4y&nHgsrNA{92`lh+8yEOC
zM)IaEpqerJ@t+R#V-A5A058J40bU3!!nA^y0H^06j|-jwtipT*UJZ=TC;!x4B9Lo1
zDj+X#0x!l$9+m+AhLL*z2v`SmOz0`F`cmq0Jn;ZeTS`9#KOOiOW+Ax1GcKp!flmVt
zDB_F}96fnzCPw0~SfPi2)u3u>axM>fUYuQ9|L?9lY#vkz?5=hp9-90<9=Ys#%~1v4wH@lX5c3np~L6E
zd#*6}y}-;0+8cfXz#n2H4=uoPRkSzoG~ksO$$tQNH%9zy0bT<$@m}yXz)vwP;GYAp
zt2KBXFg9RtH*gb1>Pz6+LFyO(Gl36cWc=I)jJe7#FR%mSK9xAd?rPc!xWKqorXIb(
zKC7uC?A^dTjFeH}6cji}|C$C|^G(WvAAvu_NdLMW*ol#{h`iJYjFiy}T#MO^|E<7d
zn62PyEn4NTC7csuorkQM#|U%Z2AS?*lz+pd6%J23o!p~L)!x2w=fd_2H-x7ghel;ddJ2E
zKJZK9U*J2xGGnR0`|mYl<^#ZA{Tf=4*1f>ZzcF))z(W|RFM-LwHMqcCm{$B3Y^7Y7
z_rPxf&fEt7cmiz(*l#=I2zWAZHb&~S8u&a$^0{B|M`<(o*$?dVn2FyDy!CNTeX-vR
z{1Zm{y9J#5gu%0b7N!nA0`J=a9~}Gv;Q2eD8+ab@SGy=L_`Sf>c2j=vEMQI>x7rku!F9D8!#o%ec
zGK}~an0d&w!A)nZ<0X~Kidx0O@_)*|RpHd&#F9hzx$e8d9Fzz$z2zzv)s?#tM
zR_^J@y`#@*O9JJdkKh93uFO`(B7t%bM(hRdwsE-&Blk_jUZC775&r^*es1gqiVVK^
z5h(W^1Q#fG8w3|9_YedZ_%j=qy9jcRK4*h{2a#nJvb@yloP3GDZuz`pea_8lj%S3(5)7nyGI3GBTmuut#BUii0J*caT%
z*bRKgB%m^W!5Bk+obSTB7)#w<-|pWs#!(55d-VgjkL&tQeT{D_*>P`v7yrcVe5d`D
zZ_4C+Z{picB|G1@{f%)UBK^aHnl-a#
zzkBLbOdYr^kfPG`+e@cTojH>gie!Ijf$YD6$M53W9T!D_#r4#wygprk*Z%8k+!f`H
zKSfd+r=L%qS?vBDfaM+M_0$6Q62iyDp?#(_*56+c7gvD(^|fw4E^Y$uqfQ#zFU4~v
z)Y0_`&oto*u1nE>!8`gIov{5v_OH_w^p77GDFA+GEiR7ul@|O|h^x!2pz)tu`qci?
z{g>GWuYY@BLdL0^}AQ1GI+eZQ{D=Yabzq^Fbs{B~WxVE>2Nf8AeFT9Cez*Wi!*IwZfO
zR6k~Y@b}D$648$}|JlT8$79ylzrAqO@mRO7abx3bZ-F!I)R(H%YiHW=YUx4ga7SHo
zHGFnM)}nEZlFwPs6^`vxmeX)w(v=%>HRPJeb&$m5p_lfHHp{Nlb
zhzdP|ZMS2h1{I1*MILfO)bMXbjXExBSe-O2)Sk&8ZzpdVT?>+Uw2Op?|2M?+v
zM~;;{{ISlA
z#+m>^lB||WhT0_S)Dbyo?5Vm7@ZQmw|C8Xm
zFTuJq59e+GKc*PIfYeh@0e%JG*8+Yg;12=*qzimYzz0XeUrd6ZxCH)T9{fVU?=DvA
zAmEPz{!74r4fr#FF9Q5!7kK}cnBPJidl+0x=Pklo2kWomx1fZF>M1$eUdb1Ol$=af
zayCOr@j4}!50>HYe;44r0pAWjOE17j0e(2(;{iXto|329D_J>6$!p0`yVAA+g-4q*r?-BL*pXG+D`W8A>*_H1(p@rglqJC^I
z>f#_#my<8sN_MS7AB*u3{m;(L>)VL6~4S_
zusV|K&Ct-^A-zMw^sND{eOom5_HNm?tU^fd$cTuru*lG$ppY+*W!F085J5H8HDD;Lqmdkhgk1pg%&Nmn>wFIL!y{(-syKTTy*JO}<^gb#_t^SkOd
z>F>mCKx?7^cxzBd)cui>;gQxxY@nfMg9d-O{r1}%4dBP_Cs|ShHKWIoK*D|-L3Zh&IjaGL>SDK^SQN~zelZG-Q4Q+1BOH~Dl!OyboF^i*Us%a)~e=y
zX9OOG_J%BX>J%D!^|{sBJ7i#wIyGx_K@Xu;wjL37wSQ|UeU%&4UG!w0a+`1jOQlx^QhM?Dg=CtaCuDrhv^!)-?d|C52%XkW8FT+
zH!=)@7!lsNdxgg??PH)LLb{iShJKV!QRNzkeU+VQC3Vw-j>EawiJb1pf5x9nLmL&d
zROZX2tDp-0-1Yqws?AfzNefZ&r+BZ0PqEMYUaYCIuc~YG_cTRox-FNE8ABwbZaOu(|_3N*{s*Ee5
z*1=ae_=}63^>2!at}7-w=;e}8nCPZsqFaoK?qyX^HmdfrLk*I>Dp?Mz4EbEGlT(<#
zug){d9}{Au{p_(;GH2YfQ%p8)(yz^@1Vdw?%2n`i#(r+|I`^;7;&
z`YDZ+BQ+O=x=VI9!4#XZ#;<0(Lz;2Qn=HNMNInV-MEpAYay|E>M}n)~@R
z_0#kaW)~@x2ZCZZ9vdrYM=4Be$$os}@#33YLMKf(wnGFGIcq
z*>D^m|09X~VY^t}R4I|S9>^4b%c@nY{)TyHTTxNbH{XB%{kNEVPMkk~{uIi>ix)2z
zVO@Ii^UptD_wKvzK8^E2em#5kgyd_QFlp#^u)5OY_DA@~vF5~lXUmo?o1rAHxJ_M^
z~q_D71u>KZ42MvNX_!p;7pFRUx
zzdCg2&AGI8O!)^=uoCXe~-FUU=h8(^=ZV@XK~qAge!
zRUQ9^;CBIZuoK(p+O=y-;s?Am{`>ar6YP-+@RO}uw`#nxha}&8^Nk!ha6qu9pz9F^
zJ_7Hb1k9>+>(E;@VmtYA;KjL!uH1@h^qpK3fYJ{ot9&GF;M
z<+IN|)7PZQWcWP#Gi2`K4?p}+0=fGIZJz%5=bz=rAAh{?(MKP>{`J>i7oxA92m?C*
z1%2xb92ODwkKzx1O3+tXCjKUquO@R$OeX&16-)D(*UW|vmc++o;4CI%%*AAL&y+WV
z|66apwGiXz4VjovT6XW=t!bcqV82N?2H0oP@JDMv
zfB$_=1IG~hFB}K#)pHBCEaCbW+%M&G=x_(nP*weHc=_^W!8D}hKOi8WCFPizV}N}#
z*|u$)V9W(Rv({mb0mlUUYC045y?gfxb>-;Mqxv&xpbenBQBOEV#BKBUBC*Frf<6@K
z^qz?S+ai9uMB45Yd0?MN&MA>gmoFcNu2BE0k~IE1cI?P#-n@Bd`0$HhF9qYFX`s#F
z7?7`|hhst9`I+PY<(FUT%+KUG`AvK&3#5g3lmC>_tnWm^@EhX6heUc{3%)C21|9c;
zhCL#!K|{+oMZDh-S@bz#1Er-as)9V~X#8R8#bn}dvKb3g-i)454=68x`%>kOoMS|}q5RYCV$G%z8VLORf99Z}H@=723%GU%4cJeXB+y0t;}458
z-|kS0Dq#=)W6ls`|0(~NkA&Hb2TS5_IKu_uSD(#
z4Uv%lKHxWL2s|Lt?E?o5?Vt~VOMbtCemdfa$yt|Hx
zkt>$uKc916AS~^SYZ`hII!YS85QzW{;h-V!ylahXe*OVZmR*CGhCluElVF}Hmq~-k
zhK|a{#L(hO$1lJ9A_rezDKE{8lx4|5^6bQ5jo3rqhz$8!WYBSuDAMq$Q=cI-J&iuo
zCPANRlUhO#sLyR6Pt@t(5NE4|28}<)dm;8NykXbplNQ=TmWBqFlzYl`MLLXpQPwF-
zh5I+kD^q()_T)f$A+?vLfdo8!M&v;!4bg=n1AtRM&`_q&q`|4rZE2HWQ(M3$H3bb`
zTOGC)&CIDlgT~+6+q=yEV~*!M|GV*^{&GGr<3WAkwb2ve54$HjpBpW<={@D;sezIO
z8dib^rWn}I5ujlhZ2VtOh>!-O&!oZVGi_4)cft3!L|Tys@Za*f$UxXBqz@~@ANF3$
zKXh#o{UPd{p~KL^G2ndQIwpn|mc*ZSa?_j;Sv#YL&hO^D}5LK8DW@2XHy=!~WA=82Rtsy?aZJ
zg~`-c;!WI$yP<`)pZ1>X8rn_HRh-+HcFYZs*FnRk+1({?W)HE0hKl<9Z`h=agb-Oc
zzPBuh50!aiLM3fLBqybpaHr-eI^amXWFFaVOL4R64EfPw>&WxG{l9;
z?9t(x2I5H{gY&cTF*;*@L;9Wk(`IlErXRt)X3ZM0*=$0;)MPUrEU%|0*q4<9FT}{M
zd0l1u9MAw=*Z>+{bLul~Qs(4dvNENYEKLfMXA*;D@q`e08ZtAS)hTbqL1-+
z=myg7><{sWENJL&kK-kPJO0LGWz@?Y!Yc$?xf+LkfTN8
zL*&VL2Mt72`xx-I2OzJ6z4}e${R*8S*{z1;6VBS~g6$L^A0M$=t%9=|LccCGHB}}}
znj{MsF4TIBEgx-n=nGXw3wh7Efwqdi5dBfE2bieO1usvQ(x0*3$eyC-XICHNd8a-v
zfi5srhOD`Vy^6Z<_5X@%jenaqZMxuSPa=M*{T%#gJq}q}Su$|oK*8A}`Nu#0A&4o-
zGtWGu@!|TO{3Z>Y3ph_uu9&%oAq}*rlta#iP
zXNQl$`PnurRJQzUjC}U~X1RRn(qW8umG>WTmQv==pML>olw|JQxtAV#=pmUfVS?s6
z)|Fb8NCWkP`#!{tbX25;dk~z5Dfje|s4Lu$$j5q;_vj;Wo**riB(UJ8FF9U+bpMBK
zL3dYUkFs#aj2XWXPn;XonL2G~Xx+NC;Ov?n2hJIkdHOzv4&qDvDf6V0YXg?l2kHp*
z!Hfw@${>9cGxu=q&Gj?nofi%#rVsBZ74I8HK
za|}p>$)v%od%32hk4?EJo-8Rtq|L||@6)HE&kbAhPwYK)!8yalTtnOp{*-%W$`bdR
zNfUA7o-@wgY5ffi4VB>FV8Q7WJq~(2kO)wEyeZua~uJ*XlX&
zwbx$LaNHvRpH9QJJ&ZYWCUD=4drtsq#yzSUziCK(>HpCFLmwCMn)=3)xNzKQ7a*tF
zmq46E+g9khp5JI|DBqMh;!i!GUUEz*CmaLrXYsj7{LFSaMjQ{$=b-l`;7z;9IAK+j
z#^1;(OIH~t4JH#e(nT4fErn0~
zOXN9q2sUFW@VFV=%kX!Vf7%Gw@u+M}NEi3mM~oODIG3yC+wk1AWIf6fWu5mq2M~7?
zAHf=F<)6S^0(@tnX<+5op;{R
zc9%S$zEa+aKlK3e_#(RhvohmKK|}J>?0>>elZ)q^$FrbvPE3dGeVwQr00y
zzrja1jj`gq$}<*!WNspUH%fP+0?#1)zs&4|XCHV@<4-3^$K;Yfb!w@qDZd3xoq8PK
z&nb44_@uA@b7eJWdv4AW-+n=VNPn_4?V0vn@J?s*xZde3kN=r6&55TtN;hX&%~{rT
zmUW$_m$M9VmTAsX9Hkpep3|}*2P5C)Ij!mdrGLvzdA_Evh>@Oyj9qZKjO?0t&4^0H
z{XjHVX<}cVSPQnpoF5q*8#@U8ei-)rLvX(xGJWp5k;~+iZGTZBG6k`oejhtxY}^ZG
zOrZl}J0q|+{PB+(rR(89S{v7>QKKu?cFEA`652EPoR?s4&tNY24tD7n{PQgAlZ}H9
z9ESRM6N~bOzfm*>F%HJbxEC6X{aePo881F^L8Jh?a`-+H)Ay)GS^vyLtgO_b}|{8*G>{Q^p$^=VE+?d*X}_eRNUe0BEm5tk;Wn8v6<{
z*n1jE9Oyf7?aBF(cAaw(Z8d#a`YC+I&rHJ+t76QPu{Oqdcz%H~D#mvh_h*ca=~~3&
zz8BX3Sc4{Fy)cgRFXuI`bJ;KbX!68KzxJIREjs=(UiJ|7ZW-@mtZZYk>(fdt&`{7I@&m-P-))6-y3>+Bu?K9?U
z7O|K9XK%V
z$@saJN8rF%8)H0_N5-fa>tvkpLFfq5Ugwy2aURFMOCr91H;!{Nv%!Hr2YEu@dhP5`
z9qVI!hWm(2?3-wZB<
z;Jz~V0<*E7$(S#`ODA)nW3D(*9*0(tN1Wf+I>ECvl*bBjIxqH*cuEZR6^6Rv!M-Wy
zESVT@V~m9HLdK~!KbLgFnBm!{#tU)K@`$*riG8y@*Zye(
znCaUvyW&AyIR3ky2olD-)&mFbX>xCzF&4@r<6MldFxIGJT+aSyjqby)v`!d#37SEs;zAy3AjH^d1MTq
zd(%cI7-wRvk8!dzCl2f^y3i5FAA!CHz@-s^QB=X%Cv7+Zp$B{=u3={D;x;=xRQ5RZyuL;N^z(ROfMisri@)4#i3^5Pm4
z{>NFy5*e4k_e_QRuf!oaIa%|a_JH#s+cq-5zGxSWu40}jMOj5axa~64eAH+G<#PZ1KI_`5f}1;cAGYnc;@HlFGk08
zZqzxRYyC(3HU{6L8_K@fCdZjL;5`9?_X^1U_usE^BM!{0Lmgl}$_@P_+6$gb#9nwN
z+T~iIDoNX4+8>;wAIJ6zu_wU*3E`RB_ozg*7lAO=u`HYVoe~^&jN=TO
z{;_LkjHluJGxu44K^w;*e;28TTpLqf*J2Ga7=9hkH3ngAwH)g}q%Ecm)}>6-hco_+
z(J9(8E7nb1GjR{$5ZszhIM<0}{A0!V0sEq^(JnDlUJ1(-gXIa=z*`(HnZD=za|z{U
z+9v7|ai#A-**>uSMah`x@C`>f*Kb@ua{bD+0@o_-uvU5=crn(>H92vo{IlOzXGdrs
z=K|JX%Yif7;98t(FYZNf-N$uj0M?z{qoz+qylJAT|b1<>o)8Re>
z*KFh;$BAA56Aap
z%I5Syy`H4+PQRV&2CkiE#rD;G3@#K|gLN&w=PhYzY5I%^<7ymB%8TK@u8cnicI9cm
ze-_q0&O~B2k$;#MwB2G%j^_smYh-TS>|S#A_+iaYuAv@_*Zp5K@ja
zvSfWT{^XtUy9zN+vVYD;Tqm$?+GWiBX|C|J)5M$YkQaEa^@(#f_mc?AbvEa2#A3?9
zvYpvA?I9k>s+uz^l-J6p4(l`5WnDLHWLhj9D68u?u;E>n9*!%%f9)1LV;y(>_O)tw
z=D6a!*Q(=V8~D
z;BOy%W_q)F5qv{8{f`ETpI8R*AyRex16t)$9K1+3v^NRAmKCoO)fluEk6+Y^MazjQ
zPEADZ-nvdZ6@cy1ZpfYR9vUF8BH@m>sZi8Oz}sD7P=7kGiNRH@?qe`o;uE%J!TW1s
z)MV5dg?GorqBn~gqEhuIR{lG_+Ui!;cNp%cq9$=l0&el>Be6v4ETms2JeM
zzL`4Um%gsHKUDXd>e6m|ylI`eoi535ZVtNxhJ2W$>nEbc2`&^)!ZM%}>c{Fn@m_4z
z4D|=#dW^0~&Ln{+q+$%Ho`g1kf6oGEIuu{5v|tpf&aVKC+dyaA9691<#*SBxQK*Px
zFkAw>=@M`g)o5^O6tJrLm~r7lK9+~C$j!=l$<1vyNN)^!HdtPjFmA%r-S{h>QjStV
zOAIi(I_|Zq=T+|+lbRZvFml3l%d~_EiK&6!lT#AAr;ZvMn-G)QCLw-Q%EZ)(ag*AN
znwZc%CN-h$l=j}1gqXzmxY$(lOS*yH?c28V?sZ3Pi>2qJl*y_3H+QSpT1N*QAX8&U
zO~%jJPS@>Nkb0)XCQn8$v7@6?;-}!(Qpd!mmOXTR96k-8{C(m9u~T9vSSIjqpm$7a
zMBfg{B)WgpEtj^B4G&T`N8$d1k)maRPSj%nQAyZY$r)2mBX
zyJvc4`ep`X_RAcWIW}`z=7P+nnQJn)X70^Ant3|2B-1_1GmF36a0D=Fwa4o8SC`~0
z%~_qZCMPdvYtGJ`y*USRj^-TCIh|9SQ<9@>?lupbr_EyXwY9Sa*n(~SY|*x1wnuDZ
zZArFiwshM9+Y;MS+iKeyTb^yJZKrLo?SSp5?YOPPrtI!^54)$`V)wPTvj^CN?fvZ0
z_F?u%>|^ao_G$KX`vUtC`%?RA`x<+meXD(^eXsq1{iyx8{j|N
z-`sY&0lC4s{c^YF9?dPu^;{dUcG}t{YnQHFy>`voytO;mf|h7%IbL;=H7zSWYeCkM
ztfg72v({wgWo^ycnYB0TK-N*sw~{QCP2PECTe5w#+hqr2_tSiQBztUjQueg$^lUQb
zzv;gm_}|5WW=%u8j~J4QUyL4+5R)=$;)v+!qsGRhB;d!N$Km(iQ%4MqA00b!41Od#
z)tcnEH=^y7*u*I#29HXKPeO&H>66AzOl%uFEw=U@wWIr2tA3ZJaK`-c5B}KdHTONZ
zV&)3fYE6iVPsF>^lBPGav}xVOg5P4t&wO{cOr8|iCV+Qp$0sCBOqpa!i8Y^6r>EAA
zOPQE}x>I6P@bl(vMdXuk
zDk#k~{i~yk?|JX1Bd28lkG=4tDesa#KJ3?1I@I&=Dc@7ibyGgz`N6)QPkD>ydq35t
zw5a^YGUb1mdHz5>zj9mcQfc#FjbLurNVL)nYxs88p%GSZYD=wU2mVCNzLw{@99Q)S$;kf8bu9yca(9kvVm9ml^vrR!I-q`G>GNZ^tcvmFj1Tw`fDZD%
z5W|pvewS(+{hSy`MGklppb3cC_!<
z@h|$MW%{fb(kD6pOP~L^oj#w3zJ~Vs2kG-#R!FALiJ3n2#KKaqo`{tee@!>``%TYZ
zAvWDSs+)%@UX7YtqsdvvwN2d-bF206snTti-qaeKWO__hZf7u%6VXC1N9?vp8HGbt
z$J5=q87r;S&34^f$e4|1{5Q7m80e=&PpmHW&kxQE&JTVy_%+?!PrubsGZjsG&H_mA
zQ+};HYAVAOZ$}fiR9ee5mn&%QXlmtKAw{$wwpraLZCf`f17340_E;ehEotl68O}?z
z_Fyo%={Uuj?4YI}4_CCBFIkf)7FE?&m*#BB1OGwurHJ`#$n3Cu6PQBtS>5cm-c_yd
zm7$&vBt6p082K;-_NUj{k+KuI`&jBbOy5(mhdgt;_4`wte(4luajXgG4i5JF>$9DH
zLuPx#d`UNVTE7`D<#$S>tLTmKF}kZpFmlFe?$sV{v-Y20jP$OX&jnkAUs(V7XVtyb
zD?14U)*?`&hGB*eDs)t|y2JbRvVO)oJ=15@?4VCZW>wIq(@~Mrk@WIydI@Ul!>+o3
z=M=Kzo*MI=be*)8{ISB{9>(!J__N-a=8R&n#W%-gTYRcuDCpB^^s3~-GP@@5&-(G&
zdQS_V>w;D8SV2wM8)U9HoOaik`_z>Ep^Rpe3rnjb<}(rV`tpdmg4g@>h`BF#WAKLH
zqTs?sEDwi<=6_WPwY&oS9!h@ge4(br)-Q{|OY*#YAspuHyx;~|kASS3FIH@oGSl?L
zvQoe8yKukD)zqprHiFKlW%;G=hwx4l;FI%8m&(#zU|j&_bW@ThNpr9D0V}xa)%aIb
zI$i2CA2mPU{0nJmK0dxe)dY-`z>ln($
z;r!UXuLDDi42|Zd3Erx&m8GqlFWbIX0V<*Gn6lVNq%gD>gw}da}r}ZQB~ns?p8uy4i0%1Ti$Vt|~OUth4=+yEmPu8{3(w
zUDkd@?w?`_J9HBkx&ZF8v{+9phcT@3J8VI~wN7Ez)oJS6^dhb2N;;{RTXB`K*E$64
z3rDqRtY&&*}9yq2oUcvD7K)=@bWqC1X%l0jk)W<5-WBYC(#rn4H5)gp#eHMmwlLJq=^%|*gMQ*pq4VV(QhHA4CGj<;!d8i*#Z8CaN#*>VcCnj~;kkeUa{LUoKxFCaoQ)
z(Lz++&x3Lwz;=6UnhwM!MvN17>{Qmb?dwgsTmzkLB~jD#wiGz73hc0bFE|C9KA#|=
zH}%FQ>c&Y5z*TJD-<$$Y*WZx>5NNe-E-TfAt1!)%Wc@I;ZuNwxDGGasDIMyUNiVvG
zq;Q70PYHcLO=Xgv2698@cJrkun-^>P2}|fMHlm7xaZmE<{&cQtb`{N9zj0bRmpW^T
zzQV7oTs0ENHe&mxQ6DI7qd0SU4;3o*2qRd`X1>(=ew})X5Dx
zx$lyzZM^emtdsbk^u+xwdSX$lp7h*2CkHCqDohShL)V4hM9k+UQLP(GN-H7!C8gyq
zex`xuPQ(!g4}S>0r+CyH+xIAMP9Z&+?BT1!*kA<}dqRn*FwJPGe}l-sw(lGYN1b8}
zWQQjQN`9tdtF?#aqMN?wu4E3)qGxzOhwr*vb;kX_%&U*-=KLr0raiGc^x8|=Wqt`N
z?L0luR(~BF;DS@~yKDN7|*TJkj*-B%s1{65$`jY_(C#P&^rVi0?Ro4iaFbR)Z2NLxS0
zTL;%Kt22(A8JiL`U$i!iR&zLxx^E%H=*c-=+h@sisygu-_#m4J4LQqB?~vXvP4@yQo0-^oki(PiH+=FZl}&W)S-qI
zk>W;2Zl-vl6rbe4X6feZb)l-Mv2oh^5t8q5@(Y-SPoUZ;N<5Tdl!h|=x!1}5)E;}=RcAXJ8(<$^13IV==^rU>wwq$hX3V4iuA0>h<
zuxK^)myr=p7a)oeZ+g4u^9(OmpFl8J@{{UJfy=DjAf8lTTD00iSF3Kb9|GdM-PQp)0<*
zZkW*V-TPpIXEKDks>&FQ?qoV&Tfa*;TJyB^yJa8xcch+*-cYj6E7HdBX!5)TIXSNM
z4C2L57KVd0rioelfI{ELMrb&Y}?h%mk5iSTXrmJ
zwlk6qsS{}3<}Uc!G}Wr;Tek1Tym8$SrWokvCzU(FVIAWTEa1pwE
zBJ6JdS@$4RFBV*~g^Eo9MAFafx2rt|uRsR%xpNVyj8!g>2u0v=>eO
zS~4nHBgR%cVxB-_OwP@%JN(CpY3qHvqsbt-TUGivY2Dr$b+=`6PJSkbWF)!Jn=iZJ
zMt}mOG~-m{)L*SV+yRH!c@XR%)K^BqVRh
zq&wib)2#d0V3BD*|F5o2J6$vbdJGh`O-30SrMI;e*Y&m8c0Bi^cD-$Daq1haK*i4o
zS^0dLE!U;Du-W5i&*6##L30bjy7q7@lQPyCc8<%{>0)|vQlrFG_D_+v^1uh+p+bhA?!)dFEqi$(hoT?=hJt20DQXmOiJ``9LY)@=HE
zO1esvSjV70vmITir9t{Om5D&<%?UTa#`5Sp-x@^?6JCK@(Y_-+ye_agHcB_zSUEYe
zay}#@o~N5_?G>%q2t<~g3s!Y+G*Mj=P3Zn>mA2=HCm`lzap|)*f|(31R{)36WvAyz
zfea$wK&B|2YxO{n>twI{fk3f0YVK4T;XDy#cUe=*$V6#=30zz**pkdJOUUdHcyGKx
z={=%tU83}-sM&@LFz=EaBy8m5*VS4ZYhB<>lI{BnIk4cD&H_E|%!spiL((
z$1W0V$;KX^P(?<}XYHqoplpQo7H>!m)d{bdPaLde+h7(tf+ZB(6MxWZnoX6&>|)(q
z*DB~wjMmL&u~F-ZIbJ>BJ5ZM6ik)gUbdlBM`Quqove#M~lf*ebB4nBg}NN8q8e!?
zVj>HOMJZ@LQzOdvHUSih8gCt%IxvyHLmO^Ea(*!Nd-Zuw>`f87{SkAwbrcIp6hiff
zt7^x@FVoBVwDl9eTxT2$))(-5-O9W=qunp;*yvYT{VJ=~FI-x;pN&=5ArA%W0()Z}
z=?f87g#Y@j2_ct@T|gzY^?R)mq?NdksZ}7gJW^{18>hCuy{s)%iDWGzC?-DRKLl?l
zlnO5zQf3*!v6nJ;)xm`Sjm!6zf=o%-07p#e5?cL}gBtB`Nq!dTtt@<7#(o8m8xm*XOvN65AL(=C_D}
zJM9UyYteSSwriu8{DkKl6tSk&09e8kMrjh@N|SS;@9l|6^W@_Q=i{`@$NUzI6|VF>
zN{Rev95oVSa&%)ew#+uKZf{3cFg?f64ASokLt$^COgO2#BW71L>H7~o2Zg;=Z|nCM
zZ=N18^ET^uY+VpF$K*teqc&2xaTF!LhIKrwGne_WBX+B_9vi@rt2GKHy|kQxSUJ18@{fEswY{>va~$3%JGyYfr29k%@bck16c
zdf9Hh?|r@PC`@3R-j=#7868z@m3)O|u0`Iw|bd&(6~U$UMGD@Vncn>Lm}{NqU9US&{gYu`~lU+m1n
zi1g$#vC1#v|9B;ObTzhRor!#90$^5b(Gy`buihHrRfjV>-l^6#?Dg3lZ}@PRD|I(>
zVcp1Kiyr8xABHMWk$xp&hFzvUhIKbDi1339ve8Ac5ON73NDM}^^I8O?+8zk+GVA0S
zG|7G=o9JQQO;-x!z=zz5c@^<{-AWi)tG`b65v40t#CwnzKA}>?+z|q4`eNlNfRXZK%L4$WHQ)8Sgo0
zwE~@9)+4fUIf8fW?9TihJ6Hgttrta)MqB{FTBqxu|CDLzEKWn{Cn*>&wx$DtvzSvC
z(4Jr-g8~qe!NL-;BVhBlx}Y;!It5;VT~^q_HdZcH!a^(MA3%zpy!zmpD(NfkvF=9=
z6p^lmDSFnrRVn4npverH%%I5(CT}SgTNGB)0sCY%@`7%@lG#4Gt*2;3c3;0E8(QyS
zoo-l-h2)DEIh-3t!@^Gefe~>Aq|Sbf{goW=Op7FDAB-5amdpAhatG_BQh1V>p|DF2
zoM~XblmiX(kl0U_veatKBQ+uz9@Z1{N|y`0j<11Sd^JtI@w2S`$mW?%;MWLc4%=HL
zi!p2d7Nf9k{=Kw;xt19k$vh+UMEX9C2D?jRP0wn3ihvj
zIKqjR_QyB+t|%#l=^@PkY$HlM{<4z$Jve9n{#ZUhYv#%_q#uJnen
z7S7e0{d|oCJ_u>EJ_(yUqk*m3cisoGsENRi9?F=l*A~&-*(<$4vm*-sUaFT_dJdnX
zrOQM7ERMPl>SbN2|4`NV9yZ$|0jqv#7_|5qM&SK>FdA$Qn}>sahte?IEg|!hNZ-Lw
z+2M47yawJ6YgZhmd7`)o7cpN%77HvCf^&@h2FBhy;L2rI>K+Cp6&?pq
zlFhyiSR(126>L@rL1c*79q1?uBeI5<%2ZP3K!*8bJ8n5Vkdy&9Re{a#rI-
z6fv$Y@#|&(1pg>!eIKW$IeEqD_akO!YCNey`?q5Uh$a^MgG!T#n1>V}I*O@Oh-I-5
z%k{Du%Iw6?)MXzjh?<)@`1%M|Z2fN100q^u)YBKp;(8NX!a7BpNWL}bB60|{!@3IM
z&!_-j!}^5^fVs3)8n2d}7M6&L95t6HGcO7O>k8tJiY2gy{mtC0V*s
z;mM4hWAvYlP0?$+)i!p-gT`AH%yAiSovz=pXFBCU*-y1#y_wmwf!PgMrEDEyp_Y+h-3$ZW$Ny$8H)g+M&odOm3D+qCuDCyTVF4s8_v
zmEyLRLz)cEXCoqszT`H8*!|T3k)9}efv(zxR?xmMPtJ#z>B&Eo77PE!jE`0XJbxM^
zJEbz?Lu5g--#l!-Y#gzXP3G6p>XOps?99>9SjC=T%MY0{>#J9bVPGK(CmAlr@LDVu
zdtE8Cwy$lsu#8`O8L={lK%5}c`pb6GjOmh$5gX((WMNF8jU#kU?6HQLb+0+w?hE$3nE@wxIvFA6~zB7QMVyoEeHQuBH-S!>tRw89F
zyIi51ALX;4mfyl>Gbw7NUa`Y^`9s-NepV{j;n;E-$Ceyj?qimR?nQpJ7Zt@YCfL5$
zX%(74|FeDDa8Ol;N-078H81eqW|LX(_9$cc`%a*!#=7{V2=)|lNG5a40)v6g4t
z01XUUv68UZ2|@vkl?ceW7{YVw!nCy?
z+sAnJ?mvd`Ab`J#GpRgV_N#doE}<~&Z?VHb%c3L;ua)NW2qzfhmeh>}dH
zGKiE|U&0iVSyyQ$NO;+GkhAqI3{1v-UXl6k&ogShm<+H}bDWf8ZLbv`!7=F`^V*WW
z%|fH`g0dA}vmj?dt{;}&QQW)P9h)H{A4EQ&PP7V>>J53l4KOcs^mIW(
zWkEdG-lC&N1l;w9;87FIEh#42)wpNXA?u;BStwK2f%x9dIa=c%`6v*^^D7Rdeo3P2
zK9dB;uN>7oyTltCA%$60W`E3W-dBpg
zuqcq@x{}^i&v~(2yR)n>8M=s-@@eAy%xR>v4&Y%h*z7^|kj=+ut-*SgnXpUQ2Za%i
zw_32)!m77h`9S6v$7W)#c5Gu%xh%>rSYMFAD@|Kh-5MzR0ebF=8}-^F_#pg>cMe^Q
z_fFTrqJD?X&Jg+pQE^7T9S;~YZ`N{LIq@lM=%?CSV`D_iRT3c{J=yaikxU5%rHT=TI9ln9_p;9*QY6sX)@dJei;QU6QC|w1dx9PPU
z-k*1jcMjN$eZXl0=c@we30H5Z#G4Zf18#{O`?4|fubhbI#LpT6?u0J@S5*J&gl|g|
zx>4w6bp!F}L5Qb)5yTF=Q~b_2auNe$u2af-1--x-Y8ugJ)$~A7xqyDQUb~z9yjp?2
zS$2CCh3xpcnb+1EDhBdlycVY?TH-GQhOBi1Em;xS%mih!zz5d%5ZTK)kgI(;YVM1)
z9Y?6R=*3Ee3NQqA=9m}0tBfPY>WV^F{KDkb!>u=FvBx{<@$4HF#Ty?(D_|c16@7ar
z?3sMj4pkIxD3B@pYY^(UW7-_E@LkG|E4F$T>^}02mQUF3kyHzn_+N+p{xB`ffEMeA9vW5-D%{
zZltI*4Xan_uaQoJoSn85x~zjwdZGe`c|L&8DFe`!Uzz7`w0>!xulJ>+=37i-p5mR>
zWl?vJ+1b|P3AuYhVyI7#LAPEYZ87i$tRpmE}@el^F1lN0erixJ1-N#3v0fp0!puf
z11^VLsS9qh<=8A
zl(KovC21r`^>K0LV;-uDR<&qv-K@mIx|7<^+mo|TDsK^_F=k^064`x9BFi|CeU^vI
zA`v->wGlB>5s}S`2Vld*+LS4GWdW#Z9=Ld+EhF-ng5iU)X7A68`i#
zO|AEyO~DJK*d*(2vK_TGJ;J(KCFF$1nt-h(v%kz8V%#2jMxD`gWt|!-@k5${77Q@!{4z;ze=7&BScC
z{l96Ke7GeU{#P5P(1-)>pb!x>_limI(??L33;=E&UU`S^Xg(o6V~Xzp2+b869oyFB~+oK91m(zDG}-Ce|yro;clXhx0fm
zqA!a1;w8|CgOIS{tHtHPM)Qnv&@IQrVjZ>Cz6}8;hEX6s#`+#jXAT>_&8rE)U3h@u(3Rj2wHPF8HLr_+u|u2h!@v|soMqnSEk8Zd`9UErc
zRN_h>v@U-yBXM8Ej^Rk$+sR6^P!=M|4(TT&#@8NU-8`?Hjo1~wjxi#DFXslCbHj#H
zR5!NB>1Vtka3nsdw|a3-Y^?Qbif>?ajCQZ}h|~?V$4;Z2hvePt!VjWV5kP_Mdzd#2
z(Ya9OE~}OG95vq%MZN6^iVy-|(zl&p4c#oK!g~#g9ul0wCtz5||XBmlcb|@y+~5^oMA2
z%2&t|Z30b#v!su;P0>oP@n%l!68gTFk*t&4-cTiC(g?CTh0XM*M_NA`XrI~P!(S-N
zL`<-L&IbV?K2X3qpYwnLW)JqoQsvmwRaiiIOAWlUuFCW7CR}XuDqc-j>a`x<)1Wa~
zw1+(1-L|GuLWkn}HjH3W>Zkjq4e-!WA;hn0iSIXW`S*t~{JgUpYShtg%LoE=slzv~<=K*WA*ElMAxu<+e5ER>PXppG$|uZeA(Temu%&q(p;3AFN2!kq
zm=?vfxfpqDEN!LF)Xm0H1wg{HMEXo-l13}ryyuWqH$7J>Xgp69ORBMSo%EOR{GE@T
zp6`=69Ftb3=ONylwdwgfFVgK&D$mcnFSmVb{~?FB$0_H`z~O7eOlSLUCm#&_o;kIB
z^GO&pU!)Lg-zm3^a<;FL4;!T`wb1X9I%}R0*ioufT+j91NaBu?NMeOwVtj_4-Bj0@
z_j+s0>1Gh!;oi!cvc4Mg&8Yc4=Cmj3w59_z5~=-$9!bpUA~dL*qwByWnz05DbT{~4
z*jZ@K?vDlzYTtT-qUP-5@^1W$cjLZ1m)7`wc?;yk#>sw)Ni$-;5OH_f-AMb*3BElL
zTXVmwcEz1Nab&8Q-#V9uW2Z6VdwH||2KhpVBR4w8!{_^EvduYpj=@m1wadC|nCyj2
zt$A%;w3fp&nPJJ87ID86l?_lyq<-5M`#ZFGH^n*bFxrb{B4*!>glHD=IX
zaR4E?rmXV`e=Jb3r)umy9O_=}HG_<;wLag>;c-u)&Cx(xabWC&VP!^jmFM&Ib
z$EM)|j1Ueju0pu}b54-q=pis$~y&T*+xHtN5ij^Dv
z^%7mNlKsbrMJuxz??mDQn__!^I>*gYDhiq>gCh>6y-yP!!np!os_nT!v)geY)f(H$
zMdxVz82saUVjQ{l!Fyx32g`P8jl0P*QX^tlU_Sb?kt&IuWuyvXIfW6
zvj(<2h5p+D2H`EwSwH=TECv*ISR}=U4K0jI?@X;}rSnDnja37_hg1U|)xdV^hSx;N
zR_l)tW>JcPb8F@5C~uO{c@SQX_Wc-vx12+X_zdyQjX9DVg;djzhq7W0o
z))<;YTY1Kqwi$lJ9G%8d#&=Y2g-5J9EDiLvQu;DVkGayNG;o{qwO{JmzR6Uh$UG@x
zPCO=Jtf)bg*6_lp#3+w^Tg=a7c|p*fGtm(jE${gPmO7HD77SR?ytQ3_Bxr`(@-qAT
zWfSOxaSdnVed(w}=&i-FC`!Pi=?<=yrTgx#ws#DU@R`1IyXR+k0R7~IY6mXQnIYJ=|Dqf4+{O?83Q*D35
zm~q?{FH`;v)-R{BFDCMi3*t-k>{7fQ)8nw?9TyWqG3`Ursw{KR7s%pMMe3iM)dT*M`1?|}%AZgc@
zX30+IPfbP!7X!AEjBUyvWF0|-nESBQh0Mtj(=rdU9mNVG#;RgmWP&-P(zBuAracc-
zp+(j}^q7=iuyEi?+-C&NiI3TU^)U0@n#|Xx-UoNc*6NmU3HqR;Wl%dL
zkIaY`kZ}eU*h+@_w{SA-$LNPRs?I`9&yRXRk~$gghBqUHqL4xmtMtVD2F!n`DBU&Y
zA@L!Y3w6XoW)F{rN=O!R5%FX>|1Ypcy+BCeYqX6PttY}QV(d8A+D=AhCvAj2I9Ci+
zE_xz1LN~*Y8IN@_s1s-}DbcJjI5vpO#CDDjrv=T!AxN@1Y#t5bfti^9CyoyfXpL_T
z2V8Sei{e7KzA*ct9Fu(Nld9;CL
z?d=gOO0=h4Y+4Jb!Gh3(cScOi?2L8L!@
zXRz-XiI$JM!z1>gk%aITI}Ha2`#~+lD$VpAZrrCeDp|VeRi;hXLX+MU&wulyCi{V@
zp~_QZXJ}92zB_-Nbp#$k+W_m_M`OPZC+5?&W-o>zKXw6;Mw
zPZVMo6>O;(y{(rJ))j>Jj--v{g0^&C9d>R#xu`p+I!;{+20Fvd@~tlHPH#Z}#D#80
zwJKsBYO=M&SD3rt(@+KWTkw{8Sk2`v+CyWht11NA9@xI&HVQx{ji8>XzDsLtBV)te
zncQFSH2RmvZZP^+XpO58RW`&kpI(%5tDHnrJ71E)Kc>S>es<7(F(N@%94gfc
zt}u%Qr8lQ*gBzd@RpP2l;SukoBN6k<1H@t7b$bS(TH|}1=7p2j`DH3Rgr=l(6PIL>
zoLb8o5hMoHL6p-P+JoNWY5<8%Jy_)&dQZbMH@;n1k5gZVSDG59CRwN@mS3YieR+R+
zBAkSWPvs4(spUN{Y+l|!Sg;6&bFUYtQyI6H=HmrUtM0Jb+GO9GuVy+uB51tb7Yv*T
zYFD3tL}TJ3oc#GNW=rR=aO>o4-~yYIy{l>KgSZEC^?)4Dv_{}AeTN7(PtHQSsCppR
z-O&ueZ%;ojbgn0xqy?c1=D}`fMTVQ+(Hf7#GMidk%E4&NTj|ys)55Ur?JSdKcj|Q#
z@lkkIq~gI09sUQhXE1Oi`1G%+0*FVX$zZ^K;H)*Biv-5nT~_VsJQLwR!63B8U?hW)?=-Hdlqq`a)%WG*cKqMfqu&U6`6B@bTa*hHb`MGTvKIJRjs3NL+*6oUu`f
zPz-+a;yzVqgUnl|_Ft%7(MqVuf;hXE{lHCF2ZJV3dw8A0ZK9=1GTeu=CHDQBU?IYD
zYb`v2rzovi+{2bQ@h4?87jd5uw$%IJMg@8LZ1vzM6o{&c7{V%n5d_#@0$C223kja0
zjv%e6ch#8!Yiyzet6(Ps>o6M6;8nan=LVmWkAUisOgL8(UDj`QAml+b0wtTWQz}))
zSJ`rn{zz=D(Z4h{djmEwSX!(^ZPaMhTGKdHXyg77DUCNG*u3gne57pNGR1|dUZ|DD
zUz|F?3wuqfM>2#Z)dh{pi{q#ASe1LBs*PR_05B!hk@A>Ki}d9}v5yvdfiOihrQ8wUSumgQPT
z^#CeUufkXX@5DLrvx5#hRD)I=NS3K=5*W_V>qWl{rNnBGEPPs!nOv=RtGrjq3z|oz
z%TQ`338%qxgAOAc(jbx<>pSsBsbK8L>)Xq6SeSZ@BwFdhWMPA9H$=OVZ%8pZ3SwOU
zve7>|_N5K7hM2X<8_siH#wcItPcL%K1u0ta&UGs3R;U
zDFUi^?@j0u_Vu&Ua)bjE8WCg%lxXp`R{m?P8%2g!!Sm&i8ysliZz-Pe)W~iKi$2@-
z%_3*UuodHBQkRe`Gg%(oKyxZiY$9Kkf}%9HjO|Gs??vP=@Th3JlaO^YUi*R06`J)L
zM<&jp6-PabbnTBvoEC@yMN~q%Hte32CG^+Hq!Y-3#Bck`o&Ye^n)8gAcjrS3G3;f#
ztlv78_U$6c{iV}g2vq6cNn)6j5UD?NVll)n<{W@3DD~vmQD0afGzl}{o*aCRADki_
z=2bm;e{nE5XBgAp9!e}Kj3yT4)qV7PJvnnErUkw1#M->mWvgOe+8O_dh*2zSE)^88
zHm|BVM?!u%g)5yXB(SvQ%{h1(*lmIK`cKw|O268HNamNIhp(p3)}H)Y
zPDp#QH5Ayq^3-4%J5cMD$!OkkaoPKe-}-JTT@VzuHovho{+xMvA)b$wYN|zTDK{_A
z!=;ipwz8(>5Q?(SiryT8!!Lqar~p8UnO`j=uM&6I*a>7SB%*^ANS&jk`adDWz7Sx2zfof8}0FuZtes9;}u
zB+1-Zal>$baBaxDuX&9iE1ln=o-T=^!RCgr5bsJ~CbW6gB=GQPFj?(4`p2#G(oAxe
zKV8Tn{kWAQX$9i_OdFVjLG*L=sG>-tI9wRH1Q$&*H~5=?sf
z00n0WnNK)qk3fD%dRC{TQE?y+baCD^r9)P~=SLLO6W>vFO;58*F`ox*%F>k6!x3eP
zc{T1$&hc9d;0GDo(7-vRvd2`T@-mUcE?7|-H>ONK0Yq}-H>J~aChwpa{&C^2T`ni|
zz*%QM45LVV0&)-tQ>Q{NTp92^7BAbrnT{X=
z{9VAVs&sD53A%Sg-2258V;u3+r`FgO<8l;^HMYd#YmI#r=S~9KckScO`lDlr5YJ*H
zTi?`7<`$KC)kJX=7tUgxcLwDBKwjd8!cf(cQor`?hg6AB>D0=FrBh?)RW8VhP1ByN
z)SlFH0!LQ*%68G_C6fTCp&&2fem+vRBmRkKB$Xxc=k(;|r)@Y%0}Wnp#Qlu=W?q%I
zCiOVHU(Drsu?a?sn+Gsw=b_S!Z^?s&q(`@$B9FqBJoJ#Xr)3nW#N~ydM4dP7PTb(t
zlMfWb={ATW2Afk+3ssZm9Am&uE$q-@f_UMx1Dod;oX)$GpGoCu2*2&EynoQJ>*{3a
zoZ^Vt6|5|YO|SfVPV8Lm$x+&q!JI(%%5kuSFHH)rbqC$g2l1>Ux5m8#4#{F8PY=8VI@V4ed8Ja-K;lqb{X!#!&;aj>ZKK?0ZXiqsqd&(KwQ!=z@*^8i?
z#a%onx%!-sH_EUGHPGr3#5%U+M#`Q?w}Uk52@(;DP87;v74K_x_RR*0!>X&5ktlO#
zmEzeP1rG74R6Zc)k)ZLcZFSRy+?rG@s)+duS#@ktn@C|03e3*a8spHy20vtI^`9bT
z_u`f)O#Ei@b@NBgI_(O!s3JdE!u(*Tcut&)y=WsL6Nwiyyej-%DU2D=c!%rQ?BN9R
zn<^_3*dgnGGaw`s2nTI<@3*@soU1iqFLm{L9%O65oe^%}+Em03Ncf~gPHAW7B|LXy
z0XAoQ6Q0}EOJTxui@bz$6>16rPWHPuQ*dpY}NlQP&(W~Yj6k}hp_|woF2JBV+Dt3<`-hr%Ezr=pxxW7j1
zQwQya#XN8`!r~?-DhW$G7|LP$7=SE~H0T%rEt}55mQ81YbJ9bhyDkeI2OSDJDZ<&H
zfCpc7z{})0@Nt=f179eoSpdWVRPk$8P4*5(N=#E;;=Ie`upgiM9uKzS
z@x}&0gFt?wmMqhh0#=h0PTsd*lS2lcL+|pf>WYJ00cC2+LrF&Ku@*@=<3Z4k@6y#!
z1HMbnm)Yt|r(a~xO`^ssNf!ar*|t-Y`Oe|QKy0%RQc&v8h?=9KfjzMc^aKlRn{_^f
zPOx^2NbYUce~}0pm&&~$NzXK7ifEu4c5>-SK}EYd6hM6C<_M=<>z^`Oj3k*G7N#-`
zxyvde%Z#-Cp}s%T3I@_;8$>*}*5a{_4bhZ5PS`}wwZ3Xg`+J=Nw~gilc5$!BBVGAY
zD&t7Tcn~`6DR*<+%e&|>X3_gVDM4CAw(lkKjiS9|fHYi7ehib9a)?dYa0xv1kYhY|
zK1s8QHID&!cPqsnt$usgt_PNiBC$i=EUeC-oJTG8+^^rP-j9@t9;JJwN>$
z4<-AaP5#qrU)yC(0;$ZBDYK-ka?;jB*)PXZ=Ze?K%?i!Ktb-ew40db_8Q7VV*EtTO
zdUh6LWukK?5E%5p%-dPvF~TA|IkI*G{jrh8Wn3>JB}N<@nAM*td3w9`L)w-lniZ-u
zc$M{GEz?Alj4g%}{#i}WSxk1qGl~wxM_gCa>p1@eM+n3+@v-S<(TCEr%<+pqQ7xQ?
zGQ;jyC|j5B74kB3+(IwtKkA%G?O`f>Qqfnj3f7$OTvI!j;|gTIK$q6|JB8Jn9_vO0
z_@W-;zA>)&S=##f=tfTy!#_^$B-!k5xF6oc-c@rjBk6M~M|wHubj3;$=AMofQ<_AOs>}JJ5>u%(%)41kNIq1IvFKc1K))za8*eVg&hY`m|wpzYQxnde<~
z0>F0FV=72u2bV~!IPY^z3hyaE&K20W0xTUoB(F?-BcLgo=QC)WAQ$vR`^$PY!pZ4@cA({mL4nip57
zdCG^p;&{{ayb!lpWN|AY_dYVga-|DRmxFPw@mJ2*&FX8R`r5DPFlu7wmpdZSrh4hXG*R{@B@?OJgoIBda|NU)=bHI
zoUCH*`Sx;vs`
zPpS@9wL>DBnYNtN0#XtqD+Z<19QA2O#!3`2H>av3C%Z1K->_Y=GO9r|_0?TF(ug(M
zsfVgD>2Z;^IabF9Wh7QDV{@_5e`@_9uF=vT!SfDZzgBP77YHt~taOO48%DIb^uUh$
z`infoEYMh5Eqxxb9)of#dL0(3HGTkLB(HK?r`|5C7LpMKO)@-WK;T8j%OIznZiwbB>UnP8=V#ywX^
z#w%pd#G^D3+yFp;7Y+X%**j9Ug~Lnk%jW3BS_}vJqIQ=_yHuY?brm}Bto2{Fs__T8
z>m`%(QzwTF&)35W3APj?m@{JQo40Vp&ghxSY@oCQu1}i%Y^G~yrc>?!%GwSUbZPtE
z`JSM$UpOC{HJjhnCYC-NJ=cy1Hhb%;Dq^GT&FVg(_S`i`KL)?`?}%Bdy1Myqr4=Ft
z)m|;AP?7ZW#NlI?Tw^Wh|f_hvJC4dygPAxw|6lgr!oKdcOn%DRBs|th9xAZWd^SbKBpPvt@oi4p4n^m-7BH#T&!dE0YfwmPv
zJvr9_xZ&mt8a@SddBG5X^FI&lR@2vs84pvpH}Kr*=JYUg(t6T3t2Vv*z-nBnO6}NE
zd7O;h6zmPVa$?uX!^?4*Sy;-w*#D+hP*|`1P)`;;LRIC&r<+@dCU=5$4=m8#=W_95
z9$r6TS8#2ZQPdPShq=FYud1yz-Ugeq!-aNd#NHAyp792bt!@mP??z0FA2Vkw_-1e$
zFc%5V;5y)fhG@XskZJ;5K~{qJfOyyR?QP)%$eys(X!`_~u7!y9`0aNY8C#Pqn;O9)
zHV(3XM>dH7)_*;5Za{8E&zB~v(*;JqJMNKpY=6-}Hh^_{2F%S6Fae{5=^|BJ@5~Db
z;0P59g7!1|nqyvOS9?e&k39|Qw|(EGD!0KUe^x5=>4YiXF%YJxZn}qQ55!Upy%(K@
z<~L{lgng+3LFW)>Wk^rl5&0K-bTpl5L`;>+E#Q^(V$QsaqM_u^Eyz6-cq3@0gW47Q
zgMs~Vq_Bar7K}V#VNjuQ?ySq&@jlx>);I}-OG)PvYaoGb&st}{GXTOlRh~YW`8{XK
z5h(W^1Q#fG8w3|9_YedZ_%j=qy9jcRK4*h{2a#nJvb@yloP3GDZuz`pea_8lj%S3(5)7nyGI3GBTmuut#BUii0J*caT%
z*bRKgB%m^W!5Bk+obSTB7)#w<-|pWs#!(55d-VgjkL&tQeT{D_*>P`v7yrcVe5d`D
zZ_4C+Z{picB|G1@{f%)UBKV;J3YZyt
zLulU05;ZF`3vZR#1v4$R49XiNwZ7kP_8#02D%tt}pYMB~Gdj$E-*?u`T5Hx?vu4)p
zch8uCsRNfqQc|ALpnS%R*|S-pMD~^!$=-{2{4TEDaZv(TT+f)n>yt%z?Z3XpT~Y4%
zQzGT@`uU96rS9JYSl)47&nR*)BYb>3+Gk2<{k=u;@kQugU+ebc<0s=j>ZG&%ay(~3
z9bKRBOp`C-x*Yu%y`!(u3EMAb|2kbn|M+o{BH)MC;^T>5dC||sxVpd!8vps_PwXw<
zdx356`nS8sM503ZWVDa#I&ZvpYrExpi$Fp=`Qv^#Z(b8J0I7`Ezy70hOYw8wJ27I!h{E{6U2Q0>lcG|F
zifpvX+{b+iM~|?objc~)ID+lHh9CYb#kHKdc(FifDSk+)xn5r4QP)dUEoTPpU|sdu
z)M0(*Mh-raM$dM!Ji!Z)VB_|H6gFlJ->Z`A`T%0;}N}Ve?t-k(QRB36cx_I%TmKCz~27jzG
zW3VQG(6}$gK0*P62g=^C6mlxJs32_qjFw?)mZYd9lBqVzI(10)8+)p51-y3*=Ko~)
z?u)VREWo)Nz>hD5FCdN86M$b1_0N#6BoljEP!7K_?@Ln
z?Fal}z<&w&uK|Aw@Fjr1-~#X87V}%EV-JIC>4HUA>tOv=`WBS%Kw~9`J1O~Mn3Cfu
zN=|1gDP5=J!u|^U{ci=lH{b){v-AagG~h=AJ^}Ev8Y_9Cladv~l)RFnWJf02T&Lv3
z{>$)~*J5e|{zkyp1AKkJ!~T@Q84}AaO8RzEGG>^P2UChl;uh?*DG1KTgm=n7kCTcy90hO;Ku@f
zCg2wVel_4<1^hO^zYBQunfW>3PXNBO0{$+%9b^z%NI(mZqJ4?zf{2A4)_j$4+i|*fFB3=>41L>@Xt3EwWgD(4Z}q3Oc7O-
zDeCieqQ2XI8QvZ6-WDJkS<$}rwp*Jt85E=&@Y7Vb#B<;uPWaF$JioO`
zi$PA@2Dc{)fVT#RMh}UKiiol{X9LYVn>PJRg9Z)m7)%`boG<^6MlaUpl`2@t20jmq
zh>nbkijIi(s#F2becJ2x`$dOGMMQ^3_rJ6GZO$&xgSDM+hrytq?T6_u4A-oEJb?nQ
zz(3u7r{2+Eadbpfv?fC1X!sBOt>K+Qh(c6!w6%Hv{!N-R0TC6?t=(_5hWJOIiy?S^
zXaD}1$~!8ahc&9xD6nBbL{xY*xZK}MQ`w@wQ#^uug@$&n)u>KDur=Bm9T^erz!E0J
zks`yo_6zf`?N)z?^8tAk84h#hd~WUO?@{kYH@C(Efgw?hjtYh#U49YZ
z!9Vf`0v2*pDn?ud+S8tWierQ8*Xdkux0m&-iocXrpSD
z>U_Bj6os{O&&Uw>WESNLfAOKKVX
zjRN=zyLRnTAAb0u_6?37D^|yj9aG1TA6MUf_nrFw`|s6HKbC0U;QaaX>epX?Rhbt>
zt%I+y|B{QH^>2xZt_LPM=;h*ZnCNCiaiFg@!lZeCHi4+xYt5RNwbb=M9U+x4pl+
zdjns`$R(}5eem=k-{kQk?YwhRP
z(ofTO2a*M;WlMdj(i>qfzAh@lSJv4LWp#=7O&!(;obp0!5Cz8o6z~diAgZQm_o<6kKThd>Qi1
z$cE$i_#a8+58K7+rbdap^+2ZhTUM@I`8UivTT4nxzWL#YAHK!hbL`BSGbd0MpF4N1
z1nbh{pMU=Ox_94w_eq==^6S&54KM+bLDLgm}BkN6bU(Nb7gDBphjtzbSC?7MJ-N{Wk%1?z9&bI>4IgMV@I$n>TNs1Gg{Y;fEg{nw*>*zG%^+82Hu1tsSyo)22by0SO&Sfgf>t9^2;ys(@#I0{ph2QUiI8IHmj@3X6lUrM$AcyHgStbQ
z_uqeC)4(x={tL$ed-dGHtw^}Og!|=u4jt|c8fvP)%`RNHAee@<{09aGwxt|1a}2O=
zCR?{|6^yySXVyB*G2obBUrlG?zI*p>p{^W0d{}=b4YUE2H|hz;h`4R~K_u>oNbrXu
zUEdS&e_OH+2DZ(oWGIE?Rv
zd<6cJ28==X_eDB`hNMpr?o*)~Q6ucZf6N(T>_6on^N}!{@nA{(jhs{dIUbZN%7&qZI!-yG+*3D9Nxfhi{*}lO
z&=3Xr?+<>HhM;{SJwI^J5CC1kKDqc7>gu$kHAxzO*y2p=@pr>GETW##wp2$4<=u5m
zj9jrK|M{Hr0%2)qT+_(o&{5Lxg-9f5hyV=*XIyJs@$-*(vh)hXH2eAIp9S+wrA!)3
zHgr@sCWaPQIxbzhB>P`mAurC3lBFrZ^6Zoljo7{4h>ZAJWY|%WXwvYhQ=cI-eT+WS
zCPANRliETLsLvfBPt@t(5NE4~28}<)dm;8NykXZDkQUlQmWBqFlzYl`RXU7(QPwF-
z#d|l%OEdaN&a@zTA+4{bfdt%lO5`3V4Kc+cgMrgP&`_bzq`|4r9chzbQ`^8MwFC`b
zTO76(&CIJpgT~+6+q=U4V~*!M|GV*^{&GGr<3WAkwb2ve54$JZpL;-Tv--%I89|Z_
z8diV?rdZg|F`!`-Z2Vu3iI4`P&!oZVGi_3*cft3!MB0%C@Za*9$WYiRqz|jZANF3$
zKXh#o{UPd{p~KL^G2ndQIwpn|mc*ZSa^t*E$$zM~8cvG*
z4Kz@nX_KO0lZ-x-2BXiU!RRw>lJ8cHKTZ-=O3kQeoL}H)RbOwl|XoEkWPWlM2txw=V#Dhd<>s$4&X}MhyAC$F!JB4SFg4l
z3zMm@#GAMgcS8$pKkYr&HME^xUiJ`J^
zQa^btAxs_~A13J!gv*oD?v*b;*(apI)yL5K44cGM9saQQTK<90BH~WGSQ01VO?@Dr
z&6v=}yZXoUhe#vk|EIe2i|G-;jPM|FjvLgXu>wuU)%VY&M(FFE!bW2g|GJ3HD`W-wUy_
zlorm8;1
z-=P~wzq3EYAF`n3A9B5bv^?_2BVxDPH9dv~mRF*KG*Pw)dvgD)vU5QX*$x`Ef(EV6
zpn*2Y=<_Prq~)o>@?3I=JPjHafrckQ!#^NL^WwraqO?h&Eru5?_DmS2A@m#=m#mA4m$N#VnRvel{2v`I#vSHmWehGk9~{slQ&
zG$~XbPjJvcM756re|s?U3fQaPMBc9=`@C$%F~|IyiE|85Nl8h2h%5DxIxu(cTupX`7FHi5cut@O#R{0bj~`utC)K39Res{cd$Irakw4&*q<%P+sIeJ}bn*dLJT
z)2CO^a5XxvHYUV}HfaNG(zWy%G%!`e>*{VbBcE{AW(RDi`1tsU)oK--%@F!^X=!OP
zb?Q`ExNxD?Yi#*wyF*{7I$Fqk&JDCx^o8h;ay`IAeJ)xvP0D}4dLw7Lo}XQPjOU&D
zycoK`R2{PB9`-64!PoyQt~LH0I&|ocqdiIZsrK*SN9%FO&d!#hLx&2^7Rf*V`AJ*6CSE+mh3JsB{%$x2V}}k4o(Xp_DCX}<^OVYK}u
z?!=X2L>f%yJ@T8f$8`>UJB|hQfjX`A8RNkD+2}KElB+)N-Mo^1;4;JOtCc-__Iw6s
zdh|XB`*!NG;EbZ+Op;(vOWL(-C-AK`F2sZWl4~XnCL20fvOQ+Xlxdr`^@9~-<&A%Y
zV19P^7@VJNbHZfv)8pl{_czIf^XCs@ylcGwfU}gcV8Md3IHM%<=g&WX@4ffR0PJWRQ#k3?PJencVGle|YCiSq<$sV0F1KYz*b`lI_l
zYzw-(3VW2r4?XnIZ^RSlMs=o68yecTZ!b8zrpJME24$YUkD-J35`W4(>EzmgCG~+i
zLVYk}!jdvb-^9#4Tzhl<40-uO`=8V;%0FZ&3+KfSQBM&Om)Wyt>v13rw5!-#mQkZd
z>H8c5(qJ-aFza5fDd}TV?ujQ$$`ENY^2Ph~spxaVmi!BQPu+3O@F}h#t_Oe0Ju_vA
z`^}_@xN*-JXYaKBhJ}SmNJxm_bc!AaJ|~Q!fu)fvGbTn)s2{}Ll$0Z`87Nou137kP
ze*^e4+|~fOCP~{*(7g~L0B`Vc0rmOKH{aAWkU!ivgnnomVq;@v@ZiB(wh)uo_>FF7QanHuS(@7xBm=EJRHA&ik
zFfvI0$MBW-6c-n3d@!~G--4%#wLW0H1@fk4fO8c2%=*l{CJnTs#siiYDJ0r+$hw(UO5k+XsOPTYGGNHgwH)A&s@;!FRB_8LB?<`$1dB`N(xl$lFbuHp$YZOSL?c-zJl8
z$_{mheiN@LGi;A@0c_?=cm>1t@}0Od>HP=Sd6D>=Od3qS+L(|goQc-+6=jStq>cDe
zwhCc8NsGZ9{^^?Qly?W#m;dVhALy4}|6=}LKw8LumWB?Nh7OkGDeVh&fth2&dmIPa
zPUwC+4e-3H^KeKctIsIcCZh%Xi*+
zN84TUfci>#C;rp}$m42s|7T^!m4asEr`i96ov7IVAq|{6>A%qaQ4TmCaqc9Kx#lGA
zIS%v%%sB9xHlMbZb_{C@9oGQO|7qMwhn9a=*{CY#=Gy43Ye`w>HS2IJDD&hqX{4+}
zj(&rWa1vw1d6j1@{>a=!{H~R5K?R;c_O|L4kD&i34#CBFTF{*eA;Y1%XGyWpL{=5f8ns~NOD|^`>@3rrr8r7AmOQ6rK@LH_&U0F|0ZRXtneu!^UlAic4;j1Qask;j@tP5p
zs{4UxuG+-DJh2uGz?>fy7Z*1S{(d<2{6ld+0GU4bZO9dJ%C^5K6Pb=!&%lozF*fc6
zGp5iPv7Is48~*qwjndU{Agy=QsZ*y1)^;h-=`z|g_?+iqZ%<(^_#SrY2>kPG?2}D|
z4;+sAcoU2AhQCoV9x)Ea$+#C9g8f^@ycsV(bXKGYyK?wG64MW;f2Gs{HarJ?meGFF
zMsmGG9GFcE!|C(?2D^F|HuoUx=Id;jF;m7H8Ruerg?r+R4}Ek_WFKg+L9Ew{b{hK%
zvDkYWNgU`qaqY?Zk#?PP5p6YnS^6n_#?MTn5vyX%ld(3&czAw+F)GG)824w4jp<6n
z$D?oIvhAK?#nX}+}mf&
z*T^H|y^J&VLHSW-ne^hEg7s@G{M?bm!{Ea4rw>eAIR9*YK2h&!Gv>@adDA~}_&ac5
z+>`NhEswx~u{OqdD36R$G1kdA;XTk1q}|Rj@!~v=eU~JB|864ZW@du}eGc-3zIFcG
zFdge-e1`joOzfMPu~){A8S`_MN1kP2jE1p3#+WFNes5~f3#NbiK_FMbc>4P#G
zJh*Q{UNCWgov|jyI2bEqoQd%b#>p6CVr+vkGRBu?k8|W_=O+kp(DI15tBHNHJ=gwe
z1DNUCFuUSGTsZza9t{@8yVe5-?rCywoG}*4Bja3*uQ1lAV_eSu=RDA#U1^;#@)!g@
zAXSV%eKW)%V)4Dpk+cO|@6$gg-Nc1FV0j=XLB|aluVU<$F+;|lS32W`)CtB%7%R*`
zjLPtUow?d*#%_@+9q1IRK**)yX64Ic2T7!izpO_A8RG-;zi^JoVI5+V0Wra{inK4p&
z{E(D<{qbdagibJiO?g~4_a1g%?fehhN`Kpn{Ua9fRd6u9x*0k)jdv=@6Hh#$*LTE;
zc9D8OJjer%4|#L;3v|Fk<@z^X6-|slON7hO4nVeC=h{mB
z^8#_P5zu?R4&+?VxC~=U@UsNxzBS!u{Y5;O$q(XDb!>BYqo;@6nB9-)xiPOdRl@0Kt0&WXO;q8aLv=%sSKo)}!3ePollxxkT)R
zXQ5rL6>5^S{iXfEIr@oguNZssobzdO7+b(Q3v`T`dO`e{$Op5>OMjU$75eR^x=#@
zV|0qP%!+jr*G$|)H~_b%E6#Ny8UI)@e!#w{YqU$ulvlzs#bSBFHSlJKOQ!ER|C~p;
ziMEM)L|o}RP`3BIv05^xIDEr#&h;DDk6gcUt-!TP0M<&+124u}xh5y>lz;a7^4v)6
zIti`$(-}9FA^mKhjgmE>FCFRBNUsuMTeLD)Y
z-#-UyA7>)58_7S+3)*fmCdcywgf%j^Zf;*Wee|H_C)ZH_NYMSCGx9^(qphdi;aK9k
z>zb#u)f~$&cV^4_`2+NM?iDi!$)49=V)q*jJifc4eKp!I$_?!{eJ+j%?Wrp+{LH!B
zz_Mh0GydeA@wj`yVq*tjYhn(=r^y`an{5Y>fGq8Y0+<8tBZH5(cK%VhAS)nM&Lax!}0c*p=t=O
zBk{LCJ~O>py9B`yoUzJt0=f5ZYm6Q67hDISk#{dY+`X0r~4R=miUCNS@8av
zSTzlG#^K#@ap=vWMyNFXiIxA3ueQ2X^c{}-X{bq@l7U+S`ber#n-O?FP8=Yo0V)=F
zvTvr&_@%GQ?T^&`rn$7+32$0w4$vj}&COwVz>p79b^Ro?IN625saOVdMg2J4C*F&#
zTA}`6T#wf^$(dyEgj9?N)l<>t@9$aQOo!o%l@^Rb&G{9maU1GvnAmNAdekrP=N|9yu{NqjHo7-Z72)yH_1vb#hhN
zD)%hUEZ?latbtjhvL4riUrD$8=u_RQulHyi?tTII1SdHKV%u)pZQEx%Y&&Wzvnji~-NWu_x7dB{0ro(9h<%_v
z#y-k^zkPx|**?>rVSmiN*uKQR%D&cKVBccjZr^R+XFqH|YCmZ&wU^mdo_n50o@bsV
z&o?h1FEB48Z(!b*yu*2Ad7k-!`7`qu=P$`$mA^K>Ab)#4Xo;bg<5efwGqW?YAIn~x
zy(D{8_S)=%>@C^bvv+6j%Ra36R+g=D$UDy*OO9_&Ku%!JK+VVdb0*{@=giE>$RT6?
zoBqpz|6Lqt)iSKtm=S6C#pp4Kv8m&xjER{wZbEEoB7XdNB7XlpZOq7o2jZrT$B$&E
zS(6?2#&nz>mo$CM@NuaL$*7P#YwCn4Ngd;6#?`;Me$1d+wQtoF-Zf*=Jx|YS@YWt&
zF>{4#wI;?UB;nm@$+KEnI<)U#!EdqSXTEz`rcI6S5Xd|A6B3iBq)xS@#+grPv(oCv
zr%p+#
zXu4zCw6VUaKE55&GznvX&Wzb(NI|>c);@Pxe2`sW+Ep|XH#1?XuWQe~?b?B4w;OKN
jFY3UN;A)80+`yj

literal 0
HcmV?d00001

diff --git a/.venv/Scripts/pytest.exe b/.venv/Scripts/pytest.exe
new file mode 100644
index 0000000000000000000000000000000000000000..2f23cfb1ecdc21d047168a11f33eb4eabc0b4a67
GIT binary patch
literal 108433
[base85-encoded binary payload omitted]

diff --git a/pytets.ini b/pytets.ini
new file mode 100644
index 00000000..92b26c6a
--- /dev/null
+++ b/pytets.ini
@@ -0,0 +1,3 @@
+
+[pytest]
+DJANGO_SETTINGS_MODULE = Videogames_project.settings
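
Note: pytest only auto-loads configuration from a file named exactly pytest.ini
(or from pyproject.toml, tox.ini, or setup.cfg), so the misspelled pytets.ini
committed above is silently ignored. The DJANGO_SETTINGS_MODULE option under
[pytest] is also read by the pytest-django plugin, not by pytest itself. A
corrected sketch of the intended file, assuming pytest-django is among the new
requirements:

    [pytest]
    DJANGO_SETTINGS_MODULE = Videogames_project.settings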
diff --git a/videogames_register/tests.py b/videogames_register/test_app.py
similarity index 99%
rename from videogames_register/tests.py
rename to videogames_register/test_app.py
index 65e3dc08..0183fc93 100644
--- a/videogames_register/tests.py
+++ b/videogames_register/test_app.py
@@ -11,6 +11,7 @@
 # Create your tests here.
 
 
+
 class GenreModelTest(TestCase):
     def setUp(self):
         self.genre = Genre.objects.create(title='Action')
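
Note: the rename above is what lets pytest collect these tests at all: by
default pytest only imports files matching test_*.py or *_test.py, so a
Django-style tests.py is never collected. A sketch of an alternative that
keeps the original filename by widening the discovery pattern instead:

    [pytest]
    python_files = tests.py test_*.py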

From c187cded26991b48a27b0931e719a2dcc6518fed Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 17:07:47 +0100
Subject: [PATCH 02/17] Update requirements.txt with new testing dependencies
 for compatibility with pytest and coverage, plus version upgrades

---
 requirements.txt | Bin 370 -> 698 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
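
Note: requirements.txt appears below as a binary diff because the file is not
plain UTF-8 text; redirecting pip freeze with PowerShell's > operator writes
UTF-16, which git treats as binary. A sketch of one way to regenerate it as a
readable text file, assuming it was produced that way (PowerShell):

    pip freeze | Out-File -Encoding utf8 requirements.txt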

diff --git a/requirements.txt b/requirements.txt
index 83ddeef611d2abe1211809892226c4ec020c7d01..58784fb2a6dd5ab8b400e06c2eaa6f7263afc1d0 100644
GIT binary patch
literal 698
[base85-encoded binary payload omitted]

From: Marco
Date: Mon, 11 Nov 2024 17:09:28 +0100
Subject: [PATCH 03/17] Added workflow file for gitflow strategy

---
 .github/workflows/gitflow.yml | 54 ++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 .github/workflows/gitflow.yml

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
new file mode 100644
index 00000000..fd8ff71c
--- /dev/null
+++ b/.github/workflows/gitflow.yml
@@ -0,0 +1,54 @@
+name: Git flow CI
+
+on:
+  push:
+    branches:
+      - develop
+      - main
+      - 'feature/*'
+      - 'release/*'
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.12'
+
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+
+      - name: Run the tests with coverage
+        run: |
+          coverage run -m pytest -v -s
+
+      - name: Generate Coverage Report
+        run: |
+          coverage report -m
+
+  release:
+    name: Create Release Tag
+    if: startsWith(github.ref, 'refs/heads/release/')
+    needs: build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout the code
+        uses: actions/checkout@v4
+
+      - name: Create the release tag
+        run: |
+          GIT_TAG="v$(date +'%Y.%m.%d.%H%M')"
+          git config --global user.name "github-actions[bot]"
+          git config --global user.email "github-actions[bot]@users.noreply.github.com"
+          git tag $GIT_TAG
+          git push origin $GIT_TAG
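
Note: the tag push above relies on the credentials that actions/checkout
persists by default; on repositories where the default GITHUB_TOKEN is
read-only, the release job would also need write access to repository
contents, e.g. a sketch:

    release:
      permissions:
        contents: write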

From 5cafa0a3e43acf10c1f7996f1b0740f7fd72a28d Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 17:15:31 +0100
Subject: [PATCH 04/17] modified gitflow workflow file to tell Django where to
 find the configuration

---
 .github/workflows/gitflow.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index fd8ff71c..7f3a3859 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -28,6 +28,8 @@ jobs:
           pip install -r requirements.txt
 
       - name: Run the tests with coverage
+        env:
+          DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
         run: |
           coverage run -m pytest -v -s
 

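Note: a per-step env block works because Django reads
DJANGO_SETTINGS_MODULE at startup, but hoisting it to the job level (a
sketch below) makes it visible to every step and avoids repeating it as
more steps are added:

    jobs:
      build:
        env:
          DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
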
From bbf318b6694d475ec9796b3ef2b37ab1df627b20 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 17:29:44 +0100
Subject: [PATCH 05/17] Added credentials as GitHub secrets

---
 .github/workflows/gitflow.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index 7f3a3859..5d2856f2 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -30,6 +30,11 @@ jobs:
       - name: Run the tests with coverage
         env:
           DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
+          DB_NAME: ${{ secrets.DB_NAME }}
+          DB_USER: ${{ secrets.DB_USER }}
+          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
+          DB_HOST: ${{ secrets.DB_HOST }}
+
         run: |
           coverage run -m pytest -v -s
 

From e6a27aa4c4da92795fb4c980001743b459184b95 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 17:46:54 +0100
Subject: [PATCH 06/17] Added port number to GitHub secrets

---
 .github/workflows/gitflow.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index 5d2856f2..fd3fcbd7 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -34,6 +34,7 @@ jobs:
           DB_USER: ${{ secrets.DB_USER }}
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
           DB_HOST: ${{ secrets.DB_HOST }}
+          DB_PORT: ${{ secrets.DB_PORT }}
 
         run: |
           coverage run -m pytest -v -s

From 1e11bb0569cae86b66253a35fa80119bb64d2f4d Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 17:54:57 +0100
Subject: [PATCH 07/17] Changed from pytest to the standard manage.py test

---
 .github/workflows/gitflow.yml | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index fd3fcbd7..6e01e595 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - name: Checkout code
+      - name: Checkout the code
         uses: actions/checkout@v4
 
       - name: Set up Python
@@ -21,13 +21,12 @@ jobs:
         with:
           python-version: '3.12'
 
-
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements.txt
 
-      - name: Run the tests with coverage
+      - name: Run Django Tests
         env:
           DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
           DB_NAME: ${{ secrets.DB_NAME }}
@@ -35,13 +34,8 @@ jobs:
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
           DB_HOST: ${{ secrets.DB_HOST }}
           DB_PORT: ${{ secrets.DB_PORT }}
-
-        run: |
-          coverage run -m pytest -v -s
-
-      - name: Generate Coverage Report
         run: |
-          coverage report -m
+          python manage.py test --verbosity=2
 
   release:
     name: Create Release Tag

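Note: manage.py test creates a throwaway database named test_<DB_NAME> and
drops it afterwards, so the DB_USER credential must be allowed to create
databases. A sketch of the same step with Django's interactive prompt
suppressed for CI (--noinput is a standard manage.py flag):

    - name: Run Django Tests
      run: |
        python manage.py test --noinput --verbosity=2
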
From 20d288b60faf991bb09e17263590987a5ec17f21 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 19:20:20 +0100
Subject: [PATCH 08/17] Added the PostgreSQL service so that a database is
 available to connect to, and configured env variables

---
 .github/workflows/gitflow.yml | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index 6e01e595..76946033 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -11,6 +11,15 @@ on:
 jobs:
   build:
     runs-on: ubuntu-latest
+    services:
+      postgres:
+        image: postgres:17
+        env:
+          POSTGRES_USER: ${{ secrets.DB_USER }}
+          POSTGRES_PASSWORD: ${{ secrets.DB_PASSWORD }}
+          POSTGRES_DB: ${{ secrets.DB_NAME }}
+        ports:
+          - 5432:5432
 
     steps:
       - name: Checkout the code
@@ -32,13 +41,13 @@ jobs:
           DB_NAME: ${{ secrets.DB_NAME }}
           DB_USER: ${{ secrets.DB_USER }}
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
-          DB_HOST: ${{ secrets.DB_HOST }}
-          DB_PORT: ${{ secrets.DB_PORT }}
+          DB_HOST: postgres
+          DB_PORT: "5432"
         run: |
           python manage.py test --verbosity=2
 
   release:
-    name: Create Release Tag
+    name: Set the Release Tag
     if: github.ref == 'refs/heads/release/*'
     needs: build
     runs-on: ubuntu-latest

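Note: the job's steps run directly on the runner rather than in a
container, so the service is reachable on localhost through the mapped
port, not via the hostname postgres; PATCH 09 below corrects this. It is
also common to give the service a Docker health check so the job waits
until Postgres accepts connections before migrating or testing. A sketch:

    services:
      postgres:
        image: postgres:17
        ports:
          - 5432:5432
        # Health check so dependent steps start against a ready database.
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
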
From 5c80d2870ffa9e6afe9107209ca66a437d170637 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 19:25:00 +0100
Subject: [PATCH 09/17] Changed host to localhost

---
 .github/workflows/gitflow.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index 76946033..bdf763ea 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -41,7 +41,7 @@ jobs:
           DB_NAME: ${{ secrets.DB_NAME }}
           DB_USER: ${{ secrets.DB_USER }}
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
-          DB_HOST: postgres
+          DB_HOST: localhost
           DB_PORT: "5432"
         run: |
           python manage.py test --verbosity=2

From 315a337500edb6fa35a27ed3575bbdd4f3bd4532 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 19:31:03 +0100
Subject: [PATCH 10/17] Regenerated the app's migrations

---
 .../migrations/0001_initial.py                |   6 +++---
 .../0002_rename_genre_videogame_genre.py      |  18 ------------------
 .../__pycache__/0001_initial.cpython-312.pyc  | Bin 1571 -> 1569 bytes
 3 files changed, 3 insertions(+), 21 deletions(-)
 delete mode 100644 videogames_register/migrations/0002_rename_genre_videogame_genre.py

diff --git a/videogames_register/migrations/0001_initial.py b/videogames_register/migrations/0001_initial.py
index faaaf3b2..868813e6 100644
--- a/videogames_register/migrations/0001_initial.py
+++ b/videogames_register/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-
+# Generated by Django 5.1.2 on 2024-11-11 15:14
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -20,13 +20,13 @@ class Migration(migrations.Migration):
             ],
         ),
         migrations.CreateModel(
-            name='Employee',
+            name='VideoGame',
             fields=[
                 ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                 ('title', models.CharField(max_length=100)),
                 ('release_date', models.DateField()),
                 ('description', models.CharField(max_length=100)),
-                ('position', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='videogames_register.genre')),
+                ('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='videogames_register.genre')),
             ],
         ),
     ]
diff --git a/videogames_register/migrations/0002_rename_genre_videogame_genre.py b/videogames_register/migrations/0002_rename_genre_videogame_genre.py
deleted file mode 100644
index 0742ae4f..00000000
--- a/videogames_register/migrations/0002_rename_genre_videogame_genre.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Generated by Django 5.1.2 on 2024-11-03 10:24
-
-from django.db import migrations
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('videogames_register', '0001_initial'),
-    ]
-
-    operations = [
-        migrations.RenameField(
-            model_name='videogame',
-            old_name='Genre',
-            new_name='genre',
-        ),
-    ]
diff --git a/videogames_register/migrations/__pycache__/0001_initial.cpython-312.pyc b/videogames_register/migrations/__pycache__/0001_initial.cpython-312.pyc
index e7bc0e8864ecbb318d1320548f997572b963396e..a7673e3b2407946b02616f0ec3f29e582373dd79 100644
GIT binary patch
delta 108
zcmV-y0F(ct4517S%MA?*00000Js&b>J+Te&0tpFLX=G(@M`3M~1p^`m1!rY$a%HnH
z1H%CU9FwL5Et4SyGc6d<8Y`G4*BUF)8Y{47w`s9y*EUAcHb&PrPti6{*EUztHdoMG
Ochnkq&=7f(fd!()(xE8+|xf$0vNzpk;*Ev$rIa1d-TG2UL
Q&|iAg9edCae3O3#qDX=!Q2+n{


From d3327745b1b45f4a0221fc6a8a53bf817da6dd8c Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 19:33:50 +0100
Subject: [PATCH 11/17] Run migrations before the tests and switch to a
 different version of Postgres

---
 .github/workflows/gitflow.yml | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/gitflow.yml
index bdf763ea..3a2b6057 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/gitflow.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       postgres:
-        image: postgres:17
+        image: postgres:13
         env:
           POSTGRES_USER: ${{ secrets.DB_USER }}
           POSTGRES_PASSWORD: ${{ secrets.DB_PASSWORD }}
@@ -22,7 +22,7 @@ jobs:
           - 5432:5432
 
     steps:
-      - name: Checkout the code
+      - name: Checkout code
         uses: actions/checkout@v4
 
       - name: Set up Python
@@ -35,7 +35,18 @@ jobs:
           python -m pip install --upgrade pip
           pip install -r requirements.txt
 
-      - name: Run Django Tests
+      - name: Run migrations
+        env:
+          DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
+          DB_NAME: ${{ secrets.DB_NAME }}
+          DB_USER: ${{ secrets.DB_USER }}
+          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
+          DB_HOST: localhost
+          DB_PORT: "5432"
+        run: |
+          python manage.py migrate --noinput
+
+      - name: Run tests
         env:
           DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
           DB_NAME: ${{ secrets.DB_NAME }}
@@ -53,10 +64,10 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - name: Checkout the code
+      - name: Checkout code
         uses: actions/checkout@v4
 
-      - name: Create the release tag
+      - name: Create release tag
         env:
           GIT_TAG: "v$(date +'%Y.%m.%d.%H%M')"
         run: |

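Note: the migrate and test steps now carry identical env blocks. GitHub
Actions does not support YAML anchors in workflow files, so the usual way
to deduplicate is a job-level env, as in this sketch:

    build:
      env:
        DJANGO_SETTINGS_MODULE: "Videogames_project.settings"
        DB_NAME: ${{ secrets.DB_NAME }}
        DB_USER: ${{ secrets.DB_USER }}
        DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
        DB_HOST: localhost
        DB_PORT: "5432"
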
From 085600bc773730f5dd579b8855c9c841bac77480 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 20:08:44 +0100
Subject: [PATCH 12/17] Changed to a reusable workflow.yml

---
 .../workflows/{gitflow.yml => workflow.yml}   | 42 +++++++------------
 1 file changed, 15 insertions(+), 27 deletions(-)
 rename .github/workflows/{gitflow.yml => workflow.yml} (66%)

diff --git a/.github/workflows/gitflow.yml b/.github/workflows/workflow.yml
similarity index 66%
rename from .github/workflows/gitflow.yml
rename to .github/workflows/workflow.yml
index 3a2b6057..7ae175ed 100644
--- a/.github/workflows/gitflow.yml
+++ b/.github/workflows/workflow.yml
@@ -1,12 +1,19 @@
-name: Git flow CI
+
+name: CI Workflow
 
 on:
-  push:
-    branches:
-      - develop
-      - main
-      - 'feature/*'
-      - 'release/*'
+  workflow_call:
+    inputs:
+      python-version:
+        required: true
+        type: string
+    secrets:
+      DB_USER:
+        required: true
+      DB_PASSWORD:
+        required: true
+      DB_NAME:
+        required: true
 
 jobs:
   build:
@@ -28,7 +35,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
-          python-version: '3.12'
+          python-version: ${{ inputs.python-version }}
 
       - name: Install dependencies
         run: |
@@ -56,22 +63,3 @@ jobs:
           DB_PORT: "5432"
         run: |
           python manage.py test --verbosity=2
-
-  release:
-    name: Set the Release Tag
-    if: github.ref == 'refs/heads/release/*'
-    needs: build
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Create release tag
-        env:
-          GIT_TAG: "v$(date +'%Y.%m.%d.%H%M')"
-        run: |
-          git config --global user.name "github-actions[bot]"
-          git config --global user.email "github-actions[bot]@users.noreply.github.com"
-          git tag $GIT_TAG
-          git push origin $GIT_TAG

From 5272e18c7ea0ffd17004726e585a2c2df6ad02cb Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 20:19:51 +0100
Subject: [PATCH 13/17] Added the Gitflow workflow that reuses workflow.yml

---
 .github/workflows/Gitflow.yml | 41 +++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 .github/workflows/Gitflow.yml

diff --git a/.github/workflows/Gitflow.yml b/.github/workflows/Gitflow.yml
new file mode 100644
index 00000000..526a116a
--- /dev/null
+++ b/.github/workflows/Gitflow.yml
@@ -0,0 +1,41 @@
+name: Git Flow CI
+
+on:
+  push:
+    branches:
+      - develop
+      - main
+      - 'feature/*'
+      - 'release/*'
+
+permissions:
+  contents: write
+
+jobs:
+  build:
+    uses: ./.github/workflows/ci-workflow.yml
+    with:
+      python-version: '3.12'
+    secrets:
+      DB_USER: ${{ secrets.DB_USER }}
+      DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
+      DB_NAME: ${{ secrets.DB_NAME }}
+
+  release:
+    name: Create Release Tag
+    if: startsWith(github.ref, 'refs/heads/release/')
+    needs: build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Create release tag
+        env:
+          GIT_TAG: "v$(date +'%Y.%m.%d.%H%M')"
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git tag $GIT_TAG
+          git push origin $GIT_TAG

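Note: the build job above passes each declared secret explicitly, which
workflow_call requires unless the caller uses secrets: inherit, which
forwards the caller's entire secret set. A sketch of the shorter caller
form:

    jobs:
      build:
        uses: ./.github/workflows/workflow.yml
        with:
          python-version: '3.12'
        secrets: inherit
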
From e9acc8dcc421f282a3ea355cc9ca45c8228b46e0 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 20:23:03 +0100
Subject: [PATCH 14/17] Changed to the correct name of the workflow file

---
 .github/workflows/Gitflow.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/Gitflow.yml b/.github/workflows/Gitflow.yml
index 526a116a..239cf976 100644
--- a/.github/workflows/Gitflow.yml
+++ b/.github/workflows/Gitflow.yml
@@ -13,7 +13,7 @@ permissions:
 
 jobs:
   build:
-    uses: ./.github/workflows/ci-workflow.yml
+    uses: ./.github/workflows/workflow.yml
     with:
       python-version: '3.12'
     secrets:

From dcdd5e344fb6275908a9675786d3103fd9b7fea5 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Mon, 11 Nov 2024 20:26:06 +0100
Subject: [PATCH 15/17] Added trunk workflow file which runs on pushes and
 pull requests to main

---
 .github/workflows/trunk.yml | 42 +++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 .github/workflows/trunk.yml

diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
new file mode 100644
index 00000000..1f1e4a47
--- /dev/null
+++ b/.github/workflows/trunk.yml
@@ -0,0 +1,42 @@
+
+name: Trunk CI
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+permissions:
+  contents: write
+
+jobs:
+  build:
+    uses: ./.github/workflows/workflow.yml
+    with:
+      python-version: '3.12'
+    secrets:
+      DB_USER: ${{ secrets.DB_USER }}
+      DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
+      DB_NAME: ${{ secrets.DB_NAME }}
+
+  release:
+    name: Create Release Tag
+    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+    needs: build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Create release tag
+        env:
+          GIT_TAG: "v$(date +'%Y.%m.%d.%H%M')"
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git tag $GIT_TAG
+          git push origin $GIT_TAG

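Note: Gitflow.yml (PATCH 13) also triggers on pushes to main, so a push to
main now runs the build through both workflows. Dropping main from the
Gitflow triggers would avoid the duplicate run; a sketch:

    on:
      push:
        branches:
          - develop
          - 'feature/*'
          - 'release/*'
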
From 4de95674d8f62e5bf721c1e7c519bbe483706a30 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Thu, 14 Nov 2024 11:22:52 +0100
Subject: [PATCH 16/17] Add .gitignore to ignore .venv, .idea, __pycache__, and
 credentials.env

---
 .gitignore | 11 +++++++++++
 1 file changed, 11 insertions(+)
 create mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..120ec239
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+
+.venv/
+.idea/
+.vscode/
+__pycache__/
+*.pyc
+
+# Ignore the credentials (for obvious reasons)
+credentials.env
+videogames_register/migrations/__pycache__/
+Videogames_project/__pycache__/

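Note: adding paths to .gitignore does not untrack files that are already
committed; they have to be removed from the index first (typically with
git rm -r --cached), which is exactly what PATCH 17 below does.
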
From 01eb29ed5ca7d183f0dc59ec1ffea6f152249650 Mon Sep 17 00:00:00 2001
From: Marco 
Date: Thu, 14 Nov 2024 11:24:55 +0100
Subject: [PATCH 17/17] Remove .venv, .idea, __pycache__ from tracking

---
 .idea/Videogames_project.iml                  |   34 -
 .idea/dbnavigator.xml                         |  402 --
 .idea/inspectionProfiles/Project_Default.xml  |   15 -
 .../inspectionProfiles/profiles_settings.xml  |    6 -
 .idea/jsLibraryMappings.xml                   |    6 -
 .idea/misc.xml                                |    7 -
 .idea/modules.xml                             |    8 -
 .../shelved.patch                             |  210 -
 ..._Update_at_11_9_2024_1_05_PM__Changes_.xml |    4 -
 .idea/vcs.xml                                 |    7 -
 .idea/workspace.xml                           |  289 --
 .../Django-5.1.2.dist-info/AUTHORS            | 1106 ----
 .../Django-5.1.2.dist-info/INSTALLER          |    1 -
 .../Django-5.1.2.dist-info/LICENSE            |   27 -
 .../Django-5.1.2.dist-info/LICENSE.python     |  288 --
 .../Django-5.1.2.dist-info/METADATA           |  100 -
 .../Django-5.1.2.dist-info/RECORD             | 4540 -----------------
 .../Django-5.1.2.dist-info/REQUESTED          |    0
 .../Django-5.1.2.dist-info/WHEEL              |    5 -
 .../Django-5.1.2.dist-info/entry_points.txt   |    2 -
 .../Django-5.1.2.dist-info/top_level.txt      |    1 -
 .venv/Lib/site-packages/_pytest/__init__.py   |   13 -
 .../Lib/site-packages/_pytest/_argcomplete.py |  117 -
 .../site-packages/_pytest/_code/__init__.py   |   26 -
 .venv/Lib/site-packages/_pytest/_code/code.py | 1403 -----
 .../Lib/site-packages/_pytest/_code/source.py |  215 -
 .../Lib/site-packages/_pytest/_io/__init__.py |   10 -
 .venv/Lib/site-packages/_pytest/_io/pprint.py |  673 ---
 .../Lib/site-packages/_pytest/_io/saferepr.py |  130 -
 .../_pytest/_io/terminalwriter.py             |  274 -
 .../Lib/site-packages/_pytest/_io/wcwidth.py  |   57 -
 .../Lib/site-packages/_pytest/_py/__init__.py |    0
 .venv/Lib/site-packages/_pytest/_py/error.py  |  111 -
 .venv/Lib/site-packages/_pytest/_py/path.py   | 1475 ------
 .venv/Lib/site-packages/_pytest/_version.py   |   16 -
 .../_pytest/assertion/__init__.py             |  192 -
 .../_pytest/assertion/rewrite.py              | 1204 -----
 .../_pytest/assertion/truncate.py             |  117 -
 .../site-packages/_pytest/assertion/util.py   |  609 ---
 .../site-packages/_pytest/cacheprovider.py    |  626 ---
 .venv/Lib/site-packages/_pytest/capture.py    | 1087 ----
 .venv/Lib/site-packages/_pytest/compat.py     |  351 --
 .../site-packages/_pytest/config/__init__.py  | 1972 -------
 .../_pytest/config/argparsing.py              |  551 --
 .../site-packages/_pytest/config/compat.py    |   85 -
 .../_pytest/config/exceptions.py              |   13 -
 .../site-packages/_pytest/config/findpaths.py |  228 -
 .venv/Lib/site-packages/_pytest/debugging.py  |  385 --
 .venv/Lib/site-packages/_pytest/deprecated.py |   91 -
 .venv/Lib/site-packages/_pytest/doctest.py    |  755 ---
 .../Lib/site-packages/_pytest/faulthandler.py |  105 -
 .venv/Lib/site-packages/_pytest/fixtures.py   | 1932 -------
 .../site-packages/_pytest/freeze_support.py   |   45 -
 .venv/Lib/site-packages/_pytest/helpconfig.py |  276 -
 .venv/Lib/site-packages/_pytest/hookspec.py   | 1333 -----
 .venv/Lib/site-packages/_pytest/junitxml.py   |  697 ---
 .venv/Lib/site-packages/_pytest/legacypath.py |  473 --
 .venv/Lib/site-packages/_pytest/logging.py    |  955 ----
 .venv/Lib/site-packages/_pytest/main.py       | 1072 ----
 .../site-packages/_pytest/mark/__init__.py    |  292 --
 .../site-packages/_pytest/mark/expression.py  |  333 --
 .../site-packages/_pytest/mark/structures.py  |  615 ---
 .../Lib/site-packages/_pytest/monkeypatch.py  |  415 --
 .venv/Lib/site-packages/_pytest/nodes.py      |  766 ---
 .venv/Lib/site-packages/_pytest/outcomes.py   |  318 --
 .venv/Lib/site-packages/_pytest/pastebin.py   |  113 -
 .venv/Lib/site-packages/_pytest/pathlib.py    |  975 ----
 .venv/Lib/site-packages/_pytest/py.typed      |    0
 .venv/Lib/site-packages/_pytest/pytester.py   | 1766 -------
 .../_pytest/pytester_assertions.py            |   74 -
 .venv/Lib/site-packages/_pytest/python.py     | 1679 ------
 .venv/Lib/site-packages/_pytest/python_api.py | 1020 ----
 .../Lib/site-packages/_pytest/python_path.py  |   26 -
 .venv/Lib/site-packages/_pytest/recwarn.py    |  365 --
 .venv/Lib/site-packages/_pytest/reports.py    |  636 ---
 .venv/Lib/site-packages/_pytest/runner.py     |  571 ---
 .venv/Lib/site-packages/_pytest/scope.py      |   91 -
 .venv/Lib/site-packages/_pytest/setuponly.py  |  102 -
 .venv/Lib/site-packages/_pytest/setupplan.py  |   39 -
 .venv/Lib/site-packages/_pytest/skipping.py   |  301 --
 .venv/Lib/site-packages/_pytest/stash.py      |  116 -
 .venv/Lib/site-packages/_pytest/stepwise.py   |  125 -
 .venv/Lib/site-packages/_pytest/terminal.py   | 1577 ------
 .../site-packages/_pytest/threadexception.py  |   97 -
 .venv/Lib/site-packages/_pytest/timing.py     |   16 -
 .venv/Lib/site-packages/_pytest/tmpdir.py     |  322 --
 .venv/Lib/site-packages/_pytest/unittest.py   |  435 --
 .../_pytest/unraisableexception.py            |  100 -
 .../site-packages/_pytest/warning_types.py    |  166 -
 .venv/Lib/site-packages/_pytest/warnings.py   |  151 -
 .../colorama-0.4.6.dist-info/INSTALLER        |    1 -
 .../colorama-0.4.6.dist-info/METADATA         |  441 --
 .../colorama-0.4.6.dist-info/RECORD           |   31 -
 .../colorama-0.4.6.dist-info/WHEEL            |    5 -
 .../licenses/LICENSE.txt                      |   27 -
 .venv/Lib/site-packages/colorama/__init__.py  |    7 -
 .venv/Lib/site-packages/colorama/ansi.py      |  102 -
 .../Lib/site-packages/colorama/ansitowin32.py |  277 -
 .../Lib/site-packages/colorama/initialise.py  |  121 -
 .../site-packages/colorama/tests/__init__.py  |    1 -
 .../site-packages/colorama/tests/ansi_test.py |   76 -
 .../colorama/tests/ansitowin32_test.py        |  294 --
 .../colorama/tests/initialise_test.py         |  189 -
 .../colorama/tests/isatty_test.py             |   57 -
 .../Lib/site-packages/colorama/tests/utils.py |   49 -
 .../colorama/tests/winterm_test.py            |  131 -
 .venv/Lib/site-packages/colorama/win32.py     |  180 -
 .venv/Lib/site-packages/colorama/winterm.py   |  195 -
 .../coverage-7.6.4.dist-info/INSTALLER        |    1 -
 .../coverage-7.6.4.dist-info/LICENSE.txt      |  177 -
 .../coverage-7.6.4.dist-info/METADATA         |  199 -
 .../coverage-7.6.4.dist-info/RECORD           |  104 -
 .../coverage-7.6.4.dist-info/WHEEL            |    5 -
 .../coverage-7.6.4.dist-info/entry_points.txt |    4 -
 .../coverage-7.6.4.dist-info/top_level.txt    |    1 -
 .venv/Lib/site-packages/coverage/__init__.py  |   38 -
 .venv/Lib/site-packages/coverage/__main__.py  |   10 -
 .venv/Lib/site-packages/coverage/annotate.py  |  115 -
 .venv/Lib/site-packages/coverage/bytecode.py  |   22 -
 .venv/Lib/site-packages/coverage/cmdline.py   | 1009 ----
 .venv/Lib/site-packages/coverage/collector.py |  497 --
 .venv/Lib/site-packages/coverage/config.py    |  627 ---
 .venv/Lib/site-packages/coverage/context.py   |   75 -
 .venv/Lib/site-packages/coverage/control.py   | 1402 -----
 .venv/Lib/site-packages/coverage/core.py      |   99 -
 .venv/Lib/site-packages/coverage/data.py      |  228 -
 .venv/Lib/site-packages/coverage/debug.py     |  613 ---
 .../Lib/site-packages/coverage/disposition.py |   58 -
 .venv/Lib/site-packages/coverage/env.py       |  125 -
 .../Lib/site-packages/coverage/exceptions.py  |   63 -
 .venv/Lib/site-packages/coverage/execfile.py  |  324 --
 .venv/Lib/site-packages/coverage/files.py     |  548 --
 .venv/Lib/site-packages/coverage/html.py      |  810 ---
 .../coverage/htmlfiles/coverage_html.js       |  733 ---
 .../coverage/htmlfiles/favicon_32.png         |  Bin 1732 -> 0 bytes
 .../coverage/htmlfiles/index.html             |  164 -
 .../coverage/htmlfiles/keybd_closed.png       |  Bin 9004 -> 0 bytes
 .../coverage/htmlfiles/pyfile.html            |  149 -
 .../coverage/htmlfiles/style.css              |  337 --
 .../coverage/htmlfiles/style.scss             |  760 ---
 .venv/Lib/site-packages/coverage/inorout.py   |  594 ---
 .../Lib/site-packages/coverage/jsonreport.py  |  179 -
 .../Lib/site-packages/coverage/lcovreport.py  |  223 -
 .venv/Lib/site-packages/coverage/misc.py      |  369 --
 .venv/Lib/site-packages/coverage/multiproc.py |  115 -
 .venv/Lib/site-packages/coverage/numbits.py   |  147 -
 .venv/Lib/site-packages/coverage/parser.py    | 1287 -----
 .../Lib/site-packages/coverage/phystokens.py  |  198 -
 .venv/Lib/site-packages/coverage/plugin.py    |  617 ---
 .../site-packages/coverage/plugin_support.py  |  298 --
 .venv/Lib/site-packages/coverage/py.typed     |    1 -
 .venv/Lib/site-packages/coverage/python.py    |  273 -
 .venv/Lib/site-packages/coverage/pytracer.py  |  362 --
 .venv/Lib/site-packages/coverage/regions.py   |  126 -
 .venv/Lib/site-packages/coverage/report.py    |  282 -
 .../Lib/site-packages/coverage/report_core.py |  120 -
 .venv/Lib/site-packages/coverage/results.py   |  419 --
 .venv/Lib/site-packages/coverage/sqldata.py   | 1103 ----
 .venv/Lib/site-packages/coverage/sqlitedb.py  |  231 -
 .venv/Lib/site-packages/coverage/sysmon.py    |  433 --
 .venv/Lib/site-packages/coverage/templite.py  |  306 --
 .../Lib/site-packages/coverage/tomlconfig.py  |  209 -
 .../coverage/tracer.cp312-win_amd64.pyd       |  Bin 22528 -> 0 bytes
 .venv/Lib/site-packages/coverage/tracer.pyi   |   41 -
 .venv/Lib/site-packages/coverage/types.py     |  203 -
 .venv/Lib/site-packages/coverage/version.py   |   50 -
 .venv/Lib/site-packages/coverage/xmlreport.py |  261 -
 .../iniconfig-2.0.0.dist-info/INSTALLER       |    1 -
 .../iniconfig-2.0.0.dist-info/METADATA        |   80 -
 .../iniconfig-2.0.0.dist-info/RECORD          |   14 -
 .../iniconfig-2.0.0.dist-info/WHEEL           |    4 -
 .../licenses/LICENSE                          |   19 -
 .venv/Lib/site-packages/iniconfig/__init__.py |  216 -
 .venv/Lib/site-packages/iniconfig/_parse.py   |   82 -
 .venv/Lib/site-packages/iniconfig/_version.py |    4 -
 .../Lib/site-packages/iniconfig/exceptions.py |   20 -
 .venv/Lib/site-packages/iniconfig/py.typed    |    0
 .../packaging-24.2.dist-info/INSTALLER        |    1 -
 .../packaging-24.2.dist-info/LICENSE          |    3 -
 .../packaging-24.2.dist-info/LICENSE.APACHE   |  177 -
 .../packaging-24.2.dist-info/LICENSE.BSD      |   23 -
 .../packaging-24.2.dist-info/METADATA         |  102 -
 .../packaging-24.2.dist-info/RECORD           |   40 -
 .../packaging-24.2.dist-info/WHEEL            |    4 -
 .venv/Lib/site-packages/packaging/__init__.py |   15 -
 .venv/Lib/site-packages/packaging/_elffile.py |  110 -
 .../Lib/site-packages/packaging/_manylinux.py |  263 -
 .../Lib/site-packages/packaging/_musllinux.py |   85 -
 .venv/Lib/site-packages/packaging/_parser.py  |  354 --
 .../site-packages/packaging/_structures.py    |   61 -
 .../Lib/site-packages/packaging/_tokenizer.py |  194 -
 .../packaging/licenses/__init__.py            |  145 -
 .../site-packages/packaging/licenses/_spdx.py |  759 ---
 .venv/Lib/site-packages/packaging/markers.py  |  331 --
 .venv/Lib/site-packages/packaging/metadata.py |  863 ----
 .venv/Lib/site-packages/packaging/py.typed    |    0
 .../site-packages/packaging/requirements.py   |   91 -
 .../Lib/site-packages/packaging/specifiers.py | 1020 ----
 .venv/Lib/site-packages/packaging/tags.py     |  617 ---
 .venv/Lib/site-packages/packaging/utils.py    |  163 -
 .venv/Lib/site-packages/packaging/version.py  |  582 ---
 .../pluggy-1.5.0.dist-info/INSTALLER          |    1 -
 .../pluggy-1.5.0.dist-info/LICENSE            |   21 -
 .../pluggy-1.5.0.dist-info/METADATA           |  155 -
 .../pluggy-1.5.0.dist-info/RECORD             |   23 -
 .../pluggy-1.5.0.dist-info/WHEEL              |    5 -
 .../pluggy-1.5.0.dist-info/top_level.txt      |    1 -
 .venv/Lib/site-packages/pluggy/__init__.py    |   37 -
 .venv/Lib/site-packages/pluggy/_callers.py    |  182 -
 .venv/Lib/site-packages/pluggy/_hooks.py      |  715 ---
 .venv/Lib/site-packages/pluggy/_manager.py    |  528 --
 .venv/Lib/site-packages/pluggy/_result.py     |  104 -
 .venv/Lib/site-packages/pluggy/_tracing.py    |   73 -
 .venv/Lib/site-packages/pluggy/_version.py    |   16 -
 .venv/Lib/site-packages/pluggy/_warnings.py   |   27 -
 .venv/Lib/site-packages/pluggy/py.typed       |    0
 .../psycopg-3.2.3.dist-info/INSTALLER         |    1 -
 .../psycopg-3.2.3.dist-info/LICENSE.txt       |  165 -
 .../psycopg-3.2.3.dist-info/METADATA          |  109 -
 .../psycopg-3.2.3.dist-info/RECORD            |  165 -
 .../psycopg-3.2.3.dist-info/REQUESTED         |    0
 .../psycopg-3.2.3.dist-info/WHEEL             |    5 -
 .../psycopg-3.2.3.dist-info/top_level.txt     |    1 -
 .venv/Lib/site-packages/psycopg/__init__.py   |  121 -
 .venv/Lib/site-packages/psycopg/_acompat.py   |  106 -
 .../site-packages/psycopg/_adapters_map.py    |  296 --
 .../site-packages/psycopg/_capabilities.py    |  132 -
 .venv/Lib/site-packages/psycopg/_cmodule.py   |   24 -
 .venv/Lib/site-packages/psycopg/_column.py    |  104 -
 .venv/Lib/site-packages/psycopg/_compat.py    |   59 -
 .../site-packages/psycopg/_connection_base.py |  691 ---
 .../site-packages/psycopg/_connection_info.py |  173 -
 .../psycopg/_conninfo_attempts.py             |   96 -
 .../psycopg/_conninfo_attempts_async.py       |  101 -
 .../site-packages/psycopg/_conninfo_utils.py  |  124 -
 .venv/Lib/site-packages/psycopg/_copy.py      |  298 --
 .../Lib/site-packages/psycopg/_copy_async.py  |  299 --
 .venv/Lib/site-packages/psycopg/_copy_base.py |  438 --
 .../Lib/site-packages/psycopg/_cursor_base.py |  627 ---
 .venv/Lib/site-packages/psycopg/_dns.py       |  247 -
 .venv/Lib/site-packages/psycopg/_encodings.py |  154 -
 .venv/Lib/site-packages/psycopg/_enums.py     |   80 -
 .venv/Lib/site-packages/psycopg/_oids.py      |   97 -
 .venv/Lib/site-packages/psycopg/_pipeline.py  |  268 -
 .venv/Lib/site-packages/psycopg/_preparing.py |  201 -
 .../site-packages/psycopg/_py_transformer.py  |  361 --
 .venv/Lib/site-packages/psycopg/_queries.py   |  425 --
 .venv/Lib/site-packages/psycopg/_struct.py    |   57 -
 .venv/Lib/site-packages/psycopg/_tpc.py       |  115 -
 .../Lib/site-packages/psycopg/_transformer.py |   21 -
 .venv/Lib/site-packages/psycopg/_typeinfo.py  |  343 --
 .venv/Lib/site-packages/psycopg/_typemod.py   |   86 -
 .venv/Lib/site-packages/psycopg/_tz.py        |   45 -
 .venv/Lib/site-packages/psycopg/_wrappers.py  |  137 -
 .venv/Lib/site-packages/psycopg/abc.py        |  251 -
 .venv/Lib/site-packages/psycopg/adapt.py      |  153 -
 .../site-packages/psycopg/client_cursor.py    |   93 -
 .venv/Lib/site-packages/psycopg/connection.py |  479 --
 .../site-packages/psycopg/connection_async.py |  519 --
 .venv/Lib/site-packages/psycopg/conninfo.py   |  154 -
 .venv/Lib/site-packages/psycopg/copy.py       |   35 -
 .../site-packages/psycopg/crdb/__init__.py    |   20 -
 .../Lib/site-packages/psycopg/crdb/_types.py  |  201 -
 .../site-packages/psycopg/crdb/connection.py  |  105 -
 .venv/Lib/site-packages/psycopg/cursor.py     |  288 --
 .../Lib/site-packages/psycopg/cursor_async.py |  298 --
 .venv/Lib/site-packages/psycopg/dbapi20.py    |  134 -
 .venv/Lib/site-packages/psycopg/errors.py     | 1759 -------
 .venv/Lib/site-packages/psycopg/generators.py |  407 --
 .venv/Lib/site-packages/psycopg/postgres.py   |  158 -
 .../Lib/site-packages/psycopg/pq/__init__.py  |  137 -
 .venv/Lib/site-packages/psycopg/pq/_debug.py  |  104 -
 .venv/Lib/site-packages/psycopg/pq/_enums.py  |  259 -
 .../site-packages/psycopg/pq/_pq_ctypes.py    |  841 ---
 .../site-packages/psycopg/pq/_pq_ctypes.pyi   |  227 -
 .venv/Lib/site-packages/psycopg/pq/abc.py     |  336 --
 .venv/Lib/site-packages/psycopg/pq/misc.py    |  187 -
 .../Lib/site-packages/psycopg/pq/pq_ctypes.py | 1276 -----
 .venv/Lib/site-packages/psycopg/py.typed      |    0
 .venv/Lib/site-packages/psycopg/raw_cursor.py |   73 -
 .venv/Lib/site-packages/psycopg/rows.py       |  276 -
 .../site-packages/psycopg/server_cursor.py    |  475 --
 .venv/Lib/site-packages/psycopg/sql.py        |  484 --
 .../Lib/site-packages/psycopg/transaction.py  |  286 --
 .../site-packages/psycopg/types/__init__.py   |   11 -
 .../Lib/site-packages/psycopg/types/array.py  |  476 --
 .venv/Lib/site-packages/psycopg/types/bool.py |   50 -
 .../site-packages/psycopg/types/composite.py  |  386 --
 .../site-packages/psycopg/types/datetime.py   |  763 ---
 .venv/Lib/site-packages/psycopg/types/enum.py |  254 -
 .../Lib/site-packages/psycopg/types/hstore.py |  146 -
 .venv/Lib/site-packages/psycopg/types/json.py |  253 -
 .../site-packages/psycopg/types/multirange.py |  561 --
 .venv/Lib/site-packages/psycopg/types/net.py  |  203 -
 .venv/Lib/site-packages/psycopg/types/none.py |   27 -
 .../site-packages/psycopg/types/numeric.py    |  551 --
 .../Lib/site-packages/psycopg/types/numpy.py  |   99 -
 .../Lib/site-packages/psycopg/types/range.py  |  753 ---
 .../site-packages/psycopg/types/shapely.py    |   89 -
 .../Lib/site-packages/psycopg/types/string.py |  233 -
 .venv/Lib/site-packages/psycopg/types/uuid.py |   64 -
 .venv/Lib/site-packages/psycopg/version.py    |   12 -
 .venv/Lib/site-packages/psycopg/waiting.py    |  424 --
 .../psycopg_binary-3.2.3.dist-info/DELVEWHEEL |    2 -
 .../psycopg_binary-3.2.3.dist-info/INSTALLER  |    1 -
 .../LICENSE.txt                               |  165 -
 .../psycopg_binary-3.2.3.dist-info/METADATA   |   70 -
 .../psycopg_binary-3.2.3.dist-info/RECORD     |   22 -
 .../psycopg_binary-3.2.3.dist-info/REQUESTED  |    0
 .../psycopg_binary-3.2.3.dist-info/WHEEL      |    5 -
 .../top_level.txt                             |    1 -
 ...1-x64-15ec4b7b2d8017a72acf488cff1a1c38.dll |  Bin 2878976 -> 0 bytes
 .../psycopg_binary.libs/libiconv-2.dll        |  Bin 1851113 -> 0 bytes
 ...ntl-9-6b2fa35014185f5e79c4c76df54d19ae.dll |  Bin 475769 -> 0 bytes
 ...libpq-de0d6d7eef3cda3760db155fac0f1d34.dll |  Bin 308224 -> 0 bytes
 ...1-x64-56850767f4919548e5b00c9d311d045e.dll |  Bin 686592 -> 0 bytes
 .../psycopg_binary.libs/libwinpthread-1.dll   |  Bin 52736 -> 0 bytes
 .../site-packages/psycopg_binary/__init__.py  |   27 -
 .../_psycopg.cp312-win_amd64.pyd              |  Bin 669184 -> 0 bytes
 .../site-packages/psycopg_binary/_psycopg.pyi |   83 -
 .../psycopg_binary/pq.cp312-win_amd64.pyd     |  Bin 284672 -> 0 bytes
 .../Lib/site-packages/psycopg_binary/py.typed |    0
 .../site-packages/psycopg_binary/version.py   |   12 -
 .venv/Lib/site-packages/py.py                 |   15 -
 .../pytest-8.3.3.dist-info/AUTHORS            |  469 --
 .../pytest-8.3.3.dist-info/INSTALLER          |    1 -
 .../pytest-8.3.3.dist-info/LICENSE            |   21 -
 .../pytest-8.3.3.dist-info/METADATA           |  212 -
 .../pytest-8.3.3.dist-info/RECORD             |  155 -
 .../pytest-8.3.3.dist-info/REQUESTED          |    0
 .../pytest-8.3.3.dist-info/WHEEL              |    5 -
 .../pytest-8.3.3.dist-info/entry_points.txt   |    3 -
 .../pytest-8.3.3.dist-info/top_level.txt      |    3 -
 .venv/Lib/site-packages/pytest-cov.pth        |    1 -
 .venv/Lib/site-packages/pytest/__init__.py    |  172 -
 .venv/Lib/site-packages/pytest/__main__.py    |    9 -
 .venv/Lib/site-packages/pytest/py.typed       |    0
 .../pytest_cov-6.0.0.dist-info/AUTHORS.rst    |   64 -
 .../pytest_cov-6.0.0.dist-info/INSTALLER      |    1 -
 .../pytest_cov-6.0.0.dist-info/LICENSE        |   21 -
 .../pytest_cov-6.0.0.dist-info/METADATA       |  598 ---
 .../pytest_cov-6.0.0.dist-info/RECORD         |   20 -
 .../pytest_cov-6.0.0.dist-info/REQUESTED      |    0
 .../pytest_cov-6.0.0.dist-info/WHEEL          |    5 -
 .../entry_points.txt                          |    2 -
 .../pytest_cov-6.0.0.dist-info/top_level.txt  |    1 -
 .../Lib/site-packages/pytest_cov/__init__.py  |   47 -
 .venv/Lib/site-packages/pytest_cov/compat.py  |   15 -
 .venv/Lib/site-packages/pytest_cov/embed.py   |  123 -
 .venv/Lib/site-packages/pytest_cov/engine.py  |  454 --
 .venv/Lib/site-packages/pytest_cov/plugin.py  |  446 --
 .../pytest_django-2.3.1.dist-info/AUTHORS     |   13 -
 .../pytest_django-2.3.1.dist-info/INSTALLER   |    1 -
 .../pytest_django-2.3.1.dist-info/LICENSE     |   54 -
 .../pytest_django-2.3.1.dist-info/METADATA    |   86 -
 .../pytest_django-2.3.1.dist-info/RECORD      |   27 -
 .../pytest_django-2.3.1.dist-info/REQUESTED   |    0
 .../pytest_django-2.3.1.dist-info/WHEEL       |    5 -
 .../entry_points.txt                          |    2 -
 .../top_level.txt                             |    1 -
 .../pytest_django-4.9.0.dist-info/AUTHORS     |   23 -
 .../pytest_django-4.9.0.dist-info/INSTALLER   |    1 -
 .../pytest_django-4.9.0.dist-info/LICENSE     |   54 -
 .../pytest_django-4.9.0.dist-info/METADATA    |  161 -
 .../pytest_django-4.9.0.dist-info/RECORD      |   26 -
 .../pytest_django-4.9.0.dist-info/REQUESTED   |    0
 .../pytest_django-4.9.0.dist-info/WHEEL       |    5 -
 .../entry_points.txt                          |    2 -
 .../top_level.txt                             |    1 -
 .../site-packages/pytest_django/__init__.py   |    1 -
 .../site-packages/pytest_django/_version.py   |   16 -
 .../site-packages/pytest_django/asserts.py    |  221 -
 .../Lib/site-packages/pytest_django/client.py |   45 -
 .../site-packages/pytest_django/db_reuse.py   |   77 -
 .../site-packages/pytest_django/deprecated.py |   17 -
 .../pytest_django/django_compat.py            |   19 -
 .../site-packages/pytest_django/fixtures.py   |  227 -
 .../pytest_django/lazy_django.py              |   21 -
 .../pytest_django/live_server_helper.py       |   96 -
 .../Lib/site-packages/pytest_django/plugin.py |  267 -
 .../Lib/site-packages/pytest_django/py.typed  |    0
 .../INSTALLER                                 |    1 -
 .../LICENSE                                   |  279 -
 .../METADATA                                  |   67 -
 .../typing_extensions-4.12.2.dist-info/RECORD |    7 -
 .../typing_extensions-4.12.2.dist-info/WHEEL  |    4 -
 .venv/Lib/site-packages/typing_extensions.py  | 3641 -------------
 .venv/Scripts/coverage-3.12.exe               |  Bin 108427 -> 0 bytes
 .venv/Scripts/coverage.exe                    |  Bin 108427 -> 0 bytes
 .venv/Scripts/coverage3.exe                   |  Bin 108427 -> 0 bytes
 .venv/Scripts/django-admin.exe                |  Bin 108475 -> 0 bytes
 .venv/Scripts/py.test.exe                     |  Bin 108433 -> 0 bytes
 .venv/Scripts/pytest.exe                      |  Bin 108433 -> 0 bytes
 __pycache__/manage.cpython-312.pyc            |  Bin 1067 -> 0 bytes
 394 files changed, 99454 deletions(-)
 delete mode 100644 .idea/Videogames_project.iml
 delete mode 100644 .idea/dbnavigator.xml
 delete mode 100644 .idea/inspectionProfiles/Project_Default.xml
 delete mode 100644 .idea/inspectionProfiles/profiles_settings.xml
 delete mode 100644 .idea/jsLibraryMappings.xml
 delete mode 100644 .idea/misc.xml
 delete mode 100644 .idea/modules.xml
 delete mode 100644 .idea/shelf/Uncommitted_changes_before_Update_at_11_9_2024_1_05_PM_[Changes]/shelved.patch
 delete mode 100644 .idea/shelf/Uncommitted_changes_before_Update_at_11_9_2024_1_05_PM__Changes_.xml
 delete mode 100644 .idea/vcs.xml
 delete mode 100644 .idea/workspace.xml
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/AUTHORS
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/LICENSE.python
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/Django-5.1.2.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/_pytest/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_argcomplete.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_code/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_code/code.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_code/source.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_io/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_io/pprint.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_io/saferepr.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_io/terminalwriter.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_io/wcwidth.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_py/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_py/error.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_py/path.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/_version.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/assertion/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/assertion/rewrite.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/assertion/truncate.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/assertion/util.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/cacheprovider.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/capture.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/compat.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/config/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/config/argparsing.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/config/compat.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/config/exceptions.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/config/findpaths.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/debugging.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/deprecated.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/doctest.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/faulthandler.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/fixtures.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/freeze_support.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/helpconfig.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/hookspec.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/junitxml.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/legacypath.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/logging.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/main.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/mark/__init__.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/mark/expression.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/mark/structures.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/monkeypatch.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/nodes.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/outcomes.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/pastebin.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/pathlib.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/py.typed
 delete mode 100644 .venv/Lib/site-packages/_pytest/pytester.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/pytester_assertions.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/python.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/python_api.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/python_path.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/recwarn.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/reports.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/runner.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/scope.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/setuponly.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/setupplan.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/skipping.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/stash.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/stepwise.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/terminal.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/threadexception.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/timing.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/tmpdir.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/unittest.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/unraisableexception.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/warning_types.py
 delete mode 100644 .venv/Lib/site-packages/_pytest/warnings.py
 delete mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/colorama-0.4.6.dist-info/licenses/LICENSE.txt
 delete mode 100644 .venv/Lib/site-packages/colorama/__init__.py
 delete mode 100644 .venv/Lib/site-packages/colorama/ansi.py
 delete mode 100644 .venv/Lib/site-packages/colorama/ansitowin32.py
 delete mode 100644 .venv/Lib/site-packages/colorama/initialise.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/__init__.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/ansi_test.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/ansitowin32_test.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/initialise_test.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/isatty_test.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/utils.py
 delete mode 100644 .venv/Lib/site-packages/colorama/tests/winterm_test.py
 delete mode 100644 .venv/Lib/site-packages/colorama/win32.py
 delete mode 100644 .venv/Lib/site-packages/colorama/winterm.py
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/LICENSE.txt
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/coverage-7.6.4.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/coverage/__init__.py
 delete mode 100644 .venv/Lib/site-packages/coverage/__main__.py
 delete mode 100644 .venv/Lib/site-packages/coverage/annotate.py
 delete mode 100644 .venv/Lib/site-packages/coverage/bytecode.py
 delete mode 100644 .venv/Lib/site-packages/coverage/cmdline.py
 delete mode 100644 .venv/Lib/site-packages/coverage/collector.py
 delete mode 100644 .venv/Lib/site-packages/coverage/config.py
 delete mode 100644 .venv/Lib/site-packages/coverage/context.py
 delete mode 100644 .venv/Lib/site-packages/coverage/control.py
 delete mode 100644 .venv/Lib/site-packages/coverage/core.py
 delete mode 100644 .venv/Lib/site-packages/coverage/data.py
 delete mode 100644 .venv/Lib/site-packages/coverage/debug.py
 delete mode 100644 .venv/Lib/site-packages/coverage/disposition.py
 delete mode 100644 .venv/Lib/site-packages/coverage/env.py
 delete mode 100644 .venv/Lib/site-packages/coverage/exceptions.py
 delete mode 100644 .venv/Lib/site-packages/coverage/execfile.py
 delete mode 100644 .venv/Lib/site-packages/coverage/files.py
 delete mode 100644 .venv/Lib/site-packages/coverage/html.py
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/coverage_html.js
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/favicon_32.png
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/index.html
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/keybd_closed.png
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/pyfile.html
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/style.css
 delete mode 100644 .venv/Lib/site-packages/coverage/htmlfiles/style.scss
 delete mode 100644 .venv/Lib/site-packages/coverage/inorout.py
 delete mode 100644 .venv/Lib/site-packages/coverage/jsonreport.py
 delete mode 100644 .venv/Lib/site-packages/coverage/lcovreport.py
 delete mode 100644 .venv/Lib/site-packages/coverage/misc.py
 delete mode 100644 .venv/Lib/site-packages/coverage/multiproc.py
 delete mode 100644 .venv/Lib/site-packages/coverage/numbits.py
 delete mode 100644 .venv/Lib/site-packages/coverage/parser.py
 delete mode 100644 .venv/Lib/site-packages/coverage/phystokens.py
 delete mode 100644 .venv/Lib/site-packages/coverage/plugin.py
 delete mode 100644 .venv/Lib/site-packages/coverage/plugin_support.py
 delete mode 100644 .venv/Lib/site-packages/coverage/py.typed
 delete mode 100644 .venv/Lib/site-packages/coverage/python.py
 delete mode 100644 .venv/Lib/site-packages/coverage/pytracer.py
 delete mode 100644 .venv/Lib/site-packages/coverage/regions.py
 delete mode 100644 .venv/Lib/site-packages/coverage/report.py
 delete mode 100644 .venv/Lib/site-packages/coverage/report_core.py
 delete mode 100644 .venv/Lib/site-packages/coverage/results.py
 delete mode 100644 .venv/Lib/site-packages/coverage/sqldata.py
 delete mode 100644 .venv/Lib/site-packages/coverage/sqlitedb.py
 delete mode 100644 .venv/Lib/site-packages/coverage/sysmon.py
 delete mode 100644 .venv/Lib/site-packages/coverage/templite.py
 delete mode 100644 .venv/Lib/site-packages/coverage/tomlconfig.py
 delete mode 100644 .venv/Lib/site-packages/coverage/tracer.cp312-win_amd64.pyd
 delete mode 100644 .venv/Lib/site-packages/coverage/tracer.pyi
 delete mode 100644 .venv/Lib/site-packages/coverage/types.py
 delete mode 100644 .venv/Lib/site-packages/coverage/version.py
 delete mode 100644 .venv/Lib/site-packages/coverage/xmlreport.py
 delete mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/iniconfig-2.0.0.dist-info/licenses/LICENSE
 delete mode 100644 .venv/Lib/site-packages/iniconfig/__init__.py
 delete mode 100644 .venv/Lib/site-packages/iniconfig/_parse.py
 delete mode 100644 .venv/Lib/site-packages/iniconfig/_version.py
 delete mode 100644 .venv/Lib/site-packages/iniconfig/exceptions.py
 delete mode 100644 .venv/Lib/site-packages/iniconfig/py.typed
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.APACHE
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/LICENSE.BSD
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/packaging-24.2.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/packaging/__init__.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_elffile.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_manylinux.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_musllinux.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_parser.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_structures.py
 delete mode 100644 .venv/Lib/site-packages/packaging/_tokenizer.py
 delete mode 100644 .venv/Lib/site-packages/packaging/licenses/__init__.py
 delete mode 100644 .venv/Lib/site-packages/packaging/licenses/_spdx.py
 delete mode 100644 .venv/Lib/site-packages/packaging/markers.py
 delete mode 100644 .venv/Lib/site-packages/packaging/metadata.py
 delete mode 100644 .venv/Lib/site-packages/packaging/py.typed
 delete mode 100644 .venv/Lib/site-packages/packaging/requirements.py
 delete mode 100644 .venv/Lib/site-packages/packaging/specifiers.py
 delete mode 100644 .venv/Lib/site-packages/packaging/tags.py
 delete mode 100644 .venv/Lib/site-packages/packaging/utils.py
 delete mode 100644 .venv/Lib/site-packages/packaging/version.py
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/pluggy-1.5.0.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/pluggy/__init__.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_callers.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_hooks.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_manager.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_result.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_tracing.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_version.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/_warnings.py
 delete mode 100644 .venv/Lib/site-packages/pluggy/py.typed
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/LICENSE.txt
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/psycopg-3.2.3.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/psycopg/__init__.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_acompat.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_adapters_map.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_capabilities.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_cmodule.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_column.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_compat.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_connection_base.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_connection_info.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_conninfo_attempts.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_conninfo_attempts_async.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_conninfo_utils.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_copy.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_copy_async.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_copy_base.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_cursor_base.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_dns.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_encodings.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_enums.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_oids.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_pipeline.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_preparing.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_py_transformer.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_queries.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_struct.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_tpc.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_transformer.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_typeinfo.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_typemod.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_tz.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/_wrappers.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/abc.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/adapt.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/client_cursor.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/connection.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/connection_async.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/conninfo.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/copy.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/crdb/__init__.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/crdb/_types.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/crdb/connection.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/cursor.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/cursor_async.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/dbapi20.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/errors.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/generators.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/postgres.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/__init__.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/_debug.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/_enums.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/_pq_ctypes.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/_pq_ctypes.pyi
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/abc.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/misc.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/pq/pq_ctypes.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/py.typed
 delete mode 100644 .venv/Lib/site-packages/psycopg/raw_cursor.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/rows.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/server_cursor.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/sql.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/transaction.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/__init__.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/array.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/bool.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/composite.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/datetime.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/enum.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/hstore.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/json.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/multirange.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/net.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/none.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/numeric.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/numpy.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/range.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/shapely.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/string.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/types/uuid.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/version.py
 delete mode 100644 .venv/Lib/site-packages/psycopg/waiting.py
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/DELVEWHEEL
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/LICENSE.txt
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary-3.2.3.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libcrypto-1_1-x64-15ec4b7b2d8017a72acf488cff1a1c38.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libiconv-2.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libintl-9-6b2fa35014185f5e79c4c76df54d19ae.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libpq-de0d6d7eef3cda3760db155fac0f1d34.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libssl-1_1-x64-56850767f4919548e5b00c9d311d045e.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary.libs/libwinpthread-1.dll
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/__init__.py
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/_psycopg.cp312-win_amd64.pyd
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/_psycopg.pyi
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/pq.cp312-win_amd64.pyd
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/py.typed
 delete mode 100644 .venv/Lib/site-packages/psycopg_binary/version.py
 delete mode 100644 .venv/Lib/site-packages/py.py
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/AUTHORS
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/pytest-8.3.3.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/pytest-cov.pth
 delete mode 100644 .venv/Lib/site-packages/pytest/__init__.py
 delete mode 100644 .venv/Lib/site-packages/pytest/__main__.py
 delete mode 100644 .venv/Lib/site-packages/pytest/py.typed
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/AUTHORS.rst
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_cov-6.0.0.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_cov/__init__.py
 delete mode 100644 .venv/Lib/site-packages/pytest_cov/compat.py
 delete mode 100644 .venv/Lib/site-packages/pytest_cov/embed.py
 delete mode 100644 .venv/Lib/site-packages/pytest_cov/engine.py
 delete mode 100644 .venv/Lib/site-packages/pytest_cov/plugin.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/AUTHORS
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_django-2.3.1.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/AUTHORS
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/REQUESTED
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/entry_points.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_django-4.9.0.dist-info/top_level.txt
 delete mode 100644 .venv/Lib/site-packages/pytest_django/__init__.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/_version.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/asserts.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/client.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/db_reuse.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/deprecated.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/django_compat.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/fixtures.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/lazy_django.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/live_server_helper.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/plugin.py
 delete mode 100644 .venv/Lib/site-packages/pytest_django/py.typed
 delete mode 100644 .venv/Lib/site-packages/typing_extensions-4.12.2.dist-info/INSTALLER
 delete mode 100644 .venv/Lib/site-packages/typing_extensions-4.12.2.dist-info/LICENSE
 delete mode 100644 .venv/Lib/site-packages/typing_extensions-4.12.2.dist-info/METADATA
 delete mode 100644 .venv/Lib/site-packages/typing_extensions-4.12.2.dist-info/RECORD
 delete mode 100644 .venv/Lib/site-packages/typing_extensions-4.12.2.dist-info/WHEEL
 delete mode 100644 .venv/Lib/site-packages/typing_extensions.py
 delete mode 100644 .venv/Scripts/coverage-3.12.exe
 delete mode 100644 .venv/Scripts/coverage.exe
 delete mode 100644 .venv/Scripts/coverage3.exe
 delete mode 100644 .venv/Scripts/django-admin.exe
 delete mode 100644 .venv/Scripts/py.test.exe
 delete mode 100644 .venv/Scripts/pytest.exe
 delete mode 100644 __pycache__/manage.cpython-312.pyc

diff --git a/.idea/Videogames_project.iml b/.idea/Videogames_project.iml
deleted file mode 100644
index 858f2dba..00000000
--- a/.idea/Videogames_project.iml
+++ /dev/null
@@ -1,34 +0,0 @@
-  [34 lines of IntelliJ module (.iml) XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/dbnavigator.xml b/.idea/dbnavigator.xml
deleted file mode 100644
index 92e8398b..00000000
--- a/.idea/dbnavigator.xml
+++ /dev/null
@@ -1,402 +0,0 @@
-  [402 lines of DBNavigator plugin configuration XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
deleted file mode 100644
index 80cef708..00000000
--- a/.idea/inspectionProfiles/Project_Default.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-  [15 lines of inspection-profile XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 105ce2da..00000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-  [6 lines of inspection-profile settings XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/jsLibraryMappings.xml b/.idea/jsLibraryMappings.xml
deleted file mode 100644
index 74c88439..00000000
--- a/.idea/jsLibraryMappings.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-  [6 lines of JavaScript library mapping XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index d38f9556..00000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-  [7 lines of project miscellaneous-settings XML; element markup not preserved in this document]
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index b277d59b..00000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectModuleManager">
-    <modules>
-      <module fileurl="file://$PROJECT_DIR$/.idea/Videogames_project.iml" filepath="$PROJECT_DIR$/.idea/Videogames_project.iml" />
-    </modules>
-  </component>
-</project>
\ No newline at end of file
diff --git a/.idea/shelf/Uncommitted_changes_before_Update_at_11_9_2024_1_05_PM_[Changes]/shelved.patch b/.idea/shelf/Uncommitted_changes_before_Update_at_11_9_2024_1_05_PM_[Changes]/shelved.patch
deleted file mode 100644
index 6fc2a27e..00000000
--- a/.idea/shelf/Uncommitted_changes_before_Update_at_11_9_2024_1_05_PM_[Changes]/shelved.patch
+++ /dev/null
@@ -1,210 +0,0 @@
-Index: credentials.env
-IDEA additional info:
-Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
-<+>UTF-8
-===================================================================
-diff --git a/credentials.env b/credentials.env
-new file mode 100644
---- /dev/null	(date 1731151836401)
-+++ b/credentials.env	(date 1731151836401)
-@@ -0,0 +1,5 @@
-+DB_NAME=videogames_db
-+DB_USER=postgres
-+DB_PASSWORD=Neoclassical42
-+DB_HOST=localhost
-+DB_PORT=5432
-\ No newline at end of file
-Index: .idea/workspace.xml
-IDEA additional info:
-Subsystem: com.intellij.openapi.diff.impl.patch.BaseRevisionTextPatchEP
-<+>  [base-revision text of .idea/workspace.xml; XML markup not preserved in this document]
-Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
-<+>UTF-8
-===================================================================
-diff --git a/.idea/workspace.xml b/.idea/workspace.xml
---- a/.idea/workspace.xml	(revision fdcf924de63919ab3ffaac2eb83fb5a072d53cc2)
-+++ b/.idea/workspace.xml	(date 1731153831845)
-@@ -4,137 +4,10 @@
-