diff --git a/.github/workflows/build-ultraplot.yml b/.github/workflows/build-ultraplot.yml index 736ecc485..f558cd6c0 100644 --- a/.github/workflows/build-ultraplot.yml +++ b/.github/workflows/build-ultraplot.yml @@ -43,7 +43,7 @@ jobs: - name: Test Ultraplot run: | - pytest -n auto --cov=ultraplot --cov-branch --cov-report term-missing --cov-report=xml ultraplot + pytest --cov=ultraplot --cov-branch --cov-report term-missing --cov-report=xml ultraplot - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 @@ -76,14 +76,14 @@ jobs: git fetch origin ${{ github.event.pull_request.base.sha }} git checkout ${{ github.event.pull_request.base.sha }} python -c "import ultraplot as plt; plt.config.Configurator()._save_yaml('ultraplot.yml')" - pytest -n auto -W ignore --mpl-generate-path=baseline --mpl-default-style="./ultraplot.yml" + pytest -W ignore --mpl-generate-path=baseline --mpl-default-style="./ultraplot.yml" git checkout ${{ github.sha }} # Return to PR branch - name: Image Comparison Ultraplot run: | mkdir -p results python -c "import ultraplot as plt; plt.config.Configurator()._save_yaml('ultraplot.yml')" - pytest -n auto -W ignore --mpl --mpl-baseline-path=baseline --mpl-generate-summary=html --mpl-results-path=./results/ --mpl-default-style="./ultraplot.yml" --store-failed-only ultraplot/tests + pytest -W ignore --mpl --mpl-baseline-path=baseline --mpl-generate-summary=html --mpl-results-path=./results/ --mpl-default-style="./ultraplot.yml" --store-failed-only ultraplot/tests # Return the html output of the comparison even if failed - name: Upload comparison failures diff --git a/environment.yml b/environment.yml index 16dda8d45..dc35dc6b1 100644 --- a/environment.yml +++ b/environment.yml @@ -2,32 +2,31 @@ name: ultraplot-dev channels: - conda-forge dependencies: - - basemap >=1.4.1 - - cartopy - - jupyter - - jupytext - - matplotlib>=3.9 - - nbsphinx - - networkx + - python>=3.10,<3.14 - numpy + - matplotlib>=3.9 + - cartopy + - xarray + - seaborn - pandas - - pint - - pip - - pre-commit - - pyarrow - pytest - - pytest-cov - pytest-mpl - - pytest-xdist - - python>=3.10,<3.14 - - seaborn + - pytest-cov + - jupyter + - pip + - pint - sphinx + - nbsphinx + - jupytext + - sphinx-copybutton - sphinx-autoapi - sphinx-automodapi - - sphinx-copybutton - - sphinx-design - sphinx-rtd-theme - typing-extensions - - xarray + - basemap >=1.4.1 + - pre-commit + - sphinx-design + - networkx + - pyarrow - pip: - git+https://github.com/ultraplot/UltraTheme.git diff --git a/ultraplot/config.py b/ultraplot/config.py index 8cdfe5c90..dc0458d6d 100644 --- a/ultraplot/config.py +++ b/ultraplot/config.py @@ -10,8 +10,10 @@ # Because I think it makes sense to have all the code that "runs" (i.e. not # just definitions) in the same place, and I was having issues with circular # dependencies and where import order of __init__.py was affecting behavior. -import logging, os, re, sys, threading - +import logging +import os +import re +import sys from collections import namedtuple from collections.abc import MutableMapping from numbers import Real @@ -763,10 +765,7 @@ def __init__(self, local=True, user=True, default=True, **kwargs): ---------- %(rc.params)s """ - import threading - self._context = [] - self._lock = threading.RLock() self._init(local=local, user=user, default=default, **kwargs) def __getitem__(self, key): @@ -786,10 +785,9 @@ def __setitem__(self, key, value): Modify an `rc_matplotlib` or `rc_ultraplot` setting using dictionary notation (e.g., ``uplt.rc[name] = value``). 
""" - with self._lock: - kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) - rc_ultraplot.update(kw_ultraplot) - rc_matplotlib.update(kw_matplotlib) + kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) + rc_ultraplot.update(kw_ultraplot) + rc_matplotlib.update(kw_matplotlib) def __getattr__(self, attr): """ @@ -815,83 +813,78 @@ def __enter__(self): """ Apply settings from the most recent context block. """ - with self._lock: - if not self._context: - raise RuntimeError( - "rc object must be initialized for context block using rc.context()." - ) - context = self._context[-1] - kwargs = context.kwargs - rc_new = context.rc_new # used for context-based _get_item_context - rc_old = ( - context.rc_old - ) # used to re-apply settings without copying whole dict - for key, value in kwargs.items(): - try: - kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) - except Exception as e: - self.__exit__() - raise e - - for rc_dict, kw_new in zip( - (rc_ultraplot, rc_matplotlib), - (kw_ultraplot, kw_matplotlib), - ): - for key, value in kw_new.items(): - rc_old[key] = rc_dict[key] - rc_new[key] = rc_dict[key] = value + if not self._context: + raise RuntimeError( + "rc object must be initialized for context block using rc.context()." + ) + context = self._context[-1] + kwargs = context.kwargs + rc_new = context.rc_new # used for context-based _get_item_context + rc_old = context.rc_old # used to re-apply settings without copying whole dict + for key, value in kwargs.items(): + try: + kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) + except Exception as e: + self.__exit__() + raise e + + for rc_dict, kw_new in zip( + (rc_ultraplot, rc_matplotlib), + (kw_ultraplot, kw_matplotlib), + ): + for key, value in kw_new.items(): + rc_old[key] = rc_dict[key] + rc_new[key] = rc_dict[key] = value def __exit__(self, *args): # noqa: U100 """ Restore settings from the most recent context block. """ - with self._lock: - if not self._context: - raise RuntimeError( - "rc object must be initialized for context block using rc.context()." - ) - context = self._context[-1] - for key, value in context.rc_old.items(): - kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) - rc_ultraplot.update(kw_ultraplot) - rc_matplotlib.update(kw_matplotlib) - del self._context[-1] + if not self._context: + raise RuntimeError( + "rc object must be initialized for context block using rc.context()." + ) + context = self._context[-1] + for key, value in context.rc_old.items(): + kw_ultraplot, kw_matplotlib = self._get_item_dicts(key, value) + rc_ultraplot.update(kw_ultraplot) + rc_matplotlib.update(kw_matplotlib) + del self._context[-1] def _init(self, *, local, user, default, skip_cycle=False): """ Initialize the configurator. 
""" - with self._lock: - # Always remove context objects - self._context.clear() - - # Update from default settings - # NOTE: see _remove_blacklisted_style_params bugfix - if default: - rc_matplotlib.update(_get_style_dict("original", filter=False)) - rc_matplotlib.update(rcsetup._rc_matplotlib_default) - rc_ultraplot.update(rcsetup._rc_ultraplot_default) - for key, value in rc_ultraplot.items(): - kw_ultraplot, kw_matplotlib = self._get_item_dicts( - key, value, skip_cycle=skip_cycle - ) - rc_matplotlib.update(kw_matplotlib) - rc_ultraplot.update(kw_ultraplot) - - # Update from user home - user_path = None - if user: - user_path = self.user_file() - if os.path.isfile(user_path): - self.load(user_path) - - # Update from local paths - if local: - local_paths = self.local_files() - for path in local_paths: - if path == user_path: # local files always have precedence - continue - self.load(path) + # Always remove context objects + self._context.clear() + + # Update from default settings + # NOTE: see _remove_blacklisted_style_params bugfix + if default: + rc_matplotlib.update(_get_style_dict("original", filter=False)) + rc_matplotlib.update(rcsetup._rc_matplotlib_default) + rc_ultraplot.update(rcsetup._rc_ultraplot_default) + for key, value in rc_ultraplot.items(): + kw_ultraplot, kw_matplotlib = self._get_item_dicts( + key, value, skip_cycle=skip_cycle + ) + rc_matplotlib.update(kw_matplotlib) + rc_ultraplot.update(kw_ultraplot) + + # Update from user home + user_path = None + if user: + user_path = self.user_file() + if os.path.isfile(user_path): + self.load(user_path) + + # Update from local paths + if local: + local_paths = self.local_files() + for path in local_paths: + if path == user_path: # local files always have precedence + continue + self.load(path) @staticmethod def _validate_key(key, value=None): @@ -937,28 +930,27 @@ def _get_item_context(self, key, mode=None): As with `~Configurator.__getitem__` but the search is limited based on the context mode and ``None`` is returned if the key is not found. """ - with self._lock: - key, _ = self._validate_key(key) - if mode is None: - mode = self._context_mode - cache = tuple(context.rc_new for context in self._context) - if mode == 0: - rcdicts = (*cache, rc_ultraplot, rc_matplotlib) - elif mode == 1: - rcdicts = (*cache, rc_ultraplot) # added settings only! - elif mode == 2: - rcdicts = (*cache,) - else: - raise ValueError(f"Invalid caching mode {mode!r}.") - for rcdict in rcdicts: - if not rcdict: - continue - try: - return rcdict[key] - except KeyError: - continue - if mode == 0: # otherwise return None - raise KeyError(f"Invalid rc setting {key!r}.") + key, _ = self._validate_key(key) + if mode is None: + mode = self._context_mode + cache = tuple(context.rc_new for context in self._context) + if mode == 0: + rcdicts = (*cache, rc_ultraplot, rc_matplotlib) + elif mode == 1: + rcdicts = (*cache, rc_ultraplot) # added settings only! 
+ elif mode == 2: + rcdicts = (*cache,) + else: + raise ValueError(f"Invalid caching mode {mode!r}.") + for rcdict in rcdicts: + if not rcdict: + continue + try: + return rcdict[key] + except KeyError: + continue + if mode == 0: # otherwise return None + raise KeyError(f"Invalid rc setting {key!r}.") def _get_item_dicts(self, key, value, skip_cycle=False): """ @@ -1460,26 +1452,25 @@ def context(self, *args, mode=0, file=None, **kwargs): >>> fig, ax = uplt.subplots() >>> ax.format(ticklen=5, metalinewidth=2) """ - with self._lock: - # Add input dictionaries - for arg in args: - if not isinstance(arg, dict): - raise ValueError(f"Non-dictionary argument {arg!r}.") - kwargs.update(arg) - - # Add settings from file - if file is not None: - kw = self._load_file(file) - kw = {key: value for key, value in kw.items() if key not in kwargs} - kwargs.update(kw) - - # Activate context object - if mode not in range(3): - raise ValueError(f"Invalid mode {mode!r}.") - cls = namedtuple("RcContext", ("mode", "kwargs", "rc_new", "rc_old")) - context = cls(mode=mode, kwargs=kwargs, rc_new={}, rc_old={}) - self._context.append(context) - return self + # Add input dictionaries + for arg in args: + if not isinstance(arg, dict): + raise ValueError(f"Non-dictionary argument {arg!r}.") + kwargs.update(arg) + + # Add settings from file + if file is not None: + kw = self._load_file(file) + kw = {key: value for key, value in kw.items() if key not in kwargs} + kwargs.update(kw) + + # Activate context object + if mode not in range(3): + raise ValueError(f"Invalid mode {mode!r}.") + cls = namedtuple("RcContext", ("mode", "kwargs", "rc_new", "rc_old")) + context = cls(mode=mode, kwargs=kwargs, rc_new={}, rc_old={}) + self._context.append(context) + return self def category(self, cat, *, trimcat=True, context=False): """ @@ -1585,30 +1576,25 @@ def update(self, *args, **kwargs): Configurator.category Configurator.fill """ - with self._lock: - prefix, kw = "", {} - if not args: - pass - elif len(args) == 1 and isinstance(args[0], str): - prefix = args[0] - elif len(args) == 1 and isinstance(args[0], dict): - kw = args[0] - elif ( - len(args) == 2 - and isinstance(args[0], str) - and isinstance(args[1], dict) - ): - prefix, kw = args - else: - raise ValueError( - f"Invalid arguments {args!r}. Usage is either " - "rc.update(dict), rc.update(kwy=value, ...), " - "rc.update(category, dict), or rc.update(category, key=value, ...)." - ) - prefix = prefix and prefix + "." - kw.update(kwargs) - for key, value in kw.items(): - self.__setitem__(prefix + key, value) + prefix, kw = "", {} + if not args: + pass + elif len(args) == 1 and isinstance(args[0], str): + prefix = args[0] + elif len(args) == 1 and isinstance(args[0], dict): + kw = args[0] + elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], dict): + prefix, kw = args + else: + raise ValueError( + f"Invalid arguments {args!r}. Usage is either " + "rc.update(dict), rc.update(kwy=value, ...), " + "rc.update(category, dict), or rc.update(category, key=value, ...)." + ) + prefix = prefix and prefix + "." + kw.update(kwargs) + for key, value in kw.items(): + self.__setitem__(prefix + key, value) @docstring._snippet_manager def reset(self, local=True, user=True, default=True, **kwargs): @@ -1786,8 +1772,7 @@ def _context_mode(self): """ Return the highest (least permissive) context mode. 
""" - with self._lock: - return max((context.mode for context in self._context), default=0) + return max((context.mode for context in self._context), default=0) @property def changed(self): diff --git a/ultraplot/internals/context.py b/ultraplot/internals/context.py index cc93f0d49..f429e6898 100644 --- a/ultraplot/internals/context.py +++ b/ultraplot/internals/context.py @@ -2,7 +2,6 @@ """ Utilities for manging context. """ -import threading from . import ic # noqa: F401 @@ -26,10 +25,6 @@ class _state_context(object): Temporarily modify attribute(s) for an arbitrary object. """ - _lock = ( - threading.RLock() - ) # class-wide reentrant lock (or use instance-wide if needed) - def __init__(self, obj, **kwargs): self._obj = obj self._attrs_new = kwargs @@ -38,14 +33,12 @@ def __init__(self, obj, **kwargs): } def __enter__(self): - self._lock.acquire() for key, value in self._attrs_new.items(): setattr(self._obj, key, value) - def __exit__(self, *args): + def __exit__(self, *args): # noqa: U100 for key in self._attrs_new.keys(): if key in self._attrs_prev: setattr(self._obj, key, self._attrs_prev[key]) else: delattr(self._obj, key) - self._lock.release() diff --git a/ultraplot/tests/conftest.py b/ultraplot/tests/conftest.py index 51839c19c..0a76ac245 100644 --- a/ultraplot/tests/conftest.py +++ b/ultraplot/tests/conftest.py @@ -1,36 +1,6 @@ -""" -Conftest.py for UltraPlot testing with modular MPL plugin architecture. - -This file provides essential test fixtures and integrates the enhanced matplotlib -testing functionality through a clean, modular plugin system. - -Thread-Safe Random Number Generation: -- Provides explicit RNG fixtures for test functions that need random numbers -- Each thread gets independent, deterministic RNG instances -- Compatible with pytest-xdist parallel execution -- Clean separation of concerns - tests explicitly declare RNG dependencies -""" - -import threading, os, shutil, pytest, re -import numpy as np, ultraplot as uplt -import warnings, logging +import os, shutil, pytest, re, numpy as np, ultraplot as uplt from pathlib import Path -from datetime import datetime - -# Import the modular MPL plugin components -from ultraplot.tests.mpl_plugin import ( - StoreFailedMplPlugin, - ProgressTracker, - CleanupManager, - HTMLReportGenerator, -) -from ultraplot.tests.mpl_plugin.utils import ( - count_mpl_tests, - should_generate_html_report, - get_failed_mpl_tests, -) -from ultraplot.tests.mpl_plugin.progress import get_progress_tracker -from ultraplot.tests.mpl_plugin.cleanup import get_cleanup_manager +import warnings, logging SEED = 51423 @@ -38,101 +8,64 @@ @pytest.fixture def rng(): """ - Fixture providing a numpy random generator for tests. - - This fixture provides a numpy.random.Generator instance that: - - Uses the same seed (51423) for each test - - Ensures reproducible results - - Resets state for each test - - Usage in tests: - def test_something(rng): - random_data = rng.normal(0, 1, size=100) - random_ints = rng.integers(0, 10, size=5) + Ensure all tests start with the same rng """ - # Each test gets the same seed for reproducibility - return np.random.default_rng(seed=SEED) - - -@pytest.fixture(autouse=True) -def isolate_mpl_testing(): - """ - Isolate matplotlib testing for parallel execution. - - This prevents race conditions in parallel testing (pytest-xdist) where - multiple processes can interfere with each other's image comparison tests. - The main issue is that pytest-mpl uses shared temporary directories that - can conflict between processes. 
- """ - import matplotlib as mpl - import matplotlib.pyplot as plt - import tempfile - import os - - # Store original backend and ensure consistent state - original_backend = mpl.get_backend() - if original_backend != "Agg": - mpl.use("Agg", force=True) - - # Clear any existing figures - plt.close("all") - - # Create process-specific temporary directory for mpl results - # This prevents file conflicts between parallel processes - worker_id = os.environ.get("PYTEST_XDIST_WORKER", "master") - with tempfile.TemporaryDirectory(prefix=f"mpl_test_{worker_id}_") as temp_dir: - os.environ["MPL_TEST_TEMP_DIR"] = temp_dir - - yield - - # Clean up after test - plt.close("all") - uplt.close("all") - - # Remove environment variable - if "MPL_TEST_TEMP_DIR" in os.environ: - del os.environ["MPL_TEST_TEMP_DIR"] - - # Restore original backend - if original_backend != "Agg": - mpl.use(original_backend, force=True) + return np.random.default_rng(SEED) @pytest.fixture(autouse=True) def close_figures_after_test(): - """Automatically close all figures after each test.""" yield uplt.close("all") +# Define command line option def pytest_addoption(parser): - """Add command line options for enhanced matplotlib testing.""" parser.addoption( "--store-failed-only", action="store_true", - help="Store only failed matplotlib comparison images (enables artifact optimization)", + help="Store only failed matplotlib comparison images", ) -def pytest_collection_modifyitems(config, items): - """ - Modify test items during collection to set up MPL testing. +class StoreFailedMplPlugin: + def __init__(self, config): + self.config = config - This function: - - Counts matplotlib image comparison tests - - Sets up progress tracking - - Skips tests with missing baseline images - """ - # Count total mpl tests for progress tracking - total_mpl_tests = count_mpl_tests(items) + # Get base directories as Path objects + self.result_dir = Path(config.getoption("--mpl-results-path", "./results")) + self.baseline_dir = Path(config.getoption("--mpl-baseline-path", "./baseline")) + + print(f"Store Failed MPL Plugin initialized") + print(f"Result dir: {self.result_dir}") + + def _has_mpl_marker(self, report: pytest.TestReport): + """Check if the test has the mpl_image_compare marker.""" + return report.keywords.get("mpl_image_compare", False) + + def _remove_success(self, report: pytest.TestReport): + """Remove successful test images.""" - if total_mpl_tests > 0: - print(f"๐Ÿ“Š Detected {total_mpl_tests} matplotlib image comparison tests") - # Initialize progress tracker with total count - progress_tracker = get_progress_tracker() - progress_tracker.set_total_tests(total_mpl_tests) + pattern = r"(?P::|/)|\[|\]|\.py" + name = re.sub( + pattern, + lambda m: "." 
if m.group("sep") else "_" if m.group(0) == "[" else "", + report.nodeid, + ) + target = (self.result_dir / name).absolute() + if target.is_dir(): + shutil.rmtree(target) - # Skip tests that don't have baseline images + @pytest.hookimpl(trylast=True) + def pytest_runtest_logreport(self, report): + """Hook that processes each test report.""" + # Delete successfull tests + if report.when == "call" and report.failed == False: + if self._has_mpl_marker(report): + self._remove_success(report) + + +def pytest_collection_modifyitems(config, items): for item in items: for mark in item.own_markers: if base_dir := config.getoption("--mpl-baseline-path", default=None): @@ -144,90 +77,10 @@ def pytest_collection_modifyitems(config, items): ) -@pytest.hookimpl(trylast=True) -def pytest_terminal_summary(terminalreporter, exitstatus, config): - """ - Generate enhanced summary and HTML reports after all tests complete. - - This function: - - Finalizes progress tracking - - Performs deferred cleanup - - Generates interactive HTML reports - - Only runs on the main process (not xdist workers) - """ - # Skip on workers, only run on the main process - if hasattr(config, "workerinput"): - return - - # Check if we should generate reports - if not should_generate_html_report(config): - return - - # Get the plugin instance to finalize operations - plugin = _get_plugin_instance(config) - if plugin: - # Finalize progress and cleanup - plugin.finalize() - - # Generate HTML report - html_generator = HTMLReportGenerator(config) - failed_tests = plugin.get_failed_tests() - html_generator.generate_report(failed_tests) - - +# Register the plugin if the option is used def pytest_configure(config): - """ - Configure pytest with the enhanced MPL plugin. - - This function: - - Suppresses verbose matplotlib logging - - Registers the StoreFailedMplPlugin for enhanced functionality - - Sets up the plugin regardless of cleanup options (HTML reports always available) - - Configures process-specific temporary directories for parallel testing - """ - # Suppress ultraplot config loading which mpl does not recognize - logging.getLogger("matplotlib").setLevel(logging.ERROR) - logging.getLogger("ultraplot").setLevel(logging.WARNING) - - # Configure process-specific results directory for parallel testing - worker_id = os.environ.get("PYTEST_XDIST_WORKER", "master") - if ( - not hasattr(config.option, "mpl_results_path") - or not config.option.mpl_results_path - ): - config.option.mpl_results_path = f"./mpl-results-{worker_id}" - try: - # Always register the plugin - it provides enhanced functionality beyond just cleanup - config.pluginmanager.register(StoreFailedMplPlugin(config)) + if config.getoption("--store-failed-only", False): + config.pluginmanager.register(StoreFailedMplPlugin(config)) except Exception as e: - print(f"Error during MPL plugin configuration: {e}") - - -def _get_plugin_instance(config): - """Get the StoreFailedMplPlugin instance from the plugin manager.""" - for plugin in config.pluginmanager.get_plugins(): - if isinstance(plugin, StoreFailedMplPlugin): - return plugin - return None - - -# Legacy support - these functions are kept for backward compatibility -# but now delegate to the modular plugin system - - -def _should_generate_html_report(config): - """Legacy function - delegates to utils module.""" - return should_generate_html_report(config) - - -def _get_failed_mpl_tests(config): - """Legacy function - delegates to utils module.""" - return get_failed_mpl_tests(config) - - -def _get_results_directory(config): 
- """Legacy function - delegates to utils module.""" - from ultraplot.tests.mpl_plugin.utils import get_results_directory - - return get_results_directory(config) + print(f"Error during plugin configuration: {e}") diff --git a/ultraplot/tests/mpl_plugin/__init__.py b/ultraplot/tests/mpl_plugin/__init__.py deleted file mode 100644 index e5326d58e..000000000 --- a/ultraplot/tests/mpl_plugin/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -MPL Plugin Module for Enhanced Matplotlib Testing - -This module provides enhanced functionality for matplotlib image comparison tests, -including progress tracking, artifact optimization, and HTML report generation. - -The module is structured as follows: -- core.py: Main plugin class and core functionality -- progress.py: Progress bar and visual feedback -- cleanup.py: Deferred cleanup and artifact optimization -- reporting.py: HTML report generation -- utils.py: Utility functions and helpers -""" - -from .core import StoreFailedMplPlugin -from .progress import ProgressTracker -from .cleanup import CleanupManager -from .reporting import HTMLReportGenerator -from .utils import extract_test_name_from_filename, categorize_image_file - -__all__ = [ - "StoreFailedMplPlugin", - "ProgressTracker", - "CleanupManager", - "HTMLReportGenerator", - "extract_test_name_from_filename", - "categorize_image_file", -] - -__version__ = "1.0.0" diff --git a/ultraplot/tests/mpl_plugin/cleanup.py b/ultraplot/tests/mpl_plugin/cleanup.py deleted file mode 100644 index 4947c0ee5..000000000 --- a/ultraplot/tests/mpl_plugin/cleanup.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Cleanup management module for matplotlib test artifacts. - -This module provides deferred cleanup functionality to optimize artifact sizes -and eliminate race conditions in parallel test execution. -""" - -import shutil -import threading -from pathlib import Path - - -class CleanupManager: - """Manages deferred cleanup of successful test artifacts.""" - - def __init__(self): - self.pending_cleanups = set() - self.lock = threading.Lock() - - def mark_for_cleanup(self, target_path): - """Mark a directory for cleanup without blocking the worker.""" - with self.lock: - if target_path.exists() and target_path.is_dir(): - self.pending_cleanups.add(target_path) - return True - return False - - def perform_cleanup(self, store_failed_only=False): - """Perform deferred cleanup of all marked directories.""" - if not store_failed_only: - self._handle_no_cleanup() - return - - with self.lock: - cleanup_list = list(self.pending_cleanups) - self.pending_cleanups.clear() - - if cleanup_list: - self._cleanup_directories(cleanup_list) - else: - print("๐Ÿ’พ Perfect optimization: No cleanup needed (all tests failed)") - - def _handle_no_cleanup(self): - """Handle case where cleanup optimization is disabled.""" - with self.lock: - total_items = len(self.pending_cleanups) - self.pending_cleanups.clear() - - if total_items > 0: - print(f"๐Ÿ’พ All {total_items} test images preserved for review") - print(" ๐Ÿ’ก Use --store-failed-only to enable artifact size optimization") - - def _cleanup_directories(self, cleanup_list): - """Clean up the list of directories with progress tracking.""" - print( - f"๐Ÿงน Cleaning up {len(cleanup_list)} successful test directories (--store-failed-only enabled)..." 
- ) - success_count = 0 - - for i, target in enumerate(cleanup_list, 1): - # Update cleanup progress bar - percentage = int((i / len(cleanup_list)) * 100) - bar_width = 20 - filled_width = int((percentage / 100) * bar_width) - bar = ( - "=" * filled_width - + (">" if filled_width < bar_width else "") - + " " - * (bar_width - filled_width - (1 if filled_width < bar_width else 0)) - ) - - try: - if target.exists() and target.is_dir(): - shutil.rmtree(target) - success_count += 1 - status = "โœ“" - else: - status = "~" - except (FileNotFoundError, OSError, PermissionError): - status = "~" - except Exception as e: - status = "โœ—" - - cleanup_line = f"\rCleanup: [{bar}] {percentage:3d}% ({i}/{len(cleanup_list)}) {status}" - print(cleanup_line, end="", flush=True) - - print() # New line after progress bar - print( - f"โœ… Cleanup completed: {success_count}/{len(cleanup_list)} directories removed" - ) - if success_count < len(cleanup_list): - print( - f" Note: {len(cleanup_list) - success_count} directories were already removed or inaccessible" - ) - print("๐Ÿ’พ Artifact optimization: Only failed tests preserved for debugging") - - def get_pending_count(self): - """Get the number of directories pending cleanup.""" - with self.lock: - return len(self.pending_cleanups) - - -# Global cleanup manager instance -cleanup_manager = CleanupManager() - - -def get_cleanup_manager(): - """Get the global cleanup manager instance.""" - return cleanup_manager diff --git a/ultraplot/tests/mpl_plugin/core.py b/ultraplot/tests/mpl_plugin/core.py deleted file mode 100644 index c09dd2da2..000000000 --- a/ultraplot/tests/mpl_plugin/core.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -Core plugin module for enhanced matplotlib testing. - -This module contains the main StoreFailedMplPlugin class that coordinates -all matplotlib test functionality including progress tracking, cleanup management, -and HTML report generation. -""" - -import re -import pytest -from pathlib import Path - -from .progress import get_progress_tracker -from .cleanup import get_cleanup_manager -from .utils import create_nodeid_to_path_mapping, validate_config_paths - - -class StoreFailedMplPlugin: - """ - Main plugin class for enhanced matplotlib image comparison testing. 
- - This plugin provides: - - Real-time progress tracking with visual progress bars - - Deferred cleanup to eliminate race conditions - - Thread-safe artifact optimization - - Failed test tracking for HTML report generation - """ - - def __init__(self, config): - self.config = config - - # Validate and set up paths - paths = validate_config_paths(config) - self.result_dir = paths["results"] - self.baseline_dir = paths["baseline"] - - # Track failed mpl tests for HTML report generation - self.failed_mpl_tests = set() - - # Get global managers - self.progress_tracker = get_progress_tracker() - self.cleanup_manager = get_cleanup_manager() - - # Only show initialization message if MPL tests will be run - if any("--mpl" in str(arg) for arg in getattr(config, "args", [])): - print(f"Store Failed MPL Plugin initialized") - print(f"Result dir: {self.result_dir}") - - def _has_mpl_marker(self, report: pytest.TestReport): - """Check if the test has the mpl_image_compare marker.""" - return report.keywords.get("mpl_image_compare", False) - - def _remove_success(self, report: pytest.TestReport): - """Mark successful test images for deferred cleanup to eliminate blocking.""" - - # Only perform cleanup if --store-failed-only is enabled - if not self.config.getoption("--store-failed-only", False): - return - - # Convert nodeid to filesystem path - name = create_nodeid_to_path_mapping(report.nodeid) - target = (self.result_dir / name).absolute() - - # Mark for deferred cleanup (non-blocking) - if self.cleanup_manager.mark_for_cleanup(target): - print(".", end="", flush=True) - - @pytest.hookimpl(trylast=True) - def pytest_runtest_logreport(self, report): - """Hook that processes each test report.""" - # Track failed mpl tests and handle successful ones - if report.when == "call" and self._has_mpl_marker(report): - try: - # Update progress tracking - if report.outcome == "failed": - self.failed_mpl_tests.add(report.nodeid) - self.progress_tracker.increment_processed(failed=True) - else: - self.progress_tracker.increment_processed(failed=False) - # Mark successful tests for cleanup (if enabled) - self._remove_success(report) - - except Exception as e: - # Log but don't fail on processing errors - print(f"Warning: Error during test processing for {report.nodeid}: {e}") - - def get_failed_tests(self): - """Get the set of failed test nodeids.""" - return self.failed_mpl_tests.copy() - - def get_stats(self): - """Get current test statistics.""" - return self.progress_tracker.get_stats() - - def finalize(self): - """Finalize progress tracking and perform cleanup.""" - self.progress_tracker.finalize_progress() - store_failed_only = self.config.getoption("--store-failed-only", False) - self.cleanup_manager.perform_cleanup(store_failed_only) diff --git a/ultraplot/tests/mpl_plugin/progress.py b/ultraplot/tests/mpl_plugin/progress.py deleted file mode 100644 index be22f2752..000000000 --- a/ultraplot/tests/mpl_plugin/progress.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Progress tracking module for matplotlib test execution. - -This module provides real-time progress bars and visual feedback for matplotlib -image comparison tests, including success/failure counters and completion percentages. 
-""" - -import threading - - -class ProgressTracker: - """Manages progress tracking and visual feedback for matplotlib tests.""" - - def __init__(self): - self.total_tests = 0 - self.processed_tests = 0 - self.failed_tests = 0 - self.lock = threading.Lock() - - def set_total_tests(self, total): - """Set the total number of matplotlib tests expected.""" - with self.lock: - self.total_tests = total - - def increment_processed(self, failed=False): - """Increment the processed test counter.""" - with self.lock: - self.processed_tests += 1 - if failed: - self.failed_tests += 1 - self._update_progress_bar() - - def _update_progress_bar(self): - """Update the progress bar with current test status.""" - if self.total_tests == 0: - return - - percentage = int((self.processed_tests / self.total_tests) * 100) - success_count = self.processed_tests - self.failed_tests - - # Create progress bar: [=========> ] 67% (45/67) | โœ“32 โœ—13 - bar_width = 20 - filled_width = int((percentage / 100) * bar_width) - bar = ( - "=" * filled_width - + (">" if filled_width < bar_width else "") - + " " * (bar_width - filled_width - (1 if filled_width < bar_width else 0)) - ) - - progress_line = f"\rMPL Tests: [{bar}] {percentage:3d}% ({self.processed_tests}/{self.total_tests}) | โœ“{success_count} โœ—{self.failed_tests}" - print(progress_line, end="", flush=True) - - def finalize_progress(self): - """Finalize the progress bar and show summary.""" - print() # New line after progress bar - success_count = self.processed_tests - self.failed_tests - - if self.failed_tests > 0: - print(f"๐Ÿ“Š MPL Summary: {success_count} passed, {self.failed_tests} failed") - else: - print(f"๐Ÿ“Š MPL Summary: All {success_count} tests passed!") - - def get_stats(self): - """Get current test statistics.""" - with self.lock: - return { - "total": self.total_tests, - "processed": self.processed_tests, - "failed": self.failed_tests, - "passed": self.processed_tests - self.failed_tests, - } - - -# Global progress tracker instance -progress_tracker = ProgressTracker() - - -def get_progress_tracker(): - """Get the global progress tracker instance.""" - return progress_tracker diff --git a/ultraplot/tests/mpl_plugin/reporting.py b/ultraplot/tests/mpl_plugin/reporting.py deleted file mode 100644 index 6086da3f1..000000000 --- a/ultraplot/tests/mpl_plugin/reporting.py +++ /dev/null @@ -1,642 +0,0 @@ -""" -HTML reporting module for matplotlib test results. - -This module provides comprehensive HTML report generation with interactive features, -including visual comparisons, filtering capabilities, and responsive design. 
-""" - -import os -import shutil -from pathlib import Path -from datetime import datetime - -from .utils import ( - extract_test_name_from_filename, - categorize_image_file, - get_results_directory, -) - - -class HTMLReportGenerator: - """Generates interactive HTML reports for matplotlib test results.""" - - def __init__(self, config): - self.config = config - self.template_dir = Path(__file__).parent / "templates" - self.results_dir = get_results_directory(config) - # Ensure template directory exists - if not self.template_dir.exists(): - print(f"Warning: Template directory not found: {self.template_dir}") - - def generate_report(self, failed_tests_set): - """Generate the complete HTML report.""" - if not self._should_generate_report(): - return - - print("\nGenerating HTML report for image comparison tests...") - print( - "Note: When using --store-failed-only, only failed tests will be included in the report" - ) - - test_results = self._process_test_results() - if not test_results: - print("No test results found for HTML report generation") - return - - # Generate display names and mark failed tests - self._enhance_test_results(test_results, failed_tests_set) - - # Copy template files to results directory - self._copy_template_assets() - - # Generate HTML content - html_content = self._generate_html_content(test_results) - - # Write the report - report_path = self.results_dir / "index.html" - report_path.parent.mkdir(parents=True, exist_ok=True) - - with open(report_path, "w") as f: - f.write(html_content) - - print(f"HTML report generated at: {report_path}") - print(f"Template directory: {self.template_dir}") - print(f"Results directory: {self.results_dir}") - print("Open the report in a web browser to view the results.") - - def _should_generate_report(self): - """Check if HTML report should be generated.""" - if not self.results_dir.exists(): - print(f"Results directory not found: {self.results_dir}") - return False - return True - - def _copy_template_assets(self): - """Copy CSS and JS files to results directory.""" - try: - # Copy CSS file - css_src = self.template_dir / "styles.css" - css_dst = self.results_dir / "styles.css" - if css_src.exists(): - shutil.copy2(css_src, css_dst) - print(f"Copied CSS to: {css_dst}") - else: - print(f"Warning: CSS template not found at: {css_src}") - - # Copy JS file - js_src = self.template_dir / "scripts.js" - js_dst = self.results_dir / "scripts.js" - if js_src.exists(): - shutil.copy2(js_src, js_dst) - print(f"Copied JS to: {js_dst}") - else: - print(f"Warning: JS template not found at: {js_src}") - except Exception as e: - print(f"Error copying template assets: {e}") - - def _load_template(self, template_name): - """Load a template file.""" - template_path = self.template_dir / template_name - print(f"Attempting to load template: {template_path}") - print(f"Template exists: {template_path.exists()}") - try: - with open(template_path, "r", encoding="utf-8") as f: - content = f.read() - print( - f"Successfully loaded template: {template_path} ({len(content)} chars)" - ) - return content - except FileNotFoundError: - print( - f"Warning: Template {template_name} not found at {template_path}, using fallback" - ) - return None - except Exception as e: - print(f"Error loading template {template_name}: {e}") - return None - - def _process_test_results(self): - """Process test result files and organize by test.""" - test_results = {} - - # Recursively search for all PNG files - for image_file in self.results_dir.rglob("*.png"): - rel_path = 
image_file.relative_to(self.results_dir) - parent_dir = rel_path.parent if rel_path.parent != Path(".") else None - filename = image_file.name - - # Skip hash files - if "hash" in filename: - continue - - # Handle pytest-mpl directory structure - if parent_dir: - test_name = str(parent_dir) - - if test_name not in test_results: - test_results[test_name] = { - "baseline": None, - "result": None, - "diff": None, - "path": parent_dir, - } - - # Categorize files based on pytest-mpl naming convention - if filename == "baseline.png": - test_results[test_name]["baseline"] = image_file - elif filename == "result.png": - test_results[test_name]["result"] = image_file - elif filename == "result-failed-diff.png": - test_results[test_name]["diff"] = image_file - else: - # Fallback for files in root directory (legacy naming) - test_id = image_file.stem - test_name = extract_test_name_from_filename(filename, test_id) - image_type = categorize_image_file(filename, test_id) - - if test_name not in test_results: - test_results[test_name] = { - "baseline": None, - "result": None, - "diff": None, - "path": parent_dir, - } - - if image_type == "baseline": - test_results[test_name]["baseline"] = image_file - elif image_type == "diff": - test_results[test_name]["diff"] = image_file - elif image_type == "result" and not test_results[test_name]["result"]: - test_results[test_name]["result"] = image_file - - return test_results - - def _enhance_test_results(self, test_results, failed_tests_set): - """Add display names and test status to results.""" - for test_name, data in test_results.items(): - # Generate display name - if data["path"]: - data["display_name"] = test_name.replace("/", ".").replace("\\", ".") - else: - data["display_name"] = test_name - - # Mark as failed if tracked during test execution - data["test_failed"] = any( - any( - pattern in nodeid - for pattern in [ - test_name.replace(".", "::"), - test_name.replace( - "ultraplot.tests.", "ultraplot/tests/" - ).replace(".", "::"), - f"ultraplot/tests/{test_name.split('.')[-2]}.py::{test_name.split('.')[-1]}", - ] - ) - for nodeid in failed_tests_set - ) - - def _generate_html_content(self, test_results): - """Generate the complete HTML content with enhanced inline styling.""" - # Calculate statistics - total_tests = len(test_results) - failed_tests = sum( - 1 - for data in test_results.values() - if data.get("test_failed", False) or data.get("diff") - ) - passed_tests = sum( - 1 - for data in test_results.values() - if data.get("baseline") - and data.get("result") - and not data.get("test_failed", False) - ) - unknown_tests = total_tests - failed_tests - passed_tests - - # Generate test cases HTML - test_cases_html = self._generate_all_test_cases(test_results) - - # Enhanced CSS styling - css_content = """""" - - # Enhanced JavaScript - js_content = """""" - - # Generate timestamp - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - # Build HTML - html_content = f""" - - - - - UltraPlot Image Comparison Report - {css_content} - - -
-        [report page body markup: header "UltraPlot Image Comparison Report"; summary counters {total_tests} Total Tests, {failed_tests} Failed, {passed_tests} Passed, {unknown_tests} Unknown; filter buttons; test results container {test_cases_html}; footer "Report generated on {timestamp}"]
- {js_content} - -""" - - return html_content - - def _generate_all_test_cases(self, test_results): - """Generate HTML for all test cases.""" - test_cases_html = [] - - # Sort tests by display name - sorted_tests = sorted( - test_results.items(), key=lambda x: x[1].get("display_name", x[0]) - ) - - for test_name, data in sorted_tests: - test_case_html = self._generate_test_case_html(test_name, data) - test_cases_html.append(test_case_html) - - return "\n".join(test_cases_html) - - def _generate_test_case_html(self, test_name, data): - """Generate HTML for a single test case.""" - display_name = data.get("display_name", test_name) - - # Determine test status - if data.get("test_failed", False) or data.get("diff"): - status = "failed" - status_text = "FAILED" - elif ( - data.get("baseline") - and data.get("result") - and not data.get("test_failed", False) - ): - status = "passed" - status_text = "PASSED" - else: - status = "unknown" - status_text = "UNKNOWN" - - # Generate image columns - image_columns = [] - - # Add baseline image column - if data.get("baseline"): - rel_path = data["baseline"].relative_to(self.results_dir) - image_columns.append( - f""" -
-                [image column markup: heading "Baseline (Expected)", baseline image at {rel_path}]
-                """
-            )
-        else:
-            image_columns.append(
-                """
-                [image column markup: heading "Baseline (Expected)", "No baseline image" placeholder]
-                """
-            )
-
-        # Add result image column
-        if data.get("result"):
-            rel_path = data["result"].relative_to(self.results_dir)
-            image_columns.append(
-                f"""
-                [image column markup: heading "Result (Actual)", result image at {rel_path}]
-                """
-            )
-        else:
-            image_columns.append(
-                """
-                [image column markup: heading "Result (Actual)", "No result image" placeholder]
-                """
-            )
-
-        # Add diff image column (only if it exists)
-        if data.get("diff"):
-            rel_path = data["diff"].relative_to(self.results_dir)
-            image_columns.append(
-                f"""
-                [image column markup: heading "Difference", difference image at {rel_path}]
-                """
-            )
-
-        image_columns_html = "\n".join(image_columns)
-
-        return f"""
-        [test case markup: {display_name}, {status_text} status badge, data-status attribute, images container with {image_columns_html}]
""" - - def _generate_fallback_html(self, test_results): - """Generate fallback HTML if templates are not available.""" - # Calculate statistics - total_tests = len(test_results) - failed_tests = sum( - 1 - for data in test_results.values() - if data.get("test_failed", False) or data.get("diff") - ) - passed_tests = sum( - 1 - for data in test_results.values() - if data.get("baseline") - and data.get("result") - and not data.get("test_failed", False) - ) - unknown_tests = total_tests - failed_tests - passed_tests - - # Try to load external CSS for better styling - css_content = "" - css_template = self._load_template("styles.css") - if css_template: - css_content = f"" - else: - css_content = """""" - - html_parts = [ - "", - "", - "", - " ", - " ", - " UltraPlot Image Comparison Report", - css_content, - "", - "", - "
", - "

UltraPlot Image Comparison Report

", - "
", - f"

Total: {total_tests} Passed: {passed_tests} Failed: {failed_tests} Unknown: {unknown_tests}

", - "
", - "
", - " ", - " ", - " ", - " ", - "
", - ] - - # Add test cases - for test_name, data in sorted(test_results.items()): - html_parts.append(self._generate_test_case_html(test_name, data)) - - # Try to load external JavaScript or use inline fallback - js_content = "" - js_template = self._load_template("scripts.js") - if js_template: - js_content = f"" - else: - js_content = """""" - - # Add footer with JavaScript and timestamp - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - html_parts.extend( - [ - f"
Report generated on {timestamp}
", - "
", - js_content, - "", - "", - ] - ) - - return "\n".join(html_parts) diff --git a/ultraplot/tests/mpl_plugin/templates/image_column.html b/ultraplot/tests/mpl_plugin/templates/image_column.html deleted file mode 100644 index 669bcb40e..000000000 --- a/ultraplot/tests/mpl_plugin/templates/image_column.html +++ /dev/null @@ -1,8 +0,0 @@ -
-[image_column.html template: heading {{title}}; {{#if image_path}} shows "{{title}} image", {{else}} shows "No {{title_lower}} image" placeholder {{/if}}]
diff --git a/ultraplot/tests/mpl_plugin/templates/report.html b/ultraplot/tests/mpl_plugin/templates/report.html
deleted file mode 100644
index a41b7acc1..000000000
--- a/ultraplot/tests/mpl_plugin/templates/report.html
+++ /dev/null
@@ -1,49 +0,0 @@
-[report.html template: page and header titled {{title}}; summary counters {{total_tests}} Total Tests, {{failed_count}} Failed, {{passed_count}} Passed, {{unknown_count}} Unknown; filter buttons; {{test_cases}} container; footer "Report generated on {{timestamp}}"]
- - - - diff --git a/ultraplot/tests/mpl_plugin/templates/scripts.js b/ultraplot/tests/mpl_plugin/templates/scripts.js deleted file mode 100644 index ed2e849d5..000000000 --- a/ultraplot/tests/mpl_plugin/templates/scripts.js +++ /dev/null @@ -1,245 +0,0 @@ -// Filter functionality for test results -function filterTests(filterType) { - const testCases = document.querySelectorAll('.test-case'); - const filterBtns = document.querySelectorAll('.filter-btn'); - - // Remove active class from all buttons - filterBtns.forEach(btn => btn.classList.remove('active')); - - // Add active class to clicked button or find the correct one - if (event && event.target) { - event.target.classList.add('active'); - } else { - // Find button by filter type for programmatic calls - const targetBtn = Array.from(filterBtns).find(btn => - btn.textContent.toLowerCase().includes(filterType === 'all' ? 'show all' : filterType) - ); - if (targetBtn) targetBtn.classList.add('active'); - } - - // Filter test cases - testCases.forEach(testCase => { - const status = testCase.getAttribute('data-status'); - if (filterType === 'all') { - testCase.classList.remove('hidden'); - } else if (filterType === 'failed' && status === 'failed') { - testCase.classList.remove('hidden'); - } else if (filterType === 'passed' && status === 'passed') { - testCase.classList.remove('hidden'); - } else if (filterType === 'unknown' && status === 'unknown') { - testCase.classList.remove('hidden'); - } else { - testCase.classList.add('hidden'); - } - }); - - // Update URL hash for bookmarking - history.replaceState(null, null, `#filter-${filterType}`); -} - -// Image zoom functionality -function setupImageZoom() { - const images = document.querySelectorAll('.image-column img'); - - images.forEach(img => { - img.style.cursor = 'zoom-in'; - img.addEventListener('click', function() { - if (this.classList.contains('zoomed')) { - // Zoom out - this.classList.remove('zoomed'); - this.style.position = ''; - this.style.top = ''; - this.style.left = ''; - this.style.width = ''; - this.style.height = ''; - this.style.zIndex = ''; - this.style.cursor = 'zoom-in'; - document.body.style.overflow = ''; - - // Remove backdrop - const backdrop = document.querySelector('.image-backdrop'); - if (backdrop) { - backdrop.remove(); - } - } else { - // Zoom in - this.classList.add('zoomed'); - - // Create backdrop - const backdrop = document.createElement('div'); - backdrop.className = 'image-backdrop'; - backdrop.style.cssText = ` - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - background: rgba(0, 0, 0, 0.8); - z-index: 9998; - cursor: zoom-out; - `; - - backdrop.addEventListener('click', () => { - this.click(); // Trigger zoom out - }); - - document.body.appendChild(backdrop); - - // Style the image - this.style.position = 'fixed'; - this.style.top = '50%'; - this.style.left = '50%'; - this.style.transform = 'translate(-50%, -50%)'; - this.style.maxWidth = '90vw'; - this.style.maxHeight = '90vh'; - this.style.width = 'auto'; - this.style.height = 'auto'; - this.style.zIndex = '9999'; - this.style.cursor = 'zoom-out'; - document.body.style.overflow = 'hidden'; - } - }); - }); -} - -// Keyboard navigation -function setupKeyboardNavigation() { - document.addEventListener('keydown', function(e) { - switch(e.key) { - case '1': - filterTests('all'); - break; - case '2': - filterTests('failed'); - break; - case '3': - filterTests('passed'); - break; - case '4': - filterTests('unknown'); - break; - case 'Escape': - // Close any zoomed images - const 
zoomedImage = document.querySelector('.image-column img.zoomed'); - if (zoomedImage) { - zoomedImage.click(); - } - break; - } - }); -} - -// Search functionality -function setupSearch() { - // Create search input if it doesn't exist - const filterControls = document.querySelector('.filter-controls'); - if (filterControls && !document.querySelector('#test-search')) { - const searchInput = document.createElement('input'); - searchInput.id = 'test-search'; - searchInput.type = 'text'; - searchInput.placeholder = 'Search test names...'; - searchInput.style.cssText = ` - padding: 10px 15px; - border: 2px solid #dee2e6; - border-radius: 25px; - margin-left: auto; - max-width: 300px; - font-size: 14px; - `; - - searchInput.addEventListener('input', function() { - const searchTerm = this.value.toLowerCase(); - const testCases = document.querySelectorAll('.test-case'); - - testCases.forEach(testCase => { - const testName = testCase.querySelector('.test-name').textContent.toLowerCase(); - const matchesSearch = testName.includes(searchTerm); - - if (matchesSearch) { - testCase.classList.remove('search-hidden'); - } else { - testCase.classList.add('search-hidden'); - } - }); - }); - - filterControls.appendChild(searchInput); - - // Add CSS for search-hidden - const style = document.createElement('style'); - style.textContent = '.test-case.search-hidden { display: none !important; }'; - document.head.appendChild(style); - } -} - -// Initialize page with 'failed' filter on load and restore from URL hash -function initializePage() { - // Check URL hash for filter preference - const hash = window.location.hash; - let initialFilter = 'failed'; // Default to failed - - if (hash.startsWith('#filter-')) { - const filterType = hash.replace('#filter-', ''); - if (['all', 'failed', 'passed', 'unknown'].includes(filterType)) { - initialFilter = filterType; - } - } - - filterTests(initialFilter); -} - -// Setup smooth scrolling for internal links -function setupSmoothScrolling() { - const links = document.querySelectorAll('a[href^="#"]'); - links.forEach(link => { - link.addEventListener('click', function(e) { - e.preventDefault(); - const target = document.querySelector(this.getAttribute('href')); - if (target) { - target.scrollIntoView({ - behavior: 'smooth', - block: 'start' - }); - } - }); - }); -} - -// Initialize everything when DOM is loaded -document.addEventListener('DOMContentLoaded', function() { - initializePage(); - setupImageZoom(); - setupKeyboardNavigation(); - setupSearch(); - setupSmoothScrolling(); - - // Add keyboard shortcuts info - const container = document.querySelector('.container'); - if (container) { - const helpText = document.createElement('div'); - helpText.style.cssText = ` - position: fixed; - bottom: 20px; - right: 20px; - background: rgba(0, 0, 0, 0.8); - color: white; - padding: 10px; - border-radius: 5px; - font-size: 12px; - opacity: 0.7; - z-index: 1000; - `; - helpText.innerHTML = ` - Keyboard shortcuts:
- 1: Show All | 2: Failed Only | 3: Passed Only | 4: Unknown
- ESC: Close zoomed image | Click images to zoom - `; - document.body.appendChild(helpText); - - // Hide help after 10 seconds - setTimeout(() => { - helpText.style.opacity = '0'; - setTimeout(() => helpText.remove(), 1000); - }, 10000); - } -}); diff --git a/ultraplot/tests/mpl_plugin/templates/styles.css b/ultraplot/tests/mpl_plugin/templates/styles.css deleted file mode 100644 index cbd0e0f7e..000000000 --- a/ultraplot/tests/mpl_plugin/templates/styles.css +++ /dev/null @@ -1,293 +0,0 @@ -/* Reset and base styles */ -* { - margin: 0; - padding: 0; - box-sizing: border-box; -} - -body { - font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif; - line-height: 1.6; - color: #333; - background-color: #f5f5f5; -} - -.container { - max-width: 1200px; - margin: 0 auto; - padding: 20px; -} - -/* Header styles */ -.header { - background: white; - border-radius: 8px; - padding: 30px; - margin-bottom: 20px; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -.header h1 { - color: #2c3e50; - margin-bottom: 20px; - font-size: 2.5em; - font-weight: 300; -} - -/* Summary section */ -.summary { - display: flex; - gap: 20px; - flex-wrap: wrap; -} - -.summary-item { - background: #f8f9fa; - border-radius: 6px; - padding: 15px 20px; - text-align: center; - min-width: 120px; - border-left: 4px solid #6c757d; -} - -.summary-item.failed { - border-left-color: #dc3545; - background: #fff5f5; -} - -.summary-item.passed { - border-left-color: #28a745; - background: #f0fff4; -} - -.summary-item.unknown { - border-left-color: #ffc107; - background: #fffbf0; -} - -.summary-item .count { - display: block; - font-size: 2em; - font-weight: bold; - color: #2c3e50; -} - -.summary-item .label { - font-size: 0.9em; - color: #6c757d; - text-transform: uppercase; - letter-spacing: 0.5px; -} - -/* Filter controls */ -.filter-controls { - margin-bottom: 20px; - display: flex; - gap: 10px; - flex-wrap: wrap; -} - -.filter-btn { - background: white; - border: 2px solid #dee2e6; - border-radius: 25px; - padding: 10px 20px; - cursor: pointer; - font-size: 14px; - font-weight: 500; - transition: all 0.3s ease; - color: #495057; -} - -.filter-btn:hover { - background: #f8f9fa; - border-color: #adb5bd; -} - -.filter-btn.active { - background: #007bff; - border-color: #007bff; - color: white; -} - -/* Test results */ -.test-results { - display: flex; - flex-direction: column; - gap: 15px; -} - -.test-case { - background: white; - border-radius: 8px; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); - overflow: hidden; - transition: all 0.3s ease; -} - -.test-case:hover { - box-shadow: 0 4px 8px rgba(0,0,0,0.15); -} - -.test-case.hidden { - display: none; -} - -.test-header { - padding: 20px; - background: #f8f9fa; - border-bottom: 1px solid #dee2e6; - display: flex; - justify-content: space-between; - align-items: center; - flex-wrap: wrap; - gap: 10px; -} - -.test-name { - font-weight: 600; - font-size: 1.1em; - color: #2c3e50; -} - -.status-badge { - padding: 5px 12px; - border-radius: 15px; - font-size: 0.85em; - font-weight: 600; - text-transform: uppercase; - letter-spacing: 0.5px; -} - -.status-badge.failed { - background: #dc3545; - color: white; -} - -.status-badge.passed { - background: #28a745; - color: white; -} - -.status-badge.unknown { - background: #ffc107; - color: #212529; -} - -.test-content { - padding: 20px; -} - -.images-container { - display: flex; - gap: 20px; - flex-wrap: wrap; -} - -.image-column { - flex: 1; - min-width: 300px; - max-width: 400px; -} - -.image-column h4 
{ - margin-bottom: 10px; - color: #495057; - font-size: 1em; - font-weight: 600; - text-align: center; - padding: 10px; - background: #f8f9fa; - border-radius: 4px; -} - -.image-column img { - width: 100%; - height: auto; - border: 1px solid #dee2e6; - border-radius: 4px; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -.no-image { - padding: 40px 20px; - text-align: center; - color: #6c757d; - background: #f8f9fa; - border: 2px dashed #dee2e6; - border-radius: 4px; - font-style: italic; -} - -/* Timestamp */ -.timestamp { - text-align: center; - color: #6c757d; - font-size: 0.9em; - margin-top: 30px; - padding: 20px; - background: white; - border-radius: 8px; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -/* Responsive design */ -@media (max-width: 768px) { - .container { - padding: 10px; - } - - .header { - padding: 20px; - } - - .header h1 { - font-size: 2em; - } - - .summary { - justify-content: center; - } - - .summary-item { - min-width: 100px; - } - - .test-header { - flex-direction: column; - align-items: flex-start; - } - - .images-container { - flex-direction: column; - } - - .image-column { - min-width: 100%; - max-width: 100%; - } - - .filter-controls { - justify-content: center; - } -} - -@media (max-width: 480px) { - .filter-btn { - padding: 8px 16px; - font-size: 13px; - } - - .summary { - gap: 10px; - } - - .summary-item { - padding: 10px 15px; - min-width: 80px; - } - - .summary-item .count { - font-size: 1.5em; - } -} diff --git a/ultraplot/tests/mpl_plugin/templates/test_case.html b/ultraplot/tests/mpl_plugin/templates/test_case.html deleted file mode 100644 index 2f00238cf..000000000 --- a/ultraplot/tests/mpl_plugin/templates/test_case.html +++ /dev/null @@ -1,11 +0,0 @@ -
-[test_case.html template: test case card with {{test_name}}, {{status}} status badge, and images container {{image_columns}}]
diff --git a/ultraplot/tests/mpl_plugin/utils.py b/ultraplot/tests/mpl_plugin/utils.py deleted file mode 100644 index b38d21df8..000000000 --- a/ultraplot/tests/mpl_plugin/utils.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -Utility functions for matplotlib test processing. - -This module provides helper functions for file processing, test name extraction, -and other common operations used throughout the MPL plugin. -""" - -import re -from pathlib import Path - - -def extract_test_name_from_filename(filename, test_id): - """Extract test name from various pytest-mpl filename patterns.""" - # Handle different pytest-mpl filename patterns - if filename.endswith("-expected.png"): - return test_id.replace("-expected", "") - elif filename.endswith("-failed-diff.png"): - return test_id.replace("-failed-diff", "") - elif filename.endswith("-result.png"): - return test_id.replace("-result", "") - elif filename.endswith("-actual.png"): - return test_id.replace("-actual", "") - else: - # Remove common result suffixes if present - possible_test_name = test_id - for suffix in ["-result", "-actual", "-diff"]: - if possible_test_name.endswith(suffix): - possible_test_name = possible_test_name.replace(suffix, "") - return possible_test_name - - -def categorize_image_file(filename, test_id): - """Categorize an image file based on its filename pattern.""" - if filename.endswith("-expected.png"): - return "baseline" - elif filename.endswith("-failed-diff.png"): - return "diff" - elif filename.endswith("-result.png") or filename.endswith("-actual.png"): - return "result" - else: - # Default assumption for uncategorized files - return "result" - - -def get_results_directory(config): - """Get the results directory path from config.""" - results_path = ( - getattr(config.option, "mpl_results_path", None) - or getattr(config, "_mpl_results_path", None) - or "./mpl-results" - ) - return Path(results_path) - - -def should_generate_html_report(config): - """Determine if HTML report should be generated.""" - # Check if matplotlib comparison tests are being used - if hasattr(config.option, "mpl_results_path"): - return True - if hasattr(config, "_mpl_results_path"): - return True - # Check if any mpl_image_compare markers were collected - if hasattr(config, "_mpl_image_compare_found"): - return True - return False - - -def get_failed_mpl_tests(config): - """Get set of failed mpl test nodeids from the plugin.""" - # Look for our plugin instance - for plugin in config.pluginmanager.get_plugins(): - if hasattr(plugin, "failed_mpl_tests"): - return plugin.failed_mpl_tests - return set() - - -def create_nodeid_to_path_mapping(nodeid): - """Convert pytest nodeid to filesystem path pattern.""" - pattern = r"(?P::|/)|\[|\]|\.py" - name = re.sub( - pattern, - lambda m: "." 
if m.group("sep") else "_" if m.group(0) == "[" else "", - nodeid, - ) - return name - - -def safe_path_conversion(path_input): - """Safely convert path input to Path object, handling None values.""" - if path_input is None: - return None - return Path(path_input) - - -def count_mpl_tests(items): - """Count the number of matplotlib image comparison tests in the item list.""" - return sum( - 1 - for item in items - if any(mark.name == "mpl_image_compare" for mark in item.own_markers) - ) - - -def is_mpl_test(item): - """Check if a test item is a matplotlib image comparison test.""" - return any(mark.name == "mpl_image_compare" for mark in item.own_markers) - - -def format_file_size(size_bytes): - """Format file size in human-readable format.""" - if size_bytes == 0: - return "0 bytes" - - size_names = ["bytes", "KB", "MB", "GB"] - i = 0 - while size_bytes >= 1024 and i < len(size_names) - 1: - size_bytes /= 1024.0 - i += 1 - - return f"{size_bytes:.1f} {size_names[i]}" - - -def validate_config_paths(config): - """Validate and normalize configuration paths.""" - results_path = config.getoption("--mpl-results-path", None) or "./results" - baseline_path = config.getoption("--mpl-baseline-path", None) or "./baseline" - - return { - "results": Path(results_path), - "baseline": Path(baseline_path), - } diff --git a/ultraplot/tests/test_1dplots.py b/ultraplot/tests/test_1dplots.py index 77c40a138..fdd2be6a9 100644 --- a/ultraplot/tests/test_1dplots.py +++ b/ultraplot/tests/test_1dplots.py @@ -525,7 +525,7 @@ def test_heatmap_labels(rng): return fig -@pytest.mark.mpl_image_compare +@pytest.mark.mpl_image_compare() def test_networks(rng): """ Create a baseline network graph that tests @@ -575,7 +575,7 @@ def test_networks(rng): inax = ax.inset_axes([*pos, 0.2, 0.2], zoom=0) layout_kw = {} if layout in ("random", "spring", "arf"): - layout_kw = dict(seed=np.random.default_rng(SEED)) + layout_kw = dict(seed=SEED) inax.graph( g,