Skip to content

mypy: Fix import-untyped and most misc issues #378

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
May 28, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 18 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ tests = [
"scipy-stubs",
"typing_extensions",

# Untyped libraries, used to prevent "reportMissingImports" and get inferred typing
# Untyped libraries and partial stubs. Used to prevent "reportMissingImports" and get inferred typing
"joblib",
"networkx",
"PyOpenGL",
Expand Down Expand Up @@ -142,15 +142,16 @@ python_version = "3.9" # Target oldest supported Python version
strict = true
check_untyped_defs = true # Strict check on all defs
show_column_numbers = true
# Not all imports in these stubs are gonna be typed
# Don't infer symbols from untyped packages as Any
follow_untyped_imports = true
warn_unused_ignores = false # Change from pandas
# Partial stubs are acceptable
disallow_any_generics = false
disallow_incomplete_defs = false
disallow_untyped_defs = false
# Suppressing errors
disable_error_code = [
# Not all imports in these stubs are gonna be typed
"import-untyped",
# mypy's overload implementation differs from pyright
# `assert-type` issues in tests mostly come from checking overloads
# Since this project is specific to Pylance, just ignore them
Expand All @@ -159,6 +160,18 @@ disable_error_code = [
# as they are inherited from the implementation.
"override",
# TODO
"assignment", # 773 errors in 172 files
"misc", # 692 errors in 132 files
"assignment", # 744 errors in 155 files
]

[[tool.mypy.overrides]]
# These modules are to be removed soon, not worth solving many issues
module = ["matplotlib.*", "networkx.*"]
disable_error_code = [
"assignment",
"misc",
]
[[tool.mypy.overrides]]
module = ["sympy.*", "skimage.*", "sklearn.*"]
# TODO: Too many untyped decorators still left
# https://github.com/python/mypy/issues/19148
disable_error_code = ["misc"]
1 change: 0 additions & 1 deletion stubs/skimage/__init__.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ from ._shared.version_requirements import ensure_python_version as ensure_python

__version__: str = ...
submodules: list = ...
__getattr__ = ...
__lazy_dir__ = ...

def __dir__(): ...
Expand Down
91 changes: 87 additions & 4 deletions stubs/skimage/data/__init__.pyi
Original file line number Diff line number Diff line change
@@ -1,5 +1,88 @@
from .._shared import lazy as lazy
from ._binary_blobs import binary_blobs
from ._fetchers import (
astronaut,
brain,
brick,
camera,
cat,
cell,
cells3d,
checkerboard,
chelsea,
clock,
coffee,
coins,
colorwheel,
data_dir,
download_all,
eagle,
file_hash,
grass,
gravel,
horse,
hubble_deep_field,
human_mitosis,
immunohistochemistry,
kidney,
lbp_frontal_face_cascade_filename,
lfw_subset,
lily,
logo,
microaneurysms,
moon,
nickel_solidification,
page,
palisades_of_vogt as palisades_of_vogt,
protein_transport,
retina,
rocket,
shepp_logan_phantom,
skin,
stereo_motorcycle,
text,
vortex,
)

__getattr__ = ...
__dir__ = ...
__all__ = ... # pyright: ignore[reportUnsupportedDunderAll] # TODO
__all__ = [
"astronaut",
"binary_blobs",
"brain",
"brick",
"camera",
"cat",
"cell",
"cells3d",
"checkerboard",
"chelsea",
"clock",
"coffee",
"coins",
"colorwheel",
"data_dir",
"download_all",
"eagle",
"file_hash",
"grass",
"gravel",
"horse",
"hubble_deep_field",
"human_mitosis",
"immunohistochemistry",
"kidney",
"lbp_frontal_face_cascade_filename",
"lfw_subset",
"lily",
"logo",
"microaneurysms",
"moon",
"nickel_solidification",
"page",
"protein_transport",
"retina",
"rocket",
"shepp_logan_phantom",
"skin",
"stereo_motorcycle",
"text",
"vortex",
]
3 changes: 3 additions & 0 deletions stubs/skimage/data/_fetchers.pyi
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from os import PathLike

def file_hash(fname, alg="sha256") -> str: ...

legacy_data_dir = ...
skimage_distribution_dir = ...

Expand Down Expand Up @@ -46,6 +48,7 @@ def hubble_deep_field(): ...
def retina(): ...
def shepp_logan_phantom(): ...
def colorwheel(): ...
def palisades_of_vogt(): ...
def rocket(): ...
def stereo_motorcycle(): ...
def lfw_subset(): ...
Expand Down
23 changes: 0 additions & 23 deletions stubs/sklearn/gaussian_process/kernels.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -23,29 +23,6 @@ class Hyperparameter(NamedTuple):
value_type: str
name: str

# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ...

def __new__(
cls,
name: str,
value_type: str,
bounds: ndarray | str | tuple[float, int] | tuple[float, float],
n_elements: int = 1,
fixed=None,
) -> Self: ...

# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other) -> bool: ...

class Kernel(metaclass=ABCMeta):
def get_params(self, deep: bool = True) -> dict: ...
def set_params(self, **params) -> Self: ...
Expand Down
4 changes: 2 additions & 2 deletions stubs/sklearn/metrics/_regression.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,10 @@ def mean_squared_error(
sample_weight: None | ArrayLike = None,
multioutput: ArrayLike | Literal["raw_values", "uniform_average"] = "uniform_average",
) -> ndarray | Float: ...
@overload
@deprecated(
"`squared` is deprecated in 1.4 and will be removed in 1.6. Use `root_mean_squared_error` instead to calculate the root mean squared error."
)
@overload
def mean_squared_error(
y_true: MatrixLike | ArrayLike,
y_pred: MatrixLike | ArrayLike,
Expand All @@ -76,10 +76,10 @@ def mean_squared_log_error(
sample_weight: None | ArrayLike = None,
multioutput: ArrayLike | Literal["raw_values", "uniform_average"] = "uniform_average",
) -> float | ndarray: ...
@overload
@deprecated(
"`squared` is deprecated in 1.4 and will be removed in 1.6. Use `root_mean_squared_log_error` instead to calculate the root mean squared logarithmic error."
)
@overload
def mean_squared_log_error(
y_true: MatrixLike | ArrayLike,
y_pred: MatrixLike | ArrayLike,
Expand Down
42 changes: 1 addition & 41 deletions stubs/sklearn/preprocessing/_data.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -112,12 +112,10 @@ class StandardScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
def transform(self, X: spmatrix, copy: None | bool = None) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike, copy: None | bool = None) -> ndarray: ...
def transform(self, X: MatrixLike, copy: None | bool = None) -> ndarray | spmatrix: ...
@overload
def inverse_transform(self, X: spmatrix, copy: None | bool = None) -> spmatrix: ...
@overload
def inverse_transform(self, X: ArrayLike, copy: None | bool = None) -> ndarray: ...
def inverse_transform(self, X: MatrixLike | ArrayLike, copy: None | bool = None) -> ndarray | spmatrix: ...

class MaxAbsScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
n_samples_seen_: int = ...
Expand All @@ -135,12 +133,10 @@ class MaxAbsScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
def transform(self, X: spmatrix) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike) -> ndarray: ...
def transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...
@overload
def inverse_transform(self, X: spmatrix) -> spmatrix: ...
@overload
def inverse_transform(self, X: ArrayLike) -> ndarray: ...
def inverse_transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...

def maxabs_scale(X: MatrixLike | ArrayLike, *, axis: Int = 0, copy: bool = True): ...

Expand All @@ -166,12 +162,10 @@ class RobustScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
def transform(self, X: spmatrix) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike) -> ndarray: ...
def transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...
@overload
def inverse_transform(self, X: spmatrix) -> spmatrix: ...
@overload
def inverse_transform(self, X: ArrayLike) -> ndarray: ...
def inverse_transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...

@overload
def robust_scale(
Expand All @@ -186,7 +180,7 @@ def robust_scale(
) -> spmatrix: ...
@overload
def robust_scale(
X: ndarray,
X: ArrayLike,
*,
axis: Int = 0,
with_centering: bool = True,
Expand All @@ -195,16 +189,6 @@ def robust_scale(
copy: bool = True,
unit_variance: bool = False,
) -> ndarray: ...
def robust_scale(
X: MatrixLike,
*,
axis: Int = 0,
with_centering: bool = True,
with_scaling: bool = True,
quantile_range: tuple[float, float] = ...,
copy: bool = True,
unit_variance: bool = False,
) -> ndarray | spmatrix: ...
@overload
def normalize(
X: spmatrix,
Expand Down Expand Up @@ -241,14 +225,6 @@ def normalize(
copy: bool = True,
return_norm: Literal[False] = False,
) -> ndarray: ...
def normalize(
X: MatrixLike | ArrayLike,
norm: Literal["l1", "l2", "max"] = "l2",
*,
axis: int = 1,
copy: bool = True,
return_norm: bool = False,
) -> csr_matrix | tuple[ndarray | spmatrix, ndarray] | ndarray: ...

class Normalizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
feature_names_in_: ndarray = ...
Expand All @@ -262,7 +238,6 @@ class Normalizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
def transform(self, X: spmatrix, copy: None | bool = None) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike, copy: None | bool = None) -> ndarray: ...
def transform(self, X: MatrixLike | ArrayLike, copy: None | bool = None) -> ndarray | spmatrix: ...

def binarize(X: MatrixLike | ArrayLike, *, threshold: Float = 0.0, copy: bool = True) -> ndarray | spmatrix: ...

Expand All @@ -278,7 +253,6 @@ class Binarizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
def transform(self, X: spmatrix, copy: None | bool = None) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike, copy: None | bool = None) -> ndarray: ...
def transform(self, X: MatrixLike | ArrayLike, copy: None | bool = None) -> ndarray | spmatrix: ...

class KernelCenterer(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
feature_names_in_: ndarray = ...
Expand All @@ -294,7 +268,6 @@ class KernelCenterer(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEsti
def add_dummy_feature(X: spmatrix, value: Float = 1.0) -> spmatrix: ...
@overload
def add_dummy_feature(X: ArrayLike, value: Float = 1.0) -> ndarray: ...
def add_dummy_feature(X: MatrixLike | ArrayLike, value: Float = 1.0) -> ndarray | spmatrix: ...

class QuantileTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
feature_names_in_: ndarray = ...
Expand Down Expand Up @@ -324,12 +297,10 @@ class QuantileTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator)
def transform(self, X: spmatrix) -> spmatrix: ...
@overload
def transform(self, X: ArrayLike) -> ndarray: ...
def transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...
@overload
def inverse_transform(self, X: spmatrix) -> spmatrix: ...
@overload
def inverse_transform(self, X: ArrayLike) -> ndarray: ...
def inverse_transform(self, X: MatrixLike | ArrayLike) -> ndarray | spmatrix: ...

@overload
def quantile_transform(
Expand All @@ -355,17 +326,6 @@ def quantile_transform(
random_state: RandomState | None | Int = None,
copy: bool = True,
) -> ndarray: ...
def quantile_transform(
X: MatrixLike | ArrayLike,
*,
axis: Int = 0,
n_quantiles: Int = 1000,
output_distribution: Literal["uniform", "normal"] = "uniform",
ignore_implicit_zeros: bool = False,
subsample: Int = ...,
random_state: RandomState | None | Int = None,
copy: bool = True,
) -> ndarray | spmatrix: ...

class PowerTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
feature_names_in_: ndarray = ...
Expand Down
12 changes: 11 additions & 1 deletion stubs/sklearn/utils/_isfinite.pyi
Original file line number Diff line number Diff line change
@@ -1,7 +1,17 @@
from enum import IntEnum
from typing import Final

import numpy as np

class FiniteStatus(IntEnum): ...
__test__: dict

class FiniteStatus(IntEnum):
all_finite = 0
has_nan = 1
has_infinite = 2

all_finite: Final = FiniteStatus.all_finite
has_nan: Final = FiniteStatus.has_nan
has_infinite: Final = FiniteStatus.has_infinite

def cy_isfinite(a: np.ndarray, allow_nan: bool = False) -> bool: ...
3 changes: 3 additions & 0 deletions stubs/sympy-stubs/printing/defaults.pyi
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
from sympy.core._print_helpers import Printable as Printable

DefaultPrinting = Printable
6 changes: 4 additions & 2 deletions stubs/transformers-stubs/models/auto/configuration_auto.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ from typing import Any, NoReturn, TypeVar

from transformers.configuration_utils import PretrainedConfig

_F = TypeVar("_F", bound=Callable[..., Any])
_CallableT = TypeVar("_CallableT", bound=Callable)

CONFIG_MAPPING_NAMES: OrderedDict[str, str]
MODEL_NAMES_MAPPING: OrderedDict[str, str]
Expand Down Expand Up @@ -35,7 +35,9 @@ class _LazyLoadAllMappings(OrderedDict[str, str]):
def __iter__(self) -> Iterator[str]: ...
def __contains__(self, item: object) -> bool: ...

def replace_list_option_in_docstrings(config_to_class=None, use_model_types: bool = True) -> Callable[[_F], _F]: ...
def replace_list_option_in_docstrings(
config_to_class=None, use_model_types: bool = True
) -> Callable[[_CallableT], _CallableT]: ...

class AutoConfig:
def __init__(self) -> None: ...
Expand Down
Loading