Skip to content

Address more comments #22

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Jan 26, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion scipy/_lib/tests/test_array_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def test_array_api_extra_hook(self):

@skip_xp_backends(
"dask.array",
reason="raw dask.array namespace doesn't ignores copy=True in asarray"
reason="raw dask.array namespace ignores copy=True in asarray"
)
def test_copy(self, xp):
for _xp in [xp, None]:
Expand Down
2 changes: 2 additions & 0 deletions scipy/fft/tests/test_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -518,6 +518,7 @@ def test_definition(self, xp):
x2 = xp.asarray([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=xp.float64)

# default dtype varies across backends

y = 9 * fft.fftfreq(9, xp=xp)
xp_assert_close(y, x, check_dtype=False, check_namespace=True)

Expand Down Expand Up @@ -549,6 +550,7 @@ def test_definition(self, xp):
x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64)

# default dtype varies across backends

y = 9 * fft.rfftfreq(9, xp=xp)
xp_assert_close(y, x, check_dtype=False, check_namespace=True)

Expand Down
12 changes: 5 additions & 7 deletions scipy/fft/tests/test_real_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn
import scipy.fft as fft
from scipy import fftpack
from scipy._lib._array_api import xp_assert_close
from scipy._lib._array_api import xp_copy, xp_assert_close

skip_xp_backends = pytest.mark.skip_xp_backends

Expand Down Expand Up @@ -195,10 +195,9 @@ def test_orthogonalize_noop(func, type, norm, xp):
@skip_xp_backends(cpu_only=True)
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
def test_orthogonalize_dct1(norm, xp):
x_np = np.random.rand(100)
x = xp.asarray(x_np)
x = xp.asarray(np.random.rand(100))

x2 = xp.asarray(x_np.copy())
x2 = xp_copy(x, xp=xp)
x2[0] *= SQRT_2
x2[-1] *= SQRT_2

Expand Down Expand Up @@ -230,9 +229,8 @@ def test_orthogonalize_dcst2(func, norm, xp):
@pytest.mark.parametrize("norm", ["backward", "ortho", "forward"])
@pytest.mark.parametrize("func", [dct, dst])
def test_orthogonalize_dcst3(func, norm, xp):
x_np = np.random.rand(100)
x = xp.asarray(x_np)
x2 = xp.asarray(x_np.copy())
x = xp.asarray(np.random.rand(100))
x2 = xp_copy(x, xp=xp)
x2[0 if func == dct else -1] *= SQRT_2

y1 = func(x, type=3, norm=norm, orthogonalize=True)
Expand Down
6 changes: 3 additions & 3 deletions scipy/ndimage/_morphology.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
from . import _nd_image
from . import _filters

from scipy._lib._array_api import is_dask, array_namespace
from scipy._lib.array_api_compat import is_dask_array

__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
'binary_dilation', 'binary_opening', 'binary_closing',
Expand Down Expand Up @@ -222,7 +222,7 @@ def _binary_erosion(input, structure, iterations, mask, output,
except TypeError as e:
raise TypeError('iterations parameter should be an integer') from e

if is_dask(array_namespace(input)):
if is_dask_array(input):
# Note: If you create an dask array with ones
# it does a stride trick where it makes an array
# (with stride 0) using a scalar
Expand Down Expand Up @@ -1800,6 +1800,7 @@ def morphological_laplace(input, size=None, footprint=None, structure=None,
Output

"""
input = np.asarray(input)
tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin, axes=axes)
if isinstance(output, np.ndarray):
Expand All @@ -1812,7 +1813,6 @@ def morphological_laplace(input, size=None, footprint=None, structure=None,
tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin, axes=axes)
np.add(tmp1, tmp2, tmp2)
input = np.asarray(input)
np.subtract(tmp2, input, tmp2)
np.subtract(tmp2, input, tmp2)
return tmp2
Expand Down
12 changes: 5 additions & 7 deletions scipy/ndimage/tests/test_filters.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ def test_correlate01(self, xp):

@xfail_xp_backends('cupy', reason="Differs by a factor of two?")
@skip_xp_backends("jax.numpy", reason="output array is read-only.")
@skip_xp_backends("dask.array", reason="wrong answer")
@xfail_xp_backends("dask.array", reason="wrong answer")
def test_correlate01_overlap(self, xp):
array = xp.reshape(xp.arange(256), (16, 16))
weights = xp.asarray([2])
Expand Down Expand Up @@ -881,7 +881,7 @@ def test_gauss06(self, xp):
assert_array_almost_equal(output1, output2)

@skip_xp_backends("jax.numpy", reason="output array is read-only.")
@skip_xp_backends("dask.array", reason="wrong result")
@xfail_xp_backends("dask.array", reason="output keyword not handled properly")
def test_gauss_memory_overlap(self, xp):
input = xp.arange(100 * 100, dtype=xp.float32)
input = xp.reshape(input, (100, 100))
Expand Down Expand Up @@ -1228,7 +1228,7 @@ def test_prewitt01(self, dtype, xp):
assert_array_almost_equal(t, output)

@skip_xp_backends("jax.numpy", reason="output array is read-only.")
@skip_xp_backends("dask.array", reason="output array is read-only.")
@xfail_xp_backends("dask.array", reason="output array is read-only.")
@pytest.mark.parametrize('dtype', types + complex_types)
def test_prewitt02(self, dtype, xp):
if is_torch(xp) and dtype in ("uint16", "uint32", "uint64"):
Expand Down Expand Up @@ -1838,9 +1838,7 @@ def test_rank06(self, xp):
@skip_xp_backends("jax.numpy",
reason="assignment destination is read-only",
)
@xfail_xp_backends("dask.array",
reason="wrong answer",
)
@xfail_xp_backends("dask.array", reason="wrong answer")
def test_rank06_overlap(self, xp):
array = xp.asarray([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
Expand Down Expand Up @@ -2647,7 +2645,7 @@ def test_gaussian_radius_invalid(xp):


@skip_xp_backends("jax.numpy", reason="output array is read-only")
@skip_xp_backends("dask.array", reason="wrong answer")
@xfail_xp_backends("dask.array", reason="wrong answer")
class TestThreading:
def check_func_thread(self, n, fun, args, out):
from threading import Thread
Expand Down
13 changes: 8 additions & 5 deletions scipy/ndimage/tests/test_measurements.py
Original file line number Diff line number Diff line change
Expand Up @@ -548,17 +548,16 @@ def test_value_indices02(xp):
ndimage.value_indices(data)


@skip_xp_backends("dask.array", reason="len on data-dependent output shapes")
def test_value_indices03(xp):
"Test different input array shapes, from 1-D to 4-D"
for shape in [(36,), (18, 2), (3, 3, 4), (3, 3, 2, 2)]:
a = xp.asarray((12*[1]+12*[2]+12*[3]), dtype=xp.int32)
a = xp.reshape(a, shape)

trueKeys = xp.unique_values(a)
# convert to numpy to prevent issues with data-dependent shapes
# from unique for dask
trueKeys = np.asarray(xp.unique_values(a))
vi = ndimage.value_indices(a)
# TODO: list(trueKeys) needs len of trueKeys
# (which is unknown for dask since it is the result of an unique call)
assert list(vi.keys()) == list(trueKeys)
for k in [int(x) for x in trueKeys]:
trueNdx = xp.nonzero(a == k)
Expand Down Expand Up @@ -688,6 +687,7 @@ def test_sum_labels(xp):
assert xp.all(output_sum == output_labels)
assert_array_almost_equal(output_labels, xp.asarray([4.0, 0.0, 5.0]))


def test_mean01(xp):
labels = np.asarray([1, 0], dtype=bool)
labels = xp.asarray(labels)
Expand Down Expand Up @@ -819,7 +819,6 @@ def test_maximum05(xp):
assert ndimage.maximum(x) == -1


@pytest.mark.filterwarnings("ignore::FutureWarning:dask")
def test_median01(xp):
a = xp.asarray([[1, 2, 0, 1],
[5, 3, 0, 4],
Expand Down Expand Up @@ -862,6 +861,7 @@ def test_median_gh12836_bool(xp):
output = ndimage.median(a, labels=xp.ones((2,)), index=xp.asarray([1]))
assert_array_almost_equal(output, xp.asarray([1.0]))


def test_median_no_int_overflow(xp):
# test integer overflow fix on example from gh-12836
a = xp.asarray([65, 70], dtype=xp.int8)
Expand Down Expand Up @@ -902,6 +902,7 @@ def test_variance04(xp):
output = ndimage.variance(input)
assert_almost_equal(output, xp.asarray(0.25), check_0d=False)


def test_variance05(xp):
labels = xp.asarray([2, 2, 3])
for type in types:
Expand All @@ -911,6 +912,7 @@ def test_variance05(xp):
output = ndimage.variance(input, labels, 2)
assert_almost_equal(output, xp.asarray(1.0), check_0d=False)


def test_variance06(xp):
labels = xp.asarray([2, 2, 3, 3, 4])
with np.errstate(all='ignore'):
Expand Down Expand Up @@ -1126,6 +1128,7 @@ def test_maximum_position06(xp):
assert output[0] == (0, 0)
assert output[1] == (1, 1)


@xfail_xp_backends("torch", reason="output[1] is wrong on pytorch")
def test_maximum_position07(xp):
# Test float labels
Expand Down
6 changes: 5 additions & 1 deletion scipy/ndimage/tests/test_morphology.py
Original file line number Diff line number Diff line change
Expand Up @@ -2315,7 +2315,6 @@ def test_grey_erosion01(self, xp):
[5, 5, 3, 3, 1]]))

@skip_xp_backends("jax.numpy", reason="output array is read-only.")
@skip_xp_backends("dask.array", reason="output array is read-only.")
@xfail_xp_backends("cupy", reason="https://github.com/cupy/cupy/issues/8398")
def test_grey_erosion01_overlap(self, xp):

Expand Down Expand Up @@ -2521,6 +2520,8 @@ def test_white_tophat01(self, xp):
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
# array created by xp.zeros is non-writeable for dask
# and jax

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not true. All arrays are non-writeable for jax. All arrays are writeable for dask.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

r.e. writeability, I am not referring to the mutability of the original dask array, but the numpy array created from the dask array with np.asarray.

MRE

import dask.array as da
a = da.zeros(10)
a_np = np.asarray(a)
a_np[1] = 10
Traceback (most recent call last):
  File "<python-input-4>", line 1, in <module>
    a_np[1] = 10
    ~~~~^^^
ValueError: assignment destination is read-only

I'll remove the jax comment (and I'll raise an issue on scipy to disallow the output keyword for jax since it looks like there's no way to make that work).

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, that's because the chunks of a are broadcasted, and so is a_np unless it's a concatenation of chunks.

>>> da.zeros(10).__array__().strides
(0,)
>>> da.zeros(10, chunks=5).__array__().strides
()

Broadcasted arrays are read-only as the whole thing is only 8 bytes in size.

Copy link

@crusaderky crusaderky Jan 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The whole idea of having an output= parameter makes no sense if you are going to convert it to numpy.

This makes sense:

def f(x: Array, output: Array) -> None:
    output[:] = x * 2

This does not:

def f(x: Array, output: Array) -> None:
    x = np.asarray(x)
    output = np.asarray(output)
    output[:] = x * 2

In the second example, even without the broadcasting issue you're facing, the output item that the user is holding will remain completely blank, unless np.asarray returns a view of the original memory. which, for dask, can never be.

Copy link

@crusaderky crusaderky Jan 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note that np.asarray(output, copy=False) will not crash, while it should. This is a bug in Dask.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, makes sense.

I'll put in a separate PR to the main scipy repo disabling the output kwarg for jax (to keep the diff down here).

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

output = xp.zeros(array.shape, dtype=array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
Expand Down Expand Up @@ -2574,6 +2575,8 @@ def test_white_tophat04(self, xp):
structure = xp.asarray(structure)

# Check that type mismatch is properly handled
# This output array is read-only for dask and jax
# TODO: investigate why for dask?

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also this makes no sense to me.

output = xp.empty_like(array, dtype=xp.float64)
ndimage.white_tophat(array, structure=structure, output=output)

Expand All @@ -2588,6 +2591,7 @@ def test_black_tophat01(self, xp):
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
# This output array is read-only for dask and jax
output = xp.zeros(array.shape, dtype=array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
Expand Down
5 changes: 4 additions & 1 deletion scipy/signal/_filter_design.py
Original file line number Diff line number Diff line change
Expand Up @@ -1791,7 +1791,10 @@ def normalize(b, a):
raise ValueError("Denominator must have at least on nonzero element.")

# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')

# cast to numpy by hand to avoid libraries like dask
# trying to dispatch this function via NEP 18
den = np.trim_zeros(np.asarray(den), 'f')

# Normalize transfer function
num, den = num / den[0], den / den[0]
Expand Down
5 changes: 4 additions & 1 deletion scipy/signal/_signaltools.py
Original file line number Diff line number Diff line change
Expand Up @@ -4087,7 +4087,10 @@ def detrend(data: np.ndarray, axis: int = -1,
else:
dshape = data.shape
N = dshape[axis]
bp = np.sort(np.unique(np.concatenate(np.atleast_1d(0, bp, N))))
# Manually cast to numpy to prevent
# NEP18 dispatching for libraries like dask
bp = np.asarray(np.concatenate(np.atleast_1d(0, bp, N)))
bp = np.sort(np.unique(bp))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Expand Down
34 changes: 18 additions & 16 deletions scipy/signal/tests/test_signaltools.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ def test_dtype_deprecation(self, xp):
convolve(a, b)


@pytest.mark.filterwarnings("ignore::FutureWarning:dask")

@skip_xp_backends(cpu_only=True, exceptions=['cupy'])
class TestConvolve2d:

Expand Down Expand Up @@ -477,8 +477,12 @@ def test_consistency_convolve_funcs(self, xp):
a = xp.arange(5)
b = xp.asarray([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
xp_assert_close(xp.asarray(np.convolve(a, b, mode=mode)),
signal.convolve(a, b, mode=mode))
# cast to numpy when calling np.convolve
# to prevent NEP18 dispatching e.g. from dask
xp_assert_close(
xp.asarray(np.convolve(np.asarray(a), np.asarray(b), mode=mode)),
signal.convolve(a, b, mode=mode)
)
xp_assert_close(
xp.squeeze(
signal.convolve2d(xp.asarray([a]), xp.asarray([b]), mode=mode),
Expand Down Expand Up @@ -508,7 +512,7 @@ def test_large_array(self, xp):
assert fails[0].size == 0


@pytest.mark.filterwarnings("ignore::FutureWarning:dask")

@skip_xp_backends(cpu_only=True, exceptions=['cupy'])
class TestFFTConvolve:

Expand Down Expand Up @@ -845,7 +849,9 @@ def test_random_data(self, axes, xp):
np.random.seed(1234)
a = xp.asarray(np.random.rand(1233) + 1j * np.random.rand(1233))
b = xp.asarray(np.random.rand(1321) + 1j * np.random.rand(1321))
expected = xp.asarray(np.convolve(a, b, 'full'))
# cast to numpy before np.convolve
# to prevent NEP 18 dispatching for e.g. dask
expected = xp.asarray(np.convolve(np.asarray(a), np.asarray(b), 'full'))

if axes == '':
out = fftconvolve(a, b, 'full')
Expand All @@ -860,7 +866,9 @@ def test_random_data_axes(self, axes, xp):
np.random.seed(1234)
a = xp.asarray(np.random.rand(1233) + 1j * np.random.rand(1233))
b = xp.asarray(np.random.rand(1321) + 1j * np.random.rand(1321))
expected = xp.asarray(np.convolve(a, b, 'full'))
# cast to numpy before np.convolve
# to prevent NEP 18 dispatching for e.g. dask
expected = xp.asarray(np.convolve(np.asarray(a), np.asarray(b), 'full'))

a = xp.asarray(np.tile(a, [2, 1]))
b = xp.asarray(np.tile(b, [2, 1]))
Expand Down Expand Up @@ -913,7 +921,9 @@ def test_random_data_multidim_axes(self, axes, xp):
def test_many_sizes(self, n, xp):
a = xp.asarray(np.random.rand(n) + 1j * np.random.rand(n))
b = xp.asarray(np.random.rand(n) + 1j * np.random.rand(n))
expected = xp.asarray(np.convolve(a, b, 'full'))
# cast to numpy before np.convolve
# to prevent NEP 18 dispatching for e.g. dask
expected = xp.asarray(np.convolve(np.asarray(a), np.asarray(b), 'full'))

out = fftconvolve(a, b, 'full')
xp_assert_close(out, expected, atol=1e-10)
Expand Down Expand Up @@ -965,10 +975,7 @@ def gen_oa_shapes_eq(sizes):


@skip_xp_backends("jax.numpy", reason="fails all around")
@skip_xp_backends("dask.array",
reason="Gets converted to numpy at some point for some reason. "
"Probably also suffers from boolean indexing issues"
)
@skip_xp_backends("dask.array", reason="wrong answer")
class TestOAConvolve:
@pytest.mark.slow()
@pytest.mark.parametrize('shape_a_0, shape_b_0',
Expand Down Expand Up @@ -2452,7 +2459,6 @@ def decimal(self, dt, xp):
dt = np.cdouble

# emulate np.finfo(dt).precision for complex64 and complex128
# note: unwrapped dask has no finfo
prec = {64: 15, 32: 6}[xp.finfo(dt).bits]
return int(2 * prec / 3)

Expand Down Expand Up @@ -4018,7 +4024,6 @@ def test_nonnumeric_dtypes(func, xp):
class TestSOSFilt:

# The test_rank* tests are pulled from _TestLinearFilter
@pytest.mark.filterwarnings("ignore::FutureWarning") # for dask
@skip_xp_backends('jax.numpy', reason='buffer array is read-only')
def test_rank1(self, dt, xp):
dt = getattr(xp, dt)
Expand Down Expand Up @@ -4050,7 +4055,6 @@ def test_rank1(self, dt, xp):
y = sosfilt(sos, x)
xp_assert_close(y, xp.asarray([1.0, 2, 2, 2, 2, 2, 2, 2]))

@pytest.mark.filterwarnings("ignore::FutureWarning") # for dask
@skip_xp_backends('jax.numpy', reason='buffer array is read-only')
def test_rank2(self, dt, xp):
dt = getattr(xp, dt)
Expand Down Expand Up @@ -4078,7 +4082,6 @@ def test_rank2(self, dt, xp):
y = sosfilt(sos, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)

@pytest.mark.filterwarnings("ignore::FutureWarning") # for dask
@skip_xp_backends('jax.numpy', reason='buffer array is read-only')
def test_rank3(self, dt, xp):
dt = getattr(xp, dt)
Expand Down Expand Up @@ -4334,7 +4337,6 @@ def test_bp(self, xp):
with assert_raises(ValueError):
detrend(data, type="linear", bp=3)

@pytest.mark.filterwarnings("ignore::FutureWarning:dask")
@pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]])
def test_detrend_array_bp(self, bp, xp):
# regression test for https://github.com/scipy/scipy/issues/18675
Expand Down
Loading