diff --git a/changes/3004.feature.rst b/changes/3004.feature.rst
new file mode 100644
index 0000000000..b15a5ec943
--- /dev/null
+++ b/changes/3004.feature.rst
@@ -0,0 +1,3 @@
+Optimizes reading multiple, but not all, chunks from a shard. Chunks are now read in parallel
+and reads of nearby chunks within the same shard are combined to reduce the number of calls to the store.
+See :ref:`user-guide-config` for more details.
\ No newline at end of file
diff --git a/docs/user-guide/config.rst b/docs/user-guide/config.rst
index 0ae8017ca9..78acf9f331 100644
--- a/docs/user-guide/config.rst
+++ b/docs/user-guide/config.rst
@@ -33,6 +33,10 @@ Configuration options include the following:
 - Async and threading options, e.g. ``async.concurrency`` and ``threading.max_workers``
 - Selections of implementations of codecs, codec pipelines and buffers
 - Enabling GPU support with ``zarr.config.enable_gpu()``. See :ref:`user-guide-gpu` for more.
+- Tuning reads from sharded arrays. When reading only part of a shard, reads of nearby chunks
+  within the same shard will be combined into a single request if they are less than
+  ``sharding.read.coalesce_max_gap_bytes`` apart and the combined request size is less than
+  ``sharding.read.coalesce_max_bytes``.
 
 For selecting custom implementations of codecs, pipelines, buffers and ndbuffers,
 first register the implementations in the registry and then select them in the config.
@@ -61,4 +65,6 @@ This is the current default configuration::
      'default_zarr_format': 3,
      'json_indent': 2,
      'ndbuffer': 'zarr.buffer.cpu.NDBuffer',
-     'threading': {'max_workers': None}}
+     'sharding': {'read': {'coalesce_max_bytes': 104857600,
+                           'coalesce_max_gap_bytes': 1048576}},
+     'threading': {'max_workers': None}}
\ No newline at end of file
diff --git a/src/zarr/codecs/sharding.py b/src/zarr/codecs/sharding.py
index cd8676b4d1..b29e24cfb9 100644
--- a/src/zarr/codecs/sharding.py
+++ b/src/zarr/codecs/sharding.py
@@ -38,11 +38,13 @@
 from zarr.core.common import (
     ChunkCoords,
     ChunkCoordsLike,
+    concurrent_map,
     parse_enum,
     parse_named_configuration,
     parse_shapelike,
     product,
 )
+from zarr.core.config import config
 from zarr.core.dtype.npy.int import UInt64
 from zarr.core.indexing import (
     BasicIndexer,
@@ -198,7 +200,9 @@ async def from_bytes(
 
     @classmethod
     def create_empty(
-        cls, chunks_per_shard: ChunkCoords, buffer_prototype: BufferPrototype | None = None
+        cls,
+        chunks_per_shard: ChunkCoords,
+        buffer_prototype: BufferPrototype | None = None,
     ) -> _ShardReader:
         if buffer_prototype is None:
             buffer_prototype = default_buffer_prototype()
@@ -248,7 +252,9 @@ def merge_with_morton_order(
 
     @classmethod
     def create_empty(
-        cls, chunks_per_shard: ChunkCoords, buffer_prototype: BufferPrototype | None = None
+        cls,
+        chunks_per_shard: ChunkCoords,
+        buffer_prototype: BufferPrototype | None = None,
     ) -> _ShardBuilder:
         if buffer_prototype is None:
             buffer_prototype = default_buffer_prototype()
@@ -329,9 +335,18 @@ async def finalize(
         return await shard_builder.finalize(index_location, index_encoder)
 
 
+class _ChunkCoordsByteSlice(NamedTuple):
+    """Holds a chunk's coordinates and its byte range in a serialized shard."""
+
+    coords: ChunkCoords
+    byte_slice: slice
+
+
 @dataclass(frozen=True)
 class ShardingCodec(
-    ArrayBytesCodec, ArrayBytesCodecPartialDecodeMixin, ArrayBytesCodecPartialEncodeMixin
+    ArrayBytesCodec,
+    ArrayBytesCodecPartialDecodeMixin,
+    ArrayBytesCodecPartialEncodeMixin,
 ):
     chunk_shape: ChunkCoords
     codecs: tuple[Codec, ...]
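The two configuration keys documented above can be changed at runtime with ``zarr.config.set``, the same mechanism the tests below use. A minimal sketch (the values shown are the defaults, not tuning advice)::

    import zarr

    # Combine reads of chunks that sit less than 1 MiB apart within a shard,
    # as long as the combined request stays under 100 MiB.
    zarr.config.set(
        {
            "sharding.read.coalesce_max_gap_bytes": 2**20,    # 1 MiB
            "sharding.read.coalesce_max_bytes": 100 * 2**20,  # 100 MiB
        }
    )

Setting ``sharding.read.coalesce_max_gap_bytes`` to a negative value (the tests below use ``-1``) disables coalescing, so each chunk in a partial shard read is fetched with its own request.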
@@ -508,32 +523,21 @@ async def _decode_partial_single( all_chunk_coords = {chunk_coords for chunk_coords, *_ in indexed_chunks} # reading bytes of all requested chunks - shard_dict: ShardMapping = {} + shard_dict_maybe: ShardMapping | None = {} if self._is_total_shard(all_chunk_coords, chunks_per_shard): # read entire shard shard_dict_maybe = await self._load_full_shard_maybe( - byte_getter=byte_getter, - prototype=chunk_spec.prototype, - chunks_per_shard=chunks_per_shard, + byte_getter, chunk_spec.prototype, chunks_per_shard ) - if shard_dict_maybe is None: - return None - shard_dict = shard_dict_maybe else: # read some chunks within the shard - shard_index = await self._load_shard_index_maybe(byte_getter, chunks_per_shard) - if shard_index is None: - return None - shard_dict = {} - for chunk_coords in all_chunk_coords: - chunk_byte_slice = shard_index.get_chunk_slice(chunk_coords) - if chunk_byte_slice: - chunk_bytes = await byte_getter.get( - prototype=chunk_spec.prototype, - byte_range=RangeByteRequest(chunk_byte_slice[0], chunk_byte_slice[1]), - ) - if chunk_bytes: - shard_dict[chunk_coords] = chunk_bytes + shard_dict_maybe = await self._load_partial_shard_maybe( + byte_getter, chunk_spec.prototype, chunks_per_shard, all_chunk_coords + ) + + if shard_dict_maybe is None: + return None + shard_dict = shard_dict_maybe # decoding chunks and writing them into the output buffer await self.codec_pipeline.read( @@ -615,7 +619,9 @@ async def _encode_partial_single( indexer = list( get_indexer( - selection, shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape) + selection, + shape=shard_shape, + chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape), ) ) @@ -689,7 +695,8 @@ def _shard_index_size(self, chunks_per_shard: ChunkCoords) -> int: get_pipeline_class() .from_codecs(self.index_codecs) .compute_encoded_size( - 16 * product(chunks_per_shard), self._get_index_chunk_spec(chunks_per_shard) + 16 * product(chunks_per_shard), + self._get_index_chunk_spec(chunks_per_shard), ) ) @@ -734,7 +741,8 @@ async def _load_shard_index_maybe( ) else: index_bytes = await byte_getter.get( - prototype=numpy_buffer_prototype(), byte_range=SuffixByteRequest(shard_index_size) + prototype=numpy_buffer_prototype(), + byte_range=SuffixByteRequest(shard_index_size), ) if index_bytes is not None: return await self._decode_shard_index(index_bytes, chunks_per_shard) @@ -748,7 +756,10 @@ async def _load_shard_index( ) or _ShardIndex.create_empty(chunks_per_shard) async def _load_full_shard_maybe( - self, byte_getter: ByteGetter, prototype: BufferPrototype, chunks_per_shard: ChunkCoords + self, + byte_getter: ByteGetter, + prototype: BufferPrototype, + chunks_per_shard: ChunkCoords, ) -> _ShardReader | None: shard_bytes = await byte_getter.get(prototype=prototype) @@ -758,6 +769,115 @@ async def _load_full_shard_maybe( else None ) + async def _load_partial_shard_maybe( + self, + byte_getter: ByteGetter, + prototype: BufferPrototype, + chunks_per_shard: ChunkCoords, + all_chunk_coords: set[ChunkCoords], + ) -> ShardMapping | None: + """ + Read chunks from `byte_getter` for the case where the read is less than a full shard. + Returns a mapping of chunk coordinates to bytes or None. + """ + shard_index = await self._load_shard_index_maybe(byte_getter, chunks_per_shard) + if shard_index is None: + return None + + chunks = [ + _ChunkCoordsByteSlice(chunk_coords, slice(*chunk_byte_slice)) + for chunk_coords in all_chunk_coords + # Drop chunks where index lookup fails + # e.g. 
empty chunks when write_empty_chunks = False + if (chunk_byte_slice := shard_index.get_chunk_slice(chunk_coords)) + ] + + groups = self._coalesce_chunks(chunks) + + shard_dicts = await concurrent_map( + [(group, byte_getter, prototype) for group in groups], + self._get_group_bytes, + config.get("async.concurrency"), + ) + + shard_dict: ShardMutableMapping = {} + for d in shard_dicts: + if d is None: + return None + shard_dict.update(d) + + return shard_dict + + def _coalesce_chunks( + self, + chunks: list[_ChunkCoordsByteSlice], + ) -> list[list[_ChunkCoordsByteSlice]]: + """ + Combine chunks from a single shard into groups that should be read together + in a single request to the store. + + Respects the following configuration options: + - `sharding.read.coalesce_max_gap_bytes`: The maximum gap between + chunks to coalesce into a single group. + - `sharding.read.coalesce_max_bytes`: The maximum number of bytes in a group. + """ + max_gap_bytes = config.get("sharding.read.coalesce_max_gap_bytes") + coalesce_max_bytes = config.get("sharding.read.coalesce_max_bytes") + + sorted_chunks = sorted(chunks, key=lambda c: c.byte_slice.start) + + if len(sorted_chunks) == 0: + return [] + + groups = [] + current_group = [sorted_chunks[0]] + + for chunk in sorted_chunks[1:]: + gap_to_chunk = chunk.byte_slice.start - current_group[-1].byte_slice.stop + size_if_coalesced = chunk.byte_slice.stop - current_group[0].byte_slice.start + if gap_to_chunk < max_gap_bytes and size_if_coalesced < coalesce_max_bytes: + current_group.append(chunk) + else: + groups.append(current_group) + current_group = [chunk] + + groups.append(current_group) + + return groups + + async def _get_group_bytes( + self, + group: list[_ChunkCoordsByteSlice], + byte_getter: ByteGetter, + prototype: BufferPrototype, + ) -> ShardMapping | None: + """ + Reads a possibly coalesced group of one or more chunks from a shard. + Returns a mapping of chunk coordinates to bytes. + """ + # _coalesce_chunks ensures that the group is not empty. + group_start = group[0].byte_slice.start + group_end = group[-1].byte_slice.stop + + # A single call to retrieve the bytes for the entire group. + group_bytes = await byte_getter.get( + prototype=prototype, + byte_range=RangeByteRequest(group_start, group_end), + ) + if group_bytes is None: + return None + + # Extract the bytes corresponding to each chunk in group from group_bytes. 
+ shard_dict = {} + for chunk in group: + chunk_slice = slice( + chunk.byte_slice.start - group_start, + chunk.byte_slice.stop - group_start, + ) + shard_dict[chunk.coords] = group_bytes[chunk_slice] + + return shard_dict + def compute_encoded_size(self, input_byte_length: int, shard_spec: ArraySpec) -> int: chunks_per_shard = self._get_chunks_per_shard(shard_spec) return input_byte_length + self._shard_index_size(chunks_per_shard) diff --git a/src/zarr/core/config.py b/src/zarr/core/config.py index cc3c33cd17..6dd2bc3e5b 100644 --- a/src/zarr/core/config.py +++ b/src/zarr/core/config.py @@ -109,6 +109,12 @@ def enable_gpu(self) -> ConfigSet: }, "async": {"concurrency": 10, "timeout": None}, "threading": {"max_workers": None}, + "sharding": { + "read": { + "coalesce_max_bytes": 100 * 2**20, # 100MiB + "coalesce_max_gap_bytes": 2**20, # 1MiB + } + }, "json_indent": 2, "codec_pipeline": { "path": "zarr.core.codec_pipeline.BatchedCodecPipeline", diff --git a/tests/test_codecs/test_sharding.py b/tests/test_codecs/test_sharding.py index 403fd80e81..f124e14675 100644 --- a/tests/test_codecs/test_sharding.py +++ b/tests/test_codecs/test_sharding.py @@ -1,5 +1,6 @@ import pickle from typing import Any +from unittest.mock import AsyncMock import numpy as np import numpy.typing as npt @@ -9,7 +10,7 @@ import zarr.api import zarr.api.asynchronous from zarr import Array -from zarr.abc.store import Store +from zarr.abc.store import RangeByteRequest, Store, SuffixByteRequest from zarr.codecs import ( BloscCodec, ShardingCodec, @@ -110,7 +111,9 @@ def test_sharding_scalar( indirect=["array_fixture"], ) def test_sharding_partial( - store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation + store: Store, + array_fixture: npt.NDArray[Any], + index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture spath = StorePath(store) @@ -146,7 +149,9 @@ def test_sharding_partial( indirect=["array_fixture"], ) def test_sharding_partial_readwrite( - store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation + store: Store, + array_fixture: npt.NDArray[Any], + index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture spath = StorePath(store) @@ -178,7 +183,9 @@ def test_sharding_partial_readwrite( @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_sharding_partial_read( - store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation + store: Store, + array_fixture: npt.NDArray[Any], + index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture spath = StorePath(store) @@ -197,6 +204,301 @@ def test_sharding_partial_read( assert np.all(read_data == 1) +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +@pytest.mark.parametrize("coalesce_reads", [True, False]) +def test_sharding_multiple_chunks_partial_shard_read( + store: Store, index_location: ShardingCodecIndexLocation, coalesce_reads: bool +) -> None: + array_shape = (16, 64) + shard_shape = (8, 32) + chunk_shape = (2, 4) + data = np.arange(np.prod(array_shape), dtype="float32").reshape(array_shape) + + if coalesce_reads: + # 1MiB, enough to coalesce all chunks within a shard in this example + zarr.config.set({"sharding.read.coalesce_max_gap_bytes": 2**20}) + else: + # disable coalescing + 
zarr.config.set({"sharding.read.coalesce_max_gap_bytes": -1}) + + store_mock = AsyncMock(wraps=store, spec=store.__class__) + a = zarr.create_array( + StorePath(store_mock), + shape=data.shape, + chunks=chunk_shape, + shards={"shape": shard_shape, "index_location": index_location}, + compressors=BloscCodec(cname="lz4"), + dtype=data.dtype, + fill_value=1, + ) + a[:] = data + + store_mock.reset_mock() # ignore store calls during array creation + + # Reads 3 (2 full, 1 partial) chunks each from 2 shards (a subset of both shards) + # for a total of 6 chunks accessed + assert np.allclose(a[0, 22:42], np.arange(22, 42, dtype="float32")) + + if coalesce_reads: + # 2 shard index requests + 2 coalesced chunk data byte ranges (one for each shard) + assert store_mock.get.call_count == 4 + else: + # 2 shard index requests + 6 chunks + assert store_mock.get.call_count == 8 + + for method, args, kwargs in store_mock.method_calls: + assert method == "get" + assert args[0].startswith("c/") # get from a chunk + assert isinstance(kwargs["byte_range"], (SuffixByteRequest, RangeByteRequest)) + + store_mock.reset_mock() + + # Reads 4 chunks from both shards along dimension 0 for a total of 8 chunks accessed + assert np.allclose(a[:, 0], np.arange(0, data.size, array_shape[1], dtype="float32")) + + if coalesce_reads: + # 2 shard index requests + 2 coalesced chunk data byte ranges (one for each shard) + assert store_mock.get.call_count == 4 + else: + # 2 shard index requests + 8 chunks + assert store_mock.get.call_count == 10 + + for method, args, kwargs in store_mock.method_calls: + assert method == "get" + assert args[0].startswith("c/") # get from a chunk + assert isinstance(kwargs["byte_range"], (SuffixByteRequest, RangeByteRequest)) + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +@pytest.mark.parametrize("coalesce_reads", [True, False]) +def test_sharding_duplicate_read_indexes( + store: Store, index_location: ShardingCodecIndexLocation, coalesce_reads: bool +) -> None: + """ + Check that coalesce optimization parses the grouped reads back out correctly + when there are multiple reads for the same index. 
+ """ + array_shape = (15,) + shard_shape = (8,) + chunk_shape = (2,) + data = np.arange(np.prod(array_shape), dtype="float32").reshape(array_shape) + + if coalesce_reads: + # 1MiB, enough to coalesce all chunks within a shard in this example + zarr.config.set({"sharding.read.coalesce_max_gap_bytes": 2**20}) + else: + # disable coalescing + zarr.config.set({"sharding.read.coalesce_max_gap_bytes": -1}) + + store_mock = AsyncMock(wraps=store, spec=store.__class__) + a = zarr.create_array( + StorePath(store_mock), + shape=data.shape, + chunks=chunk_shape, + shards={"shape": shard_shape, "index_location": index_location}, + compressors=BloscCodec(cname="lz4"), + dtype=data.dtype, + fill_value=-1, + ) + a[:] = data + + store_mock.reset_mock() # ignore store calls during array creation + + # Read the same index multiple times, do that from two chunks which can be coalesced + indexer = [8, 8, 12, 12] + np.array_equal(a[indexer], data[indexer]) + + if coalesce_reads: + # 1 shard index request + 1 coalesced read + assert store_mock.get.call_count == 2 + else: + # 1 shard index request + 2 chunks + assert store_mock.get.call_count == 3 + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +def test_sharding_read_empty_chunks_within_non_empty_shard_write_empty_false( + store: Store, index_location: ShardingCodecIndexLocation +) -> None: + """ + Case where + - some, but not all, chunks in the last shard are empty + - the last shard is not complete (array length is not a multiple of shard shape), + this takes us down the partial shard read path + - write_empty_chunks=False so the shard index will have fewer entries than chunks in the shard + """ + # array with mixed empty and non-empty chunks in second shard + data = np.array([ + # shard 0. full 8 elements, all chunks have some non-fill data + 0, 1, 2, 3, 4, 5, 6, 7, + # shard 1. 
6 elements (< shard shape) + 2, 0, # chunk 0, written + -9, -9, # chunk 1, all fill, not written + 4, 5 # chunk 2, written + ], dtype="int32") # fmt: off + + spath = StorePath(store) + a = zarr.create_array( + spath, + shape=(14,), + chunks=(2,), + shards={"shape": (8,), "index_location": index_location}, + dtype="int32", + fill_value=-9, + filters=None, + compressors=None, + config={"write_empty_chunks": False}, + ) + a[:] = data + + assert np.array_equal(a[:], data) + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +def test_sharding_read_empty_chunks_within_empty_shard_write_empty_false( + store: Store, index_location: ShardingCodecIndexLocation +) -> None: + """ + Case where + - all chunks in last shard are empty + - the last shard is not complete (array length is not a multiple of shard shape), + this takes us down the partial shard read path + - write_empty_chunks=False so the shard index will have no entries + """ + fill_value = -99 + shard_size = 8 + data = np.arange(14, dtype="int32") + data[shard_size:] = fill_value # 2nd shard is all fill value + + spath = StorePath(store) + a = zarr.create_array( + spath, + shape=(14,), + chunks=(2,), + shards={"shape": (shard_size,), "index_location": index_location}, + dtype="int32", + fill_value=fill_value, + filters=None, + compressors=None, + config={"write_empty_chunks": False}, + ) + a[:] = data + + assert np.array_equal(a[:], data) + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +def test_sharding_partial_shard_read__index_load_fails( + store: Store, index_location: ShardingCodecIndexLocation +) -> None: + """Test fill value is returned when the call to the store to load the bytes of the shard's chunk index fails.""" + array_shape = (16,) + shard_shape = (16,) + chunk_shape = (8,) + data = np.arange(np.prod(array_shape), dtype="float32").reshape(array_shape) + fill_value = -999 + + store_mock = AsyncMock(wraps=store, spec=store.__class__) + # loading the index is the first call to .get() so returning None will simulate an index load failure + store_mock.get.return_value = None + + a = zarr.create_array( + StorePath(store_mock), + shape=data.shape, + chunks=chunk_shape, + shards={"shape": shard_shape, "index_location": index_location}, + compressors=BloscCodec(cname="lz4"), + dtype=data.dtype, + fill_value=fill_value, + ) + a[:] = data + + # Read from one of two chunks in a shard to test the partial shard read path + assert a[0] == fill_value + assert a[0] != data[0] + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +def test_sharding_partial_shard_read__index_chunk_slice_fails( + store: Store, + index_location: ShardingCodecIndexLocation, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test fill value is returned when looking up a chunk's byte slice within a shard fails.""" + array_shape = (16,) + shard_shape = (16,) + chunk_shape = (8,) + data = np.arange(np.prod(array_shape), dtype="float32").reshape(array_shape) + fill_value = -999 + + monkeypatch.setattr( + "zarr.codecs.sharding._ShardIndex.get_chunk_slice", + lambda self, chunk_coords: None, + ) + + a = zarr.create_array( + StorePath(store), + shape=data.shape, + chunks=chunk_shape, + shards={"shape": shard_shape, "index_location": index_location}, + 
compressors=BloscCodec(cname="lz4"), + dtype=data.dtype, + fill_value=fill_value, + ) + a[:] = data + + # Read from one of two chunks in a shard to test the partial shard read path + assert a[0] == fill_value + assert a[0] != data[0] + + +@pytest.mark.parametrize("index_location", ["start", "end"]) +@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) +def test_sharding_partial_shard_read__chunk_load_fails( + store: Store, index_location: ShardingCodecIndexLocation +) -> None: + """Test fill value is returned when the call to the store to load a chunk's bytes fails.""" + array_shape = (16,) + shard_shape = (16,) + chunk_shape = (8,) + data = np.arange(np.prod(array_shape), dtype="float32").reshape(array_shape) + fill_value = -999 + + store_mock = AsyncMock(wraps=store, spec=store.__class__) + + a = zarr.create_array( + StorePath(store_mock), + shape=data.shape, + chunks=chunk_shape, + shards={"shape": shard_shape, "index_location": index_location}, + compressors=BloscCodec(cname="lz4"), + dtype=data.dtype, + fill_value=fill_value, + ) + a[:] = data + + # Set up store mock after array creation to only modify calls during array indexing + # Succeed on first call (index load), fail on subsequent calls (chunk loads) + async def first_success_then_fail(*args: Any, **kwargs: Any) -> Any: + if store_mock.get.call_count == 1: + return await store.get(*args, **kwargs) + else: + return None + + store_mock.get.reset_mock() + store_mock.get.side_effect = first_success_then_fail + + # Read from one of two chunks in a shard to test the partial shard read path + assert a[0] == fill_value + assert a[0] != data[0] + + @pytest.mark.parametrize( "array_fixture", [ @@ -207,7 +509,9 @@ def test_sharding_partial_read( @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_sharding_partial_overwrite( - store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation + store: Store, + array_fixture: npt.NDArray[Any], + index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture[:10, :10, :10] spath = StorePath(store) @@ -320,7 +624,6 @@ def test_nested_sharding_create_array( filters=None, compressors=None, ) - print(a.metadata.to_dict()) a[:, :, :] = data @@ -380,7 +683,6 @@ async def test_delete_empty_shards(store: Store) -> None: compressors=None, fill_value=1, ) - print(a.metadata.to_dict()) await _AsyncArrayProxy(a)[:, :].set(np.zeros((16, 16))) await _AsyncArrayProxy(a)[8:, :].set(np.ones((8, 16))) await _AsyncArrayProxy(a)[:, 8:].set(np.ones((16, 8))) @@ -425,7 +727,6 @@ async def test_sharding_with_empty_inner_chunk( ) data[:4, :4] = fill_value await a.setitem(..., data) - print("read data") data_read = await a.getitem(...) 
assert np.array_equal(data_read, data) @@ -437,7 +738,9 @@ async def test_sharding_with_empty_inner_chunk( ) @pytest.mark.parametrize("chunks_per_shard", [(5, 2), (2, 5), (5, 5)]) async def test_sharding_with_chunks_per_shard( - store: Store, index_location: ShardingCodecIndexLocation, chunks_per_shard: tuple[int] + store: Store, + index_location: ShardingCodecIndexLocation, + chunks_per_shard: tuple[int], ) -> None: chunk_shape = (2, 1) shape = tuple(x * y for x, y in zip(chunks_per_shard, chunk_shape, strict=False)) diff --git a/tests/test_config.py b/tests/test_config.py index da5b2cc488..3355c9d744 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -75,6 +75,12 @@ def test_config_defaults_set() -> None: }, "buffer": "zarr.buffer.cpu.Buffer", "ndbuffer": "zarr.buffer.cpu.NDBuffer", + "sharding": { + "read": { + "coalesce_max_bytes": 100 * 2**20, # 100 MiB + "coalesce_max_gap_bytes": 2**20, # 1 MiB + } + }, } ] ) @@ -83,6 +89,8 @@ def test_config_defaults_set() -> None: assert config.get("async.timeout") is None assert config.get("codec_pipeline.batch_size") == 1 assert config.get("json_indent") == 2 + assert config.get("sharding.read.coalesce_max_bytes") == 100 * 2**20 # 100 MiB + assert config.get("sharding.read.coalesce_max_gap_bytes") == 2**20 # 1 MiB @pytest.mark.parametrize(
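The grouping rule implemented by ``_coalesce_chunks`` above can be illustrated in isolation. The sketch below mirrors that logic on plain byte ranges; ``Range`` and ``coalesce`` are names local to this example, not part of the codec::

    from dataclasses import dataclass

    @dataclass
    class Range:
        start: int
        stop: int

    def coalesce(ranges: list[Range], max_gap: int, max_bytes: int) -> list[list[Range]]:
        """Group byte ranges that are close together, mirroring _coalesce_chunks."""
        ranges = sorted(ranges, key=lambda r: r.start)
        if not ranges:
            return []
        groups = [[ranges[0]]]
        for r in ranges[1:]:
            gap = r.start - groups[-1][-1].stop   # bytes between this range and the current group
            size = r.stop - groups[-1][0].start   # group size if this range were appended
            if gap < max_gap and size < max_bytes:
                groups[-1].append(r)
            else:
                groups.append([r])
        return groups

    # Chunks stored at bytes [0, 100), [150, 250) and [5000, 6000) of a shard, with a
    # 1 KiB gap limit: the first two are fetched in one request, the third separately.
    print(coalesce([Range(0, 100), Range(150, 250), Range(5000, 6000)], 1024, 100 * 2**20))
    # [[Range(start=0, stop=100), Range(start=150, stop=250)], [Range(start=5000, stop=6000)]]

Each group then becomes a single ``RangeByteRequest`` spanning the first range's start to the last range's stop, and the groups are fetched concurrently via ``concurrent_map`` bounded by ``async.concurrency``.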
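When a coalesced group comes back, ``_get_group_bytes`` rebases each chunk's byte range onto the start of the group before slicing it out of the single response. A small worked example of that offset arithmetic (the byte values here are made up for illustration)::

    # Suppose one coalesced request covered shard bytes [1000, 1250) and returned 250 bytes.
    group_start = 1000
    group_bytes = bytes(250)  # placeholder for the store's response

    # A chunk stored at shard bytes [1150, 1250) therefore lives at offset 150..250
    # within the response.
    chunk_start, chunk_stop = 1150, 1250
    chunk_bytes = group_bytes[chunk_start - group_start : chunk_stop - group_start]
    assert len(chunk_bytes) == 100

Requests that touch the same chunk more than once (as in ``test_sharding_duplicate_read_indexes``) still fetch that chunk's bytes only once, because chunk coordinates are collected into a set before coalescing.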