@@ -42,9 +42,9 @@ def reduce_references(results):
     """
     combined_vds = xr.combine_nested(
         results,
-        concat_dim=['Time'],
-        coords='minimal',
-        compat='override',
+        concat_dim=["Time"],
+        coords="minimal",
+        compat="override",
     )
     # possibly write parquet to s3 here
     return combined_vds
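For reference, a minimal sketch of how reduce_references might be driven. The file paths are hypothetical, and the assumption that the per-file references come from virtualizarr's open_virtual_dataset is mine; the diff itself only shows the combine step.

from virtualizarr import open_virtual_dataset

# Hypothetical inputs: one virtual (reference-only) dataset per source file.
paths = ["data/file_000.nc", "data/file_001.nc"]
results = [open_virtual_dataset(p) for p in paths]

# Concatenate the references along Time, as in the hunk above.
combined_vds = reduce_references(results)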
@@ -60,24 +60,25 @@ def reduce_references(results):
     )

 ds = futures.get_result()
-ds.virtualize.to_kerchunk('combined.json', format='json')
+ds.virtualize.to_kerchunk("combined.json", format="json")

 # NOTE: In jupyter, open_dataset seems to cache the json, such that changes
 # aren't propagated until the kernel is restarted.
-combined_ds = xr.open_dataset('combined.json',
+combined_ds = xr.open_dataset("combined.json",
                               engine="kerchunk",
                               chunks={},
-                              chunked_array_type='cubed',
+                              chunked_array_type="cubed",
                               )

 combined_ds['Time'].attrs = {}  # to_zarr complains about attrs

 rechunked_ds = combined_ds.chunk(
-    chunks={'Time': 5, 'south_north': 25, 'west_east': 32}
+    chunks={'Time': 5, 'south_north': 25, 'west_east': 32},
+    chunked_array_type="cubed",
 )

-rechunked_ds.to_zarr('rechunked.zarr',
-                     mode='w',
+rechunked_ds.to_zarr("rechunked.zarr",
+                     mode="w",
                      encoding={},  # TODO
                      consolidated=True,
                      safe_chunks=False,
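As a quick sanity check of the rewritten store (a sketch, not part of the change): re-opening rechunked.zarr should report the Time/south_north/west_east chunk sizes set above.

import xarray as xr

# Re-open the consolidated store and confirm the on-disk chunking.
check = xr.open_zarr("rechunked.zarr", consolidated=True)
print(check.chunks)  # expect 5 / 25 / 32 along Time / south_north / west_east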