Skip to content

Commit 73177a2

Browse files
committed
Merge branch 'dev'
2 parents 6a464ee + d2130e6 commit 73177a2

21 files changed

+494
-281
lines changed

ci/parse_pyrad_name_mappings.py

Lines changed: 38 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -5,43 +5,48 @@
55

66
from pyrad.io import io_aux
77

8-
FUNCTIONS_TO_PARSE = ['get_fieldname_pyart',
9-
'get_datatype_odim',
10-
'get_datatype_metranet',
11-
'get_fieldname_icon']
8+
FUNCTIONS_TO_PARSE = [
9+
"get_datatype_odim",
10+
"get_datatype_metranet",
11+
"get_fieldname_icon",
12+
]
1213

1314
mainpath = Path(__file__).resolve().parent.parent
14-
OUT_DIRECTORY = str(Path(mainpath, 'doc', 'source', 'overview', 'mappings'))
15+
OUT_DIRECTORY = str(Path(mainpath, "doc", "source", "overview", "mappings"))
1516

1617

1718
for fun_name in FUNCTIONS_TO_PARSE:
18-
print('Parsing function {:s}'.format(fun_name))
19+
print("Parsing function {:s}".format(fun_name))
1920
fun = getattr(io_aux, fun_name)
20-
mapping = fun_name.split('_')[-1]
21+
mapping = fun_name.split("_")[-1]
2122
pyrad_dtypes = []
2223
srccode = inspect.getsource(fun)
23-
srccode = srccode.split('\n')
24+
srccode = srccode.split("\n")
2425
for line in srccode:
25-
if ('datatype' in line or 'field_name' in line) and '==' in line:
26-
pyrad_dtypes.append(line.split('==')[1].split(':')[
27-
0].strip().replace('"', '').replace("'",""))
28-
if 'return' in line:
29-
returnline = line.replace('return', '').strip()
26+
if ("datatype" in line or "field_name" in line) and "==" in line:
27+
pyrad_dtypes.append(
28+
line.split("==")[1]
29+
.split(":")[0]
30+
.strip()
31+
.replace('"', "")
32+
.replace("'", "")
33+
)
34+
if "return" in line:
35+
returnline = line.replace("return", "").strip()
3036

3137
# Try to get output types from return statements of srccode
3238
all_values = []
3339
all_keys = []
34-
if '{' and '}' in returnline:
35-
keyname, valname = returnline.replace(
36-
'{', '').replace('}', '').split(':')
40+
if "{" and "}" in returnline:
41+
keyname, valname = returnline.replace("{", "").replace("}", "").split(":")
3742
keyname = keyname.strip()
3843
valname = valname.strip()
3944
if mapping not in keyname:
40-
keyname += '_' + mapping
45+
keyname += "_" + mapping
4146
else:
4247
valname = returnline.strip()
4348
if mapping not in valname:
44-
valname += '_' + mapping
49+
valname += "_" + mapping
4550

4651
for v in pyrad_dtypes:
4752
out = fun(v)
@@ -52,11 +57,22 @@
5257
all_values.append(out)
5358

5459
dic = {}
55-
dic['pyrad_name'] = pyrad_dtypes
60+
dic["pyrad_name"] = pyrad_dtypes
5661
if len(all_keys):
5762
dic[keyname] = all_keys
5863
dic[valname] = all_values
59-
6064
df = pd.DataFrame(dic)
61-
df.to_csv(str(Path(OUT_DIRECTORY, 'pyrad_to_{:s}.txt'.format(mapping))),
62-
index=False)
65+
df.to_csv(
66+
str(Path(OUT_DIRECTORY, "pyrad_to_{:s}.txt".format(mapping))), index=False
67+
)
68+
69+
print("Parsing function pyrad_to_pyart")
70+
# Treat get_datatype_pyart separately
71+
from pyrad.io.io_aux import pyrad_to_pyart_keys_dict
72+
73+
dic = {
74+
"pyrad_name": list(pyrad_to_pyart_keys_dict.keys()),
75+
"pyart_name": list(pyrad_to_pyart_keys_dict.values()),
76+
}
77+
df = pd.DataFrame(dic)
78+
df.to_csv(str(Path(OUT_DIRECTORY, "pyrad_to_pyart.txt")), index=False)

config/pyart/mch_config.py

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,8 @@
236236
elevation_angle = "elevation_angle"
237237
visibility = "visibility"
238238
min_vis_altitude = "min_vis_altitude"
239-
min_vis_altitude_above_ground = "min_vis_altitude_above_ground"
239+
min_vis_height_above_ground = "min_vis_height_above_ground"
240+
min_rad_vis_height_above_ground = "min_rad_vis_height_above_ground"
240241
min_vis_elevation = "min_vis_elevation"
241242
incident_angle = "incident_angle"
242243
effective_area = "effective_area"
@@ -564,7 +565,8 @@
564565
"elevation_angle": elevation_angle,
565566
"visibility": visibility,
566567
"min_vis_altitude": min_vis_altitude,
567-
"min_vis_altitude_above_ground": min_vis_altitude_above_ground,
568+
"min_vis_height_above_ground": min_vis_height_above_ground,
569+
"min_rad_vis_height_above_ground": min_rad_vis_height_above_ground,
568570
"min_vis_elevation": min_vis_elevation,
569571
"incident_angle": incident_angle,
570572
"effective_area": effective_area,
@@ -1646,10 +1648,16 @@
16461648
"long_name": "Minimum visible altitude",
16471649
"coordinates": "x y",
16481650
},
1649-
min_vis_altitude_above_ground: {
1651+
min_vis_height_above_ground: {
16501652
"units": "meters",
1651-
"standard_name": "min_vis_altitude_above_ground",
1652-
"long_name": "Minimum visible altitude above ground",
1653+
"standard_name": "min_vis_height_above_ground",
1654+
"long_name": "Minimum visible height above ground",
1655+
"coordinates": "x y",
1656+
},
1657+
min_rad_vis_height_above_ground: {
1658+
"units": "meters",
1659+
"standard_name": "min_rad_vis_height_above_ground",
1660+
"long_name": "Minimum radar visible height above ground",
16531661
"coordinates": "x y",
16541662
},
16551663
min_vis_elevation: {

doc/source/overview/main.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ rm_s3_file INT OPTIONAL. If set input radar data files downloaded
2727
s3EndpointWrite STRING OPTIONAL. URL of an S3 endpoint where output data will be stored. The format must be https://endpoint.domain (e.g. https://eu-central-1.linodeobjects.com/); the https:// prefix is not mandatory.
2828
s3BucketWrite STRING OPTIONAL. Name of an S3 bucket where to store output data. It has to be used together with ``s3EndpointWrite`` and ``s3PathWrite`` to be able to save output data to a bucket. The procedure will only work by setting the environment variables S3_KEY_WRITE and S3_SECRET_WRITE.
2929
s3PathWrite STRING OPTIONAL. Path where to save output radar data in an S3 bucket. The data will be saved at url https://s3BucketWrite.s3EndpointWrite/s3PathWrite/filename
30+
s3SplitExtensionWrite INT OPTIONAL. If set to 1 and files with multiple file extensions are located in the same directory, they will be separated on the S3 bucket according to their file extension; for example myfolder/out.csv will be stored as myfolder/csv/out.csv on the S3 bucket. Default is 0 (False).
3031
loadbasepath STRING OPTIONAL. Base path of saved data. By default, this field is set to ``saveimgbasepath``.
3132
loadname STRING OPTIONAL. Name of the saved data processing. Used for saved volume loading. By default, this field is set to ``name``.
3233
gecsxbasepath STRING OPTIONAL. Base path of saved visibility fields generated by the GECSX tool

src/make_all.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,4 +16,4 @@ echo 'Building PyTDA...'
1616
./make_pytda.sh
1717

1818
echo 'Building Pyrad...'
19-
./make_pyrad.sh
19+
./make_pyrad.sh

src/pyrad_proc/pyrad/flow/flow_aux.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1007,6 +1007,15 @@ def _generate_prod(dataset, cfg, prdname, prdfunc, dsname, voltime, runinfo=None
10071007
): # copy to S3
10081008
s3AccessPolicy = prdcfg.get("s3AccessPolicy", None)
10091009
s3path = prdcfg.get("s3PathWrite", None)
1010+
s3splitext = bool(prdcfg.get("s3SplitExtensionWrite", 0))
1011+
if s3splitext:
1012+
nextensions = set([os.path.splitext(f)[1] for f in filenames])
1013+
else:
1014+
nextensions = 1
1015+
1016+
if nextensions == 1:
1017+
s3splitext = False
1018+
10101019
for fname in filenames:
10111020
if (
10121021
prdcfg["basepath"] in fname
@@ -1018,6 +1027,7 @@ def _generate_prod(dataset, cfg, prdname, prdfunc, dsname, voltime, runinfo=None
10181027
prdcfg["s3BucketWrite"],
10191028
s3path,
10201029
s3AccessPolicy,
1030+
s3splitext
10211031
)
10221032
return False
10231033
except Exception as inst:
@@ -1599,6 +1609,8 @@ def _create_prdcfg_dict(cfg, dataset, product, voltime, runinfo=None):
15991609
prdcfg.update({"s3BucketWrite": cfg["s3BucketWrite"]})
16001610
if "s3PathWrite" in cfg:
16011611
prdcfg.update({"s3PathWrite": cfg["s3PathWrite"]})
1612+
if "s3SplitExtensionWrite" in cfg:
1613+
prdcfg.update({"s3SplitExtensionWrite": cfg["s3SplitExtensionWrite"]})
16021614
if "s3AccessPolicy" in cfg:
16031615
prdcfg.update({"s3AccessPolicy": cfg["s3AccessPolicy"]})
16041616
if "RadarBeamwidth" in cfg:

src/pyrad_proc/pyrad/graph/plots.py

Lines changed: 49 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@
3131
import pyart
3232
import matplotlib.pyplot as plt
3333
from warnings import warn
34-
from copy import deepcopy
3534

3635
import numpy as np
3736

@@ -399,20 +398,16 @@ def plot_density(
399398
ang_step = ang[1] - ang[0]
400399
labelx = "ray number"
401400

402-
# compute percentiles of the histogram
403-
az_percentile_ref = np.ma.masked_all(len(ang))
404-
az_percentile_low = deepcopy(az_percentile_ref)
405-
az_percentile_high = deepcopy(az_percentile_ref)
401+
# Compute quantiles for each ray
402+
az_percentiles = np.ma.masked_all((len(ang), len(quantiles)))
406403
for ray in range(len(ang)):
407-
quantiles, values_ray = compute_quantiles_from_hist(
404+
_, values_ray = compute_quantiles_from_hist(
408405
hist_obj.range["data"], field[ray, :], quantiles=quantiles
409406
)
407+
az_percentiles[ray, :] = values_ray
410408

411-
az_percentile_low[ray] = values_ray[0]
412-
az_percentile_ref[ray] = values_ray[1]
413-
az_percentile_high[ray] = values_ray[2]
414-
415-
quantiles, values_sweep = compute_quantiles_from_hist(
409+
# Compute overall sweep quantiles
410+
_, values_sweep = compute_quantiles_from_hist(
416411
hist_obj.range["data"], np.ma.sum(field, axis=0), quantiles=quantiles
417412
)
418413

@@ -469,14 +464,25 @@ def plot_density(
469464
# plot reference
470465
ax.plot(ang, np.zeros(len(ang)) + ref_value, "k--")
471466

472-
# plot quantiles
473-
ax.plot(ang, np.zeros(len(ang)) + values_sweep[1], "r")
474-
ax.plot(ang, np.zeros(len(ang)) + values_sweep[0], "r--")
475-
ax.plot(ang, np.zeros(len(ang)) + values_sweep[2], "r--")
476-
477-
ax.plot(ang, az_percentile_ref, "k")
478-
ax.plot(ang, az_percentile_low, "k--")
479-
ax.plot(ang, az_percentile_high, "k--")
467+
# Generate colormap for quantiles
468+
quantile_colors = plt.cm.viridis_r(np.linspace(0, 1, len(quantiles)))
469+
470+
# Plot all quantiles
471+
for i, q in enumerate(quantiles):
472+
ax.plot(
473+
ang,
474+
np.zeros(len(ang)) + values_sweep[i],
475+
color=quantile_colors[i],
476+
linestyle="--",
477+
label=f"{q:.1f}th quantile",
478+
)
479+
ax.plot(
480+
ang,
481+
az_percentiles[:, i],
482+
color=quantile_colors[i],
483+
linestyle="-",
484+
label=f"{q:.1f}th quantile",
485+
)
480486

481487
# ax.autoscale(enable=True, axis='both', tight=True)
482488

@@ -487,33 +493,12 @@ def plot_density(
487493
cb = fig.colorbar(cax)
488494
cb.set_label(label)
489495

490-
val_quant0_str = "--"
491-
if values_sweep[0] is not np.ma.masked:
492-
val_quant0_str = "{:.3f}".format(values_sweep[0])
493-
val_quant1_str = "--"
494-
if values_sweep[1] is not np.ma.masked:
495-
val_quant1_str = "{:.3f}".format(values_sweep[1])
496-
val_quant2_str = "--"
497-
if values_sweep[2] is not np.ma.masked:
498-
val_quant2_str = "{:.3f}".format(values_sweep[2])
499-
500-
metadata = (
501-
"npoints: "
502-
+ str(np.ma.sum(field))
503-
+ "\n"
504-
+ str(quantiles[1])
505-
+ " quant: "
506-
+ val_quant1_str
507-
+ "\n"
508-
+ str(quantiles[0])
509-
+ " quant: "
510-
+ val_quant0_str
511-
+ "\n"
512-
+ str(quantiles[2])
513-
+ " quant: "
514-
+ val_quant2_str
515-
+ "\n"
516-
)
496+
metadata = "npoints: " + str(np.ma.sum(field)) + "\n"
497+
for i, quant in enumerate(quantiles):
498+
val_quant_str = "--"
499+
if values_sweep[i] is not np.ma.masked:
500+
val_quant_str = f"{values_sweep[i]:.3f}"
501+
metadata += f"{quant} quant: {val_quant_str}\n"
517502

518503
ax.text(
519504
0.05,
@@ -1641,6 +1626,7 @@ def plot_scatter_comp(
16411626
fig=None,
16421627
save_fig=True,
16431628
point_format="bx",
1629+
write_stats=True,
16441630
):
16451631
"""
16461632
plots the scatter between two time series
@@ -1674,6 +1660,8 @@ def plot_scatter_comp(
16741660
returns the handle to the figure
16751661
point_format : str
16761662
format of the scatter point
1663+
write_stats : bool
1664+
If set to True will write the RMSE and bias on the plot. Default is true.
16771665
16781666
Returns
16791667
-------
@@ -1714,6 +1702,21 @@ def plot_scatter_comp(
17141702
ax.autoscale(False)
17151703
ax.plot(value1, value2, point_format)
17161704

1705+
if write_stats:
1706+
for txt in ax.texts:
1707+
txt.remove()
1708+
rmse = np.sqrt(np.nanmean((value1 - value2) ** 2))
1709+
bias = np.nanmean(value1 - value2)
1710+
1711+
ax.text(
1712+
0.01,
1713+
0.98,
1714+
f"RMSE={rmse:2.2f}\n Bias (r-g)={bias:2.2f}",
1715+
horizontalalignment="left",
1716+
verticalalignment="top",
1717+
transform=ax.transAxes,
1718+
)
1719+
17171720
if save_fig:
17181721
for fname in fname_list:
17191722
fig.savefig(fname, dpi=dpi)

0 commit comments

Comments
 (0)