From 2708179d016e147f2f41512771fc8486d4b38b05 Mon Sep 17 00:00:00 2001
From: aitaten
Date: Tue, 16 Jul 2024 05:44:23 +0000
Subject: [PATCH 1/4] Add INCA motion vectors without NWP comparison to the example program

---
 .../plot_optical_flow_with_new_inca_motion.py | 526 ++++++++++++++++++
 1 file changed, 526 insertions(+)
 create mode 100644 examples/plot_optical_flow_with_new_inca_motion.py

diff --git a/examples/plot_optical_flow_with_new_inca_motion.py b/examples/plot_optical_flow_with_new_inca_motion.py
new file mode 100644
index 000000000..d27357102
--- /dev/null
+++ b/examples/plot_optical_flow_with_new_inca_motion.py
@@ -0,0 +1,526 @@
"""
Optical flow
============

This tutorial offers a short overview of the optical flow routines available in
pysteps and covers how to compute and plot the motion field from a sequence of
radar images. It also includes a simplified implementation of the INCA
motion-vector algorithm for comparison.
"""

import math
import time
from datetime import datetime, timedelta
from pprint import pprint

import matplotlib.pyplot as plt
import numpy as np
from numba import jit

from pysteps import io, motion, rcparams, nowcasts
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field, quiver

################################################################################
# The INCA module functions to test against the four OF methods (one of them
# commented out below)
# ---------------------------------------------------------------------------

@jit(nopython=True)
def compute_motion_numba(image1, image2, NI, NJ, nthin, IS_tmp, JS_tmp):
    """
    Estimate displacement vectors on a thinned grid by maximizing the squared
    cross-correlation between blocks of two consecutive images (TREC-type
    block search).
    """
    nsh = 20   # maximum shift tested in each direction [pixels]
    nqu = 45   # half width of the correlation block [pixels]
    nsq = nsh + nqu
    di = 1     # sampling increment within a block
    PAREAMIN = 1.0
    nn = (2 * nqu / di + 1) ** 2.

    for i in range(0, NI, nthin):
        if (i >= nsq) and (i < NI - nsq):
            for j in range(0, NJ, nthin):
                if (j >= nsq) and (j < NJ - nsq):
                    ii1 = max(i - nqu, 0)
                    ii2 = min(i + nqu, NI - 1)
                    jj1 = max(j - nqu, 0)
                    jj2 = min(j + nqu, NJ - 1)

                    # sum and sum of squares of the reference block
                    sy = 0.
                    sy2 = 0.

                    for ii in range(ii1, ii2 + 1, di):
                        for jj in range(jj1, jj2 + 1, di):
                            sy = sy + image1[jj, ii]
                            sy2 = sy2 + image1[jj, ii] ** 2.

                    sigy = sy2 - sy ** 2. / nn
                    isho = -99
                    jsho = -99

                    if (sigy > 0.) and (sy > PAREAMIN):
                        corqx = 0.1
                        for ish in range(-nsh, nsh + 1):
                            for jsh in range(-nsh, nsh + 1):
                                if math.sqrt(ish ** 2. + jsh ** 2.) > nsh:
                                    continue
                                sx = 0.
                                sx2 = 0.
                                sxy = 0.
                                for ii in range(ii1, ii2 + 1, di):
                                    for jj in range(jj1, jj2 + 1, di):
                                        ind_x = min(max(0, jj + jsh), NJ - 1)
                                        ind_y = min(max(0, ii + ish), NI - 1)
                                        sx += image2[ind_x, ind_y]
                                        sx2 += image2[ind_x, ind_y] ** 2.
                                        sxy += image2[ind_x, ind_y] * image1[jj, ii]

                                sigx = sx2 - sx ** 2. / nn
                                if sigx > 0.:
                                    # squared correlation of the shifted block
                                    corq = (sxy - sx * sy / nn) ** 2. / (sigx * sigy)
                                    if corq > corqx:
                                        corqx = corq
                                        isho = ish
                                        jsho = jsh

                    if (isho != -99) and (corqx > 0.3) and (corqx * sy > 1.) and (sigy / sy >= 0.08):
                        if (abs(isho) < nsh) and (abs(jsho) < nsh):
                            IS_tmp[int(j / nthin), int(i / nthin)] = -isho
                            JS_tmp[int(j / nthin), int(i / nthin)] = -jsho
                    else:
                        IS_tmp[int(j / nthin), int(i / nthin)] = 0
                        JS_tmp[int(j / nthin), int(i / nthin)] = 0

    return IS_tmp, JS_tmp

def within_interval(now_dd, err_dd, do_it):
    """
    Check whether a direction value lies within a +/- 45 degree window around
    a reference direction.

    Parameters:
    - now_dd (float): The direction value to be checked.
    - err_dd (float): The reference direction value.
    - do_it (int): A flag to control printing.

    Returns:
    - int: 0 if now_dd is within the interval, 1 otherwise.
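
    Examples (directions in degrees):
        >>> within_interval(40.0, 50.0, 0)
        0
        >>> within_interval(40.0, 120.0, 0)
        1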
    """

    version = 0
    window = 45.0

    if (now_dd + window) > 360.0 or (now_dd - window) < 0.0:
        version = 1
    if (err_dd + window) > 360.0 or (err_dd - window) < 0.0:
        version = 1

    if do_it == 1:
        print(f"now_dd: {now_dd:7.2f}, err_dd: {err_dd:7.2f}")
    if version == 1:
        now_dd = np.fmod(now_dd + 360.0, 360.0)
        err_dd = np.fmod(err_dd + 360.0, 360.0)
        if do_it == 1:
            print(f"now_dd: {now_dd:7.2f}, err_dd: {err_dd:7.2f} (version 1)")

    return 0 if (now_dd > (err_dd - window) and now_dd < (err_dd + window)) else 1

def calculate_corrected_motion_vector(NI, NJ, nthin, IS_NOW, JS_NOW, Uerr_, Verr_, IS_ANA, JS_ANA, IS, JS, Uerr_av, Verr_av):
    """
    Calculate a corrected motion vector based on input data and error vectors.

    Parameters:
    - NI (int): Number of elements in the x-direction.
    - NJ (int): Number of elements in the y-direction.
    - nthin (int): Step size for iteration.
    - IS_NOW ((NJ/nthin, NI/nthin,) ndarray): Current error motion vector in the x-direction.
    - JS_NOW ((NJ/nthin, NI/nthin,) ndarray): Current error motion vector in the y-direction.
    - Uerr_ ((NJ/nthin, NI/nthin,) ndarray): Climatological error motion vector in the x-direction.
    - Verr_ ((NJ/nthin, NI/nthin,) ndarray): Climatological error motion vector in the y-direction.
    - IS_ANA ((NJ/nthin, NI/nthin,) ndarray): Analysis motion vector in the x-direction.
    - JS_ANA ((NJ/nthin, NI/nthin,) ndarray): Analysis motion vector in the y-direction.
    - IS ((NJ/nthin, NI/nthin,) ndarray): Output corrected motion vector in the x-direction.
    - JS ((NJ/nthin, NI/nthin,) ndarray): Output corrected motion vector in the y-direction.
    - Uerr_av ((NJ/nthin, NI/nthin,) ndarray): Averaged error motion vector in the x-direction.
    - Verr_av ((NJ/nthin, NI/nthin,) ndarray): Averaged error motion vector in the y-direction.

    Returns:
    - Tuple[ndarray, ndarray, ndarray, ndarray]:
        - The corrected motion vectors in the x-direction (IS).
        - The corrected motion vectors in the y-direction (JS).
        - The averaged error motion vectors in the x-direction (Uerr_av).
        - The averaged error motion vectors in the y-direction (Verr_av).
    """
    print("Computing the corrected motion vectors.")
    # The original INCA C code loops over the full grid with a stride of nthin:
    # for i in range(0, NJ - 1, nthin):
    #     for j in range(0, NI - 1, nthin):
    for i in range(IS_NOW.shape[0]):
        for j in range(IS_NOW.shape[1]):
            has_averaged = 0
            # debugging switch for a single grid point:
            # do_it = 1 if i == 480 and j == 90 else 0
            do_it = 0

            if IS_NOW[i, j] != -99 and Uerr_[i, j] > -98.:
                NOW_dd = int(270. - np.arctan2(JS_NOW[i, j], IS_NOW[i, j]) * 180. / np.pi) % 360
                NOW_ff = np.sqrt(np.square(IS_NOW[i, j]) + np.square(JS_NOW[i, j]))
                err_dd = int(270. - np.arctan2(Verr_[i, j], Uerr_[i, j]) * 180. / np.pi) % 360
                err_ff = np.sqrt(np.square(Uerr_[i, j]) + np.square(Verr_[i, j]))

                if within_interval(float(NOW_dd), float(err_dd), do_it) == 0 and NOW_ff > err_ff * 0.5 and NOW_ff < err_ff * 1.5:
                    Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j])
                    Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j])
                else:
                    Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j]) * 0.25 + Uerr_[i, j] * 0.75
                    Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j]) * 0.25 + Verr_[i, j] * 0.75
                    has_averaged = 1

            else:
                Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j])
                Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j])

            if Uerr_av[i, j] > -98.:
                if IS_ANA[i, j] == -99:
                    IS[i, j] = -99
                    JS[i, j] = -99
                else:
                    IS[i, j] = math.fmod(IS_ANA[i, j], 99) + math.fmod(int(Uerr_av[i, j]), 99)
                    JS[i, j] = math.fmod(JS_ANA[i, j], 99) + math.fmod(int(Verr_av[i, j]), 99)

            else:
                IS[i, j] = IS_ANA[i, j]
                JS[i, j] = JS_ANA[i, j]

            if has_averaged == 0:
                if Uerr_av[i, j] < -98.:
                    w_emv_now = 0.
                else:
                    w_emv_now = 0.25

                if Uerr_[i, j] < -98.:
                    w_emv_avg = 0.
                else:
                    w_emv_avg = 0.75

                if w_emv_avg + w_emv_now >= 0.999:
                    Uerr_av[i, j] = int(Uerr_av[i, j] * w_emv_now + Uerr_[i, j] * (1. - w_emv_now))
                    Verr_av[i, j] = int(Verr_av[i, j] * w_emv_now + Verr_[i, j] * (1. - w_emv_now))

    return IS, JS, Uerr_av, Verr_av

@jit(nopython=True)
def interpolate_rmv(ni1, ni2, nj1, nj2, kmax, nsteph, xr, yr, sta, uu, vv, u, v):
    """
    Perform distance-weighted interpolation of values at grid points.

    The interpolation weights are proportional to 1 / r**3, where r is the
    distance to the sample point, and are normalized so that they sum to one.

    Parameters:
    - ni1 (int): Lower bound of the first dimension of the grid.
    - ni2 (int): Upper bound of the first dimension of the grid.
    - nj1 (int): Lower bound of the second dimension of the grid.
    - nj2 (int): Upper bound of the second dimension of the grid.
    - kmax (int): Maximum value of k for looping.
    - nsteph (float): A scaling factor for the interpolated values.
    - xr (ndarray): Array containing x-coordinates of the interpolation points.
    - yr (ndarray): Array containing y-coordinates of the interpolation points.
    - sta (ndarray): Array indicating whether the interpolation point is valid.
    - uu (ndarray): Array containing values to be interpolated (component u).
    - vv (ndarray): Array containing values to be interpolated (component v).
    - u (ndarray): Array to store the interpolated values (component u).
    - v (ndarray): Array to store the interpolated values (component v).

    Returns:
    - u (ndarray): Array containing the interpolated values (component u).
    - v (ndarray): Array containing the interpolated values (component v).
    """
    for j in range(nj1, nj2):
        for i in range(ni1, ni2):
            wgtsum = 0.0

            u[i - ni1, j - nj1] = 0.0
            v[i - ni1, j - nj1] = 0.0

            # first pass: accumulate the weight normalization
            for k in range(0, kmax):
                if sta[k]:
                    rsq = np.sqrt((xr[k] - j) ** 2 + (yr[k] - i) ** 2)
                    if rsq < 1.0:
                        wgts = 1000000.0
                    else:
                        wgts = 1.0 / rsq ** 3
                    wgtsum += wgts

            # second pass: accumulate the normalized, weighted values
            for k in range(0, kmax):
                if sta[k]:
                    rsq = np.sqrt((xr[k] - j) ** 2 + (yr[k] - i) ** 2)
                    if rsq < 1.0:
                        wgts = 1000000.0
                    else:
                        wgts = 1.0 / rsq ** 3
                    wgts = wgts / wgtsum
                    u[i - ni1, j - nj1] += wgts * uu[k]
                    v[i - ni1, j - nj1] += wgts * vv[k]

            u[i - ni1, j - nj1] *= nsteph / 3.6
            v[i - ni1, j - nj1] *= nsteph / 3.6

    return u, v
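
# A minimal check of the inverse-distance weighting above (an illustrative
# sketch, not part of the INCA code): with a single valid sample the weights
# normalize to one, so every grid point receives uu[0] * nsteph / 3.6,
# i.e. 3.6 * 4.0 / 3.6 = 4.0 here.
_u = np.zeros((2, 2))
_v = np.zeros((2, 2))
_u, _v = interpolate_rmv(
    0, 2, 0, 2, 1, 4.0,
    np.array([0.0]), np.array([0.0]), np.array([True]),
    np.array([3.6]), np.array([0.0]), _u, _v,
)
print(_u)  # expected: all entries equal to 4.0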

def inca_motion_vectors_simplified(PREV, timestep, settings=None):
    # Simplified version of the operational signature:
    # def inca_motion_vectors_simplified(ANA, PREV, NWPUAWIND, Uerr_, Verr_,
    #                                    initTime, inca_domain, timestep, settings, adv_files):
    """Calculate motion vectors for precipitation nowcasting, following the
    same approach as the operational INCA C code.

    The motion vectors are computed at reduced resolution (one vector every
    ``nthin`` pixels) from two radar fields and from the error of the previous
    nowcast (+15 min). A correction based on a climatological error is then
    applied to keep track of the recent performance. In the operational code
    (see the commented signature above), the vectors are also cross-checked
    against the upper-air wind field (500 hPa and 700 hPa) for consistency
    before being interpolated back to the original resolution.

    Args:
        PREV ((3, N, M) ndarray): the three most recent precipitation analyses
        timestep (int): the accumulation time for precipitation [seconds]
        settings (dict): the nowcasting settings

    Returns:
        (2, N, M) ndarray: the motion vectors
    """

    print('Creating INCA motion vectors')

    start_nwc = time.monotonic()

    # Take the needed parameters from the nowcasting settings dict
    # (nback, PMIN and delmax are unused in this simplified version but are
    # kept for parity with the operational settings)
    settings = settings if settings is not None else {}
    nback = settings.get('nback', 3)
    PMIN = settings.get('PMIN', 0.05)
    nthin = settings.get('nthin', 15)
    delmax = settings.get('delmax', 5.)

    # Number of timesteps per hour
    factor = timestep / 3600.
    nsteph = 1 / factor

    # NOTE: this simplified version relies on the module-level ``metadata``
    # dictionary that is loaded further below in this script.
    NI = int((metadata['x2'] - metadata['x1']) / metadata['xpixelsize'])
    NJ = int((metadata['y2'] - metadata['y1']) / metadata['ypixelsize'])

    Uana = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Vana = np.full((NJ, NI), -99.)[::nthin, ::nthin]

    Uana, Vana = compute_motion_numba(
        PREV[1, ...], PREV[0, ...],
        NI, NJ, nthin, Uana, Vana)

    boole_ = ((Uana > -99) & (Vana > -99))

    X_orig = metadata['xpixelsize'] * np.arange(NI) + metadata['x1']
    Y_orig = metadata['ypixelsize'] * np.arange(NJ) + metadata['y1']
    X, Y = X_orig[::nthin], Y_orig[::nthin]
    XX, YY = np.meshgrid(X, Y)

    UU = np.zeros([NJ, NI])
    VV = np.zeros([NJ, NI])
    UU, VV = interpolate_rmv(
        int(metadata['y1'] / metadata['xpixelsize']), int(metadata['y2'] / metadata['xpixelsize']),
        int(metadata['x1'] / metadata['xpixelsize']), int(metadata['x2'] / metadata['xpixelsize']),
        len(Uana.flatten()), nsteph, XX.flatten() / metadata['xpixelsize'], YY.flatten() / metadata['xpixelsize'],
        boole_.flatten(), Uana.flatten(), Vana.flatten(), UU, VV)

    # Advect the latest analysis to obtain a first-guess nowcast
    extrapolate = nowcasts.get_method("extrapolation")
    NWC_field = extrapolate(PREV[1, ...], np.append(UU[np.newaxis, ...], VV[np.newaxis, ...], axis=0), 12)

    # Initialize all the matrices required by the following code
    Uana = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Vana = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Unow = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Vnow = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    IS = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    JS = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Uerr_av = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Verr_av = np.full((NJ, NI), -99.)[::nthin, ::nthin]
    Uerr_ = np.full((NJ, NI), 0.)[::nthin, ::nthin]
    Verr_ = np.full((NJ, NI), 0.)[::nthin, ::nthin]

    Uana, Vana = compute_motion_numba(
        PREV[2, ...], PREV[1, ...],
        NI, NJ, nthin, Uana, Vana)

    Unow, Vnow = compute_motion_numba(
        PREV[2, ...], NWC_field[0, ...],
        NI, NJ, nthin, Unow, Vnow)

    IS, JS, Uerr_av, Verr_av = calculate_corrected_motion_vector(
        NI, NJ, nthin, Unow, Vnow, Uerr_, Verr_, Uana, Vana, IS, JS, Uerr_av, Verr_av)

    boole_ = ((IS > -99) & (JS > -99))

    UU = np.zeros([NJ, NI])
    VV = np.zeros([NJ, NI])
    UU, VV = interpolate_rmv(
        int(metadata['y1'] / metadata['xpixelsize']), int(metadata['y2'] / metadata['xpixelsize']),
        int(metadata['x1'] / metadata['xpixelsize']), int(metadata['x2'] / metadata['xpixelsize']),
        len(Uana.flatten()), nsteph, XX.flatten() / metadata['xpixelsize'], YY.flatten() / metadata['xpixelsize'],
        boole_.flatten(), IS.flatten(), JS.flatten(), UU, VV)

    OFZ = np.append(UU[np.newaxis, ...], VV[np.newaxis, ...], axis=0)

    print("Elapsed time of INCA motion vector: %s [h:mm:ss]."
          % timedelta(seconds=int(time.monotonic() - start_nwc)))

    return OFZ


################################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.

# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]

###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]

# Find the input files from the archive
fns = io.archive.find_by_date(
    date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=9
)

# Read the radar composites
importer = io.get_method(importer_name, "importer")
R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs)

del quality  # Not used

###############################################################################
# Preprocess the data
# ~~~~~~~~~~~~~~~~~~~

# Convert to mm/h
R, metadata = conversion.to_rainrate(R, metadata)

# Store the reference frame
R_ = R[-1, :, :].copy()
R_here = R.copy()

# Log-transform the data [dBR]
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)

# Set non-finite values to the transformed zero value
R[~np.isfinite(R)] = metadata["zerovalue"]

# Nicely print the metadata
pprint(metadata)

################################################################################
# INCA TREC method with error motion correction (Haiden et al 2011)
# -----------------------------------------------------------------
#
# This section runs the simplified INCA motion-vector routine defined above:
# a TREC-type cross-correlation tracking combined with a correction of the
# error motion of the previous nowcast (Haiden et al., 2011).

R_here[~np.isfinite(R)] = 0
V4 = inca_motion_vectors_simplified(R_here[-3:, :, :], timestep * 60)

# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="INCA motion")
quiver(V4, geodata=metadata, step=25)
plt.show()
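
# As a quick sanity check (an illustrative addition, not part of the INCA
# code), print the median magnitude of the two retrieved motion components.
print(
    "INCA motion: median |U| = %.2f, median |V| = %.2f"
    % (np.median(np.abs(V4[0])), np.median(np.abs(V4[1])))
)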

################################################################################
# Lucas-Kanade (LK)
# -----------------
#
# The Lucas-Kanade optical flow method implemented in pysteps is a local
# tracking approach that relies on the OpenCV package.
# Local features are tracked in a sequence of two or more radar images. The
# scheme includes a final interpolation step in order to produce a smooth
# field of motion vectors.

oflow_method = motion.get_method("LK")
V1 = oflow_method(R[-3:, :, :])

# Plot the motion field on top of the reference frame
plot_precip_field(R_, geodata=metadata, title="LK")
quiver(V1, geodata=metadata, step=25)
plt.show()

################################################################################
# Variational echo tracking (VET)
# -------------------------------
#
# This module implements the VET algorithm presented
# by Laroche and Zawadzki (1995) and used in the McGill Algorithm for
# Prediction by Lagrangian Extrapolation (MAPLE) described in
# Germann and Zawadzki (2002).
# The approach essentially consists of a global optimization routine that
# seeks to minimize a cost function between the displaced and the reference
# image.

oflow_method = motion.get_method("VET")
V2 = oflow_method(R[-3:, :, :])

# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="VET")
quiver(V2, geodata=metadata, step=25)
plt.show()

################################################################################
# Dynamic and adaptive radar tracking of storms (DARTS)
# -----------------------------------------------------
#
# DARTS uses a spectral approach to optical flow that is based on the discrete
# Fourier transform (DFT) of a temporal sequence of radar fields.
# The level of truncation of the DFT coefficients controls the degree of
# smoothness of the estimated motion field, allowing for an efficient
# motion estimation. DARTS requires a longer sequence of radar fields for
# estimating the motion; here we are going to use all 10 available fields.

oflow_method = motion.get_method("DARTS")
R[~np.isfinite(R)] = metadata["zerovalue"]
V3 = oflow_method(R)  # needs a longer training sequence

# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="DARTS")
quiver(V3, geodata=metadata, step=25)
plt.show()

################################################################################
# Anisotropic diffusion method (Proesmans et al 1994)
# ---------------------------------------------------
#
# This module implements the anisotropic diffusion method presented in Proesmans
# et al. (1994), a robust optical flow technique which employs the notion of
# inconsistency during the solution of the optical flow equations.

# oflow_method = motion.get_method("proesmans")
# R[~np.isfinite(R)] = metadata["zerovalue"]
# V4 = oflow_method(R[-2:, :, :])

# # Plot the motion field
# plot_precip_field(R_, geodata=metadata, title="Proesmans")
# quiver(V4, geodata=metadata, step=25)
# plt.show()

# sphinx_gallery_thumbnail_number = 1

From 2ba04fda64755fb56aeb55fe173331eb1c3a7359 Mon Sep 17 00:00:00 2001
From: aitaten
Date: Sat, 20 Jul 2024 17:45:41 +0000
Subject: [PATCH 2/4] Add notebook for optimal number of frames

---
 examples/opt_number_frames_OF.ipynb | 568 ++++++++++++++++++++++++++++
 1 file changed, 568 insertions(+)
 create mode 100644 examples/opt_number_frames_OF.ipynb

diff --git a/examples/opt_number_frames_OF.ipynb b/examples/opt_number_frames_OF.ipynb
new file mode 100644
index 000000000..f1a06f1d6
--- /dev/null
+++ b/examples/opt_number_frames_OF.ipynb
@@ -0,0 +1,568 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# Optimizing number of frames\n",
        "\n",
        "This notebook stems from the question ([issue 376](https://github.com/pySTEPS/pysteps/issues/376)) of how to optimize the Optical Flow (OF) motion vectors in pySTEPS, and it takes advantage of *ray* to carry out the optimization. Other parameters of the OF computation could also be optimized, but in this example only the number of frames is tested.\n",
        "\n",
        "**Caution:** This notebook is an example; a larger dataset should be used to optimize the number of frames so that the results are statistically significant and sound.\n"
      ],
      "metadata": {
        "id": "JPIK350CFtK7"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Set up the Colab environment\n",
        "\n",
        "First, let's set up our working environment. Note that these steps are specific to Google Colab. For more information on how to install pysteps, see [these instructions](https://pysteps.readthedocs.io/en/latest/user_guide/install_pysteps.html).\n",
        "\n",
        "### Install dependencies\n",
        "\n",
        "First, let's install the latest available versions of pysteps and ray using pip. This will also install the minimal dependencies needed to run pysteps."
      ],
      "metadata": {
        "id": "Rg8qT_73G9Lk"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "!pip install pysteps \"ray[tune]\" --quiet"
      ],
      "metadata": {
        "id": "3khQ2OVqHB13"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "### Getting the example data\n",
        "\n",
        "Now that we have the environment ready, let's install the example data and configure pysteps by following [this tutorial](https://pysteps.readthedocs.io/en/latest/user_guide/example_data.html).\n",
        "\n",
        "First, we will use the [pysteps.datasets.download_pysteps_data()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.download_pysteps_data.html) function to download the data."
+ ], + "metadata": { + "id": "dEIfmMtOHCpV" + } + }, + { + "cell_type": "code", + "source": [ + "# Import the helper functions\n", + "from pysteps.datasets import download_pysteps_data, create_default_pystepsrc\n", + "\n", + "# Download the pysteps data in the \"pysteps_data\"\n", + "download_pysteps_data(\"pysteps_data\")" + ], + "metadata": { + "id": "gH_Tr-GFHF4v" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Next, we need to create a default configuration file that points to the downloaded data.\n", + "By default, pysteps will place the configuration file in `$HOME/.pysteps` (unix and Mac OS X) or `$USERPROFILE/pysteps` (windows).\n", + "To quickly create a configuration file, we will use the [pysteps.datasets.create_default_pystepsrc()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.create_default_pystepsrc.html#pysteps.datasets.create_default_pystepsrc) helper function." + ], + "metadata": { + "id": "xIJKLYyBHf6X" + } + }, + { + "cell_type": "code", + "source": [ + "# If the configuration file is placed in one of the default locations\n", + "# (https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html#configuration-file-lookup)\n", + "# it will be loaded automatically when pysteps is imported.\n", + "config_file_path = create_default_pystepsrc(\"pysteps_data\")" + ], + "metadata": { + "id": "9w1bFhb3Hjte" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Since pysteps was already initialized in this notebook, we need to load the new configuration file and update the default configuration." + ], + "metadata": { + "id": "blKtFOpEHnbk" + } + }, + { + "cell_type": "code", + "source": [ + "# Import pysteps and load the new configuration file\n", + "import pysteps\n", + "\n", + "_ = pysteps.load_config_file(config_file_path, verbose=True)" + ], + "metadata": { + "id": "pX1WeRiYHqoc" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Load the input data\n", + "\n", + "Pysteps comes with few precipitation datasets with precipitation events from different countries and providers. We can see the available datasets with `datasets.info()`:" + ], + "metadata": { + "id": "RNqD4DwpHwiB" + } + }, + { + "cell_type": "code", + "source": [ + "from pysteps import datasets\n", + "datasets.info()" + ], + "metadata": { + "id": "3kIorbSxHxW6" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "To import an example dataset, we use the [load_dataset()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.load_dataset.html) helper function from the `pysteps.datasets` module.\n", + "\n", + "For this tutorial, we will use three events from MeteoSwiss: the first two events for our tuning set, while `mch3` will serve for the validation at the end." 
+ ], + "metadata": { + "id": "W8SfYSysHzh6" + } + }, + { + "cell_type": "code", + "source": [ + "# import the 2-hour precipitation events\n", + "# i.e., a sequence of 24 precipitation frames with 5 minute temporal resolution\n", + "mch1 = datasets.load_dataset(\"mch\", frames=24)\n", + "mch2 = datasets.load_dataset(\"mch2\", frames=24)\n", + "mch3 = datasets.load_dataset(\"mch3\", frames=24)\n", + "\n", + "tune_set = (mch1, mch3)\n", + "val_set = (mch2,)" + ], + "metadata": { + "id": "fsire424H1_C" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "## Set up the experiment\n", + "**Evaluate Performance Metrics**\n", + "\n", + " - Use performance metrics such as Root Mean Squared Error (RMSE), Critical Success Index (CSI), or Heidke Skill Score (HSS) to evaluate the accuracy of nowcasts generated using different numbers of frames.\n", + " - Calculate these metrics for each number of frames across all runs but same forecasting period to avoid dependence on the variability of the observation.\n", + "\n", + "**Analyze Computational Load**\n", + " - Measure the computational time and resources required for processing different numbers of frames.\n", + " - Ensure that the increased accuracy justifies any additional computational costs." + ], + "metadata": { + "id": "XASmG5EcWQ9Q" + } + }, + { + "cell_type": "code", + "source": [ + "from pysteps.verification.detcontscores import det_cont_fct\n", + "from pysteps.verification.detcatscores import det_cat_fct\n", + "\n", + "def evaluation_fn(fct, obs):\n", + " rmse = det_cont_fct(fct, obs, scores='RMSE',axis=(1,2))\n", + " #csi = det_cat_fct(fct, obs, 0.1, scores='CSI')\n", + " #we could create a normalized multiscore value using several scores\n", + " #but for now we are just returning an array with the lead-time rmse\n", + " return rmse['RMSE']" + ], + "metadata": { + "id": "eb8eW38VX7Sy" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Here we define the function that will compute a single trial of the optimization experiment. Such function takes a sample of the parameter space as an input and outputs the resulting error.\n", + "\n", + "The trial consists of run of 1 hour at the end of the 2-hours event to account for the variability of the results as a function of the lead-time. The initial hour is the range of number of frames that we are testing. The final error is the average error of all runs." 
+ ], + "metadata": { + "id": "BLeoXHGYX934" + } + }, + { + "cell_type": "code", + "source": [ + "from pysteps import motion, nowcasts\n", + "from pysteps.utils import transformation\n", + "\n", + "N_LEADTIMES = 12\n", + "\n", + "def pysteps_objective(config, events=None, time_step=None):\n", + "\n", + " # select nowcasting methods\n", + " estimate_motion = motion.get_method(\"LK\")\n", + " extrapolate = nowcasts.get_method(\"extrapolation\")\n", + "\n", + " n_frames = config['n_frames']\n", + "\n", + " error = 0\n", + " n = 0\n", + " for event in events:\n", + "\n", + " precip_data, metadata, __ = event\n", + " n_total_frames = precip_data.shape[0]\n", + "\n", + " # log-transform the data to dBR.\n", + " precip_data_dbr, __ = transformation.dB_transform(\n", + " precip_data, metadata, threshold=0.1, zerovalue=-15.0\n", + " )\n", + "\n", + " # prepare the data for the run\n", + " obs_precip = precip_data[n_total_frames-N_LEADTIMES:n_total_frames]\n", + "\n", + " input_precip_dbr = precip_data_dbr[n_total_frames-N_LEADTIMES-n_frames:n_total_frames-N_LEADTIMES]\n", + " input_precip = precip_data[n_total_frames-N_LEADTIMES-1]\n", + "\n", + " # compute the nowcast\n", + " motion_field = estimate_motion(input_precip_dbr)\n", + " forecast_precip = extrapolate(input_precip, motion_field, N_LEADTIMES)\n", + "\n", + " # evaluate the nowcast\n", + " if time_step is None:\n", + " error += evaluation_fn(forecast_precip, obs_precip)\n", + " else:\n", + " error += evaluation_fn(forecast_precip, obs_precip)[time_step]\n", + " n += 1\n", + " # loop the event\n", + "\n", + " return {\"mean_error\": error / n}" + ], + "metadata": { + "id": "1WrFN9VGYYkq" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Finally, we need to define the [search space](https://docs.ray.io/en/latest/tune/api/search_space.html) for our experiment. This step is critical to the success of the tuning, as it is important to select the most sensitive parameters and to define a reasonable range of values to be explored during the tuning. Typically, one will need to go through multiple iterations before finding a satisfying set up.\n", + "\n", + "Below we define the search space which in this case it is the number of frames. Usually ray is optimal for multiparameter models but for this example just one is used. The number of frames is defined by the input array in [dense Lucas-Kanade](https://pysteps.readthedocs.io/en/stable/generated/pysteps.motion.lucaskanade.dense_lucaskanade.html#pysteps.motion.lucaskanade.dense_lucaskanade) optical flow routine. For each parameter, we need to specify the sampling strategy, for example in this case the min (2 samples) and max (12 samples) limits." + ], + "metadata": { + "id": "iRcd2Vh8YRIq" + } + }, + { + "cell_type": "code", + "source": [ + "from ray import tune\n", + "\n", + "#Original random\n", + "# param_space = {\n", + "# \"n_frames\": tune.qrandint(2, 12, 1),\n", + "# }\n", + "\n", + "param_space = {\n", + " \"n_frames\": tune.choice(list(range(2, 13))),\n", + "}" + ], + "metadata": { + "id": "tvAmCuPwY_3O" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "## Launch the experiment\n", + "\n", + "We can now launch the experiment. We ask for 12 trials because of this simple experiment. 
    {
      "cell_type": "markdown",
      "source": [
        "## Launch the experiment\n",
        "\n",
        "We can now launch the experiment. We ask for 12 trials, which is plenty for this simple single-parameter experiment. If not specified, ray will use a random search by default, but more sophisticated [search algorithms](https://docs.ray.io/en/latest/tune/api/suggestion.html#tune-search-alg) are available.\n"
      ],
      "metadata": {
        "id": "zEvmvEyPZUlU"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import ray\n",
        "\n",
        "N_TRIALS = 12\n",
        "\n",
        "results = []\n",
        "\n",
        "# Loop over the range of lead times\n",
        "for i_leadtime in range(12):\n",
        "    ray.shutdown()\n",
        "    ray.init(configure_logging=False)\n",
        "\n",
        "    tuner = tune.Tuner(\n",
        "        trainable=tune.with_parameters(\n",
        "            pysteps_objective,\n",
        "            events=tune_set,\n",
        "            time_step=i_leadtime,\n",
        "        ),\n",
        "        tune_config=tune.TuneConfig(\n",
        "            metric=\"mean_error\",\n",
        "            mode=\"min\",\n",
        "            num_samples=N_TRIALS,\n",
        "        ),\n",
        "        param_space=param_space,\n",
        "    )\n",
        "\n",
        "    results.append(tuner.fit())"
      ],
      "metadata": {
        "id": "mzV73JQ5Zeq8"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "It is worth mentioning that, in the case of this example, similar results could be obtained without *ray*, since only one parameter is evaluated at 11 specific values. This could be done as follows:"
      ],
      "metadata": {
        "id": "9NWj70pQApax"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "list_n_frames = list(range(2, 13))\n",
        "results_noray = []\n",
        "for n_frame in list_n_frames:\n",
        "    results_noray.append(pysteps_objective({'n_frames': n_frame}, events=tune_set, time_step=None))"
      ],
      "metadata": {
        "id": "-vvvcrygBGhX"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Analyse the results\n",
        "\n",
        "The first and simplest question we can ask is for the best parameter set for our problem:"
      ],
      "metadata": {
        "id": "nBtIA6SebZ-u"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "best_param_set = [result.get_best_result().config for result in results]\n",
        "best_param_set\n",
        "\n",
        "## to avoid a multiple lead-time optimization, we fix the first lead time\n",
        "results = results[0]\n",
        "best_param_set = best_param_set[0]"
      ],
      "metadata": {
        "id": "TXALu0D3bcsN"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "We can verify that these results match the ones obtained without the *ray* library. However, the simple ray-based code above becomes slightly more complex when producing similar output by hand."
+ ], + "metadata": { + "id": "IgrzYOQNC4XX" + } + }, + { + "cell_type": "code", + "source": [ + "import numpy as np\n", + "\n", + "search_matrix = np.vstack([item['mean_error'] for item in results_noray])\n", + "\n", + "# Lead-time range\n", + "lead_time = np.arange(1, 13)\n", + "\n", + "# Optimal n_frames\n", + "optimal_n_frames = np.argmin(search_matrix, axis=0) + min(list_n_frames)\n", + "\n", + "# Value of the minimum\n", + "optimal_value = np.min(search_matrix, axis=0)\n", + "\n", + "# Print the table with aligned columns\n", + "print('{:<12} {}'.format('Lead-time: ', ' '.join(f'{x:5d}' for x in lead_time)))\n", + "print('{:<12} {}'.format('Opt n_frames: ', ' '.join(f'{x:5d}' for x in optimal_n_frames)))\n", + "print('{:<12} {}'.format('Optimal value:', ' '.join(f'{x:5.2f}' for x in optimal_value)))" + ], + "metadata": { + "id": "i5LhPbelDLgf" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "An interesting analysis of the results consists in plotting the error of each trial as a function of its parameter values. Such plots are known as \"dot plots\". By looking at the shape of the cloud of points, one can get a good idea about the sensitivity of a given parameter. For instance, a flat curve usually indicates weak sensitivity: no matter what parameter value is selected, the resulting error doesn't seem to change much. Conversely, a clear spike in the plot indicates a sensitive parameter." + ], + "metadata": { + "id": "dmOBGa6ubhmC" + } + }, + { + "cell_type": "code", + "source": [ + "import matplotlib.pyplot as plt\n", + "from math import ceil\n", + "\n", + "def make_dot_plots(tune_results, yaxis, ylog=True, reference=None):\n", + " tune_df = tune_results.get_dataframe()\n", + " tune_best = tune_results.get_best_result()\n", + " col_params = sorted([col for col in tune_df.columns if \"config/\" in col])\n", + " ncols = 1\n", + " nrows = ceil(len(col_params) / ncols)\n", + " fig, axs = plt.subplots(nrows, ncols, figsize=(ncols * 3.5, nrows * 3))\n", + " for n, par in enumerate(col_params):\n", + " parsed_par = par.split(\"/\")[1]\n", + " try:\n", + " ax = axs.flat[n]\n", + " except:\n", + " ax = axs\n", + " ax.scatter(tune_df[par], tune_df[yaxis], alpha=0.5)\n", + " ax.scatter(tune_best.config[parsed_par], tune_best.metrics[yaxis], c=\"r\")\n", + " if reference:\n", + " ax.axhline(reference, c=\"k\", ls=\":\")\n", + " if ylog:\n", + " ax.set_yscale(\"log\")\n", + " ax.set_xlabel(parsed_par)\n", + " if n % ncols == 0:\n", + " ax.set_ylabel(yaxis)\n", + " else:\n", + " ax.axes.get_yaxis().set_visible(False)\n", + "\n", + " plt.tight_layout()\n", + "\n", + "# compute error for default settings\n", + "tune_error_default = pysteps_objective({'n_frames':3}, tune_set)[\"mean_error\"]\n", + "\n", + "make_dot_plots(results, \"mean_error\", reference=tune_error_default)" + ], + "metadata": { + "id": "YnqCN9mSboGc" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "The dashed horizontal line represents the skill of the default parameter set (i.e., without tuning), while the red dot represents the best paramter set.\n", + "\n", + "Finally, let's compare the default and best parameter sets on the tune and validation events. This will provide some indication on the generability of the optimized parameter set." 
+ ], + "metadata": { + "id": "hOCvapHtdt-_" + } + }, + { + "cell_type": "code", + "source": [ + "import pandas as pd\n", + "\n", + "# compute validation scores\n", + "val_error_best = pysteps_objective(best_param_set, val_set)[\"mean_error\"]\n", + "val_error_default = pysteps_objective({'n_frames':3}, val_set)[\"mean_error\"]" + ], + "metadata": { + "id": "POqo0ksJdw65" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "data = {\n", + " \"default\": {\n", + " \"tune\": tune_error_default,\n", + " \"val\": val_error_default,\n", + " },\n", + " \"best\": {\n", + " \"tune\": results.get_best_result().metrics[\"mean_error\"],\n", + " \"val\": val_error_best,\n", + " },\n", + "}\n", + "\n", + "df = pd.DataFrame(data)\n", + "\n", + "# Plotting with adjusted y-axis limits\n", + "ax = df.plot.bar()\n", + "\n", + "# Set the y-axis limits to focus on the data range\n", + "y_min = df.min().min() - 0.1 * (df.max().max() - df.min().min())\n", + "y_max = df.max().max() + 0.1 * (df.max().max() - df.min().min())\n", + "ax.set_ylim(y_min, y_max)" + ], + "metadata": { + "id": "LLKOrNiRfVT0" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "And in this case it seems the obtained optimal value is also optimal for the validation event." + ], + "metadata": { + "id": "CvymwPhmHbWc" + } + } + ] +} \ No newline at end of file From d63961b539722dec12da2c7b969de45678af042c Mon Sep 17 00:00:00 2001 From: aitaten Date: Sat, 20 Jul 2024 17:46:19 +0000 Subject: [PATCH 3/4] Add link in the docs to the optimal number of frames notebook --- doc/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index daf8678a2..7f0a7b7a4 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -33,6 +33,7 @@ __ https://github.com/pySTEPS/pysteps Installation Gallery <../auto_examples/index> My first nowcast (Colab Notebook) + Optimizing number of frames OF (Colab Notebook) API Reference Example data Configuration file (pystepsrc) From 3f0c4edc9e3a2eeece1b745738e5a101ec237c1f Mon Sep 17 00:00:00 2001 From: aitaten Date: Mon, 22 Jul 2024 07:45:06 +0000 Subject: [PATCH 4/4] Remove unwanted file from PR --- .../plot_optical_flow_with_new_inca_motion.py | 526 ------------------ 1 file changed, 526 deletions(-) delete mode 100644 examples/plot_optical_flow_with_new_inca_motion.py diff --git a/examples/plot_optical_flow_with_new_inca_motion.py b/examples/plot_optical_flow_with_new_inca_motion.py deleted file mode 100644 index d27357102..000000000 --- a/examples/plot_optical_flow_with_new_inca_motion.py +++ /dev/null @@ -1,526 +0,0 @@ -""" -Optical flow -============ - -This tutorial offers a short overview of the optical flow routines available in -pysteps and it will cover how to compute and plot the motion field from a -sequence of radar images. 
-""" - -from datetime import datetime -from pprint import pprint -import matplotlib.pyplot as plt -import numpy as np -import math -import time -from datetime import timedelta - -from pysteps import io, motion, rcparams, nowcasts -from pysteps.utils import conversion, transformation -from pysteps.visualization import plot_precip_field, quiver - -import pdb - -from numba import jit - -################################################################################ -# The INCA module functions to test agains the four (one removed) OF methods -# --------------------------- - -@jit(nopython=True) -def compute_motion_numba(image1, image2, NI, NJ, nthin, - IS_tmp, JS_tmp): - nsh = 20 - nqu = 45 - nsq = nsh + nqu - di = 1 - PAREAMIN = 1.0 - rr_dpa_min = 0.03 - nn = (2 * nqu / di + 1) ** 2. - - for i in range(0, NI, nthin): - if (i >= nsq) and (i < NI - nsq): - for j in range(0, NJ, nthin): - if (j >= nsq) and (j < NJ - nsq): - ii1 = max(i - nqu, 0) - ii2 = min(i + nqu, NI - 1) - jj1 = max(j - nqu, 0) - jj2 = min(j + nqu, NJ - 1) - - sy = 0. - sy2 = 0. - - for ii in range(ii1, ii2+1, di): - for jj in range(jj1, jj2+1, di): - sy = sy + image1[jj, ii] - sy2 = sy2 + image1[jj, ii] ** 2. - - sigy = sy2 - sy ** 2. / nn - isho = -99 - jsho = -99 - - if (sigy > 0.) and (sy > PAREAMIN): - corqx = 0.1 - for ish in range(-nsh, nsh + 1): - for jsh in range(-nsh, nsh + 1): - if ( math.sqrt((ish)**2. + (jsh)**2.) > nsh ): continue - sx = 0. - sx2 = 0. - sxy = 0. - for ii in range(ii1, ii2+1, di): - for jj in range(jj1, jj2+1, di): - ind_x = min(max(0, jj + jsh), NJ - 1) - ind_y = min(max(0, ii + ish), NI - 1) - sx += image2[ind_x, ind_y] - sx2 += image2[ind_x, ind_y] ** 2. - sxy += image2[ind_x, ind_y] * image1[jj, ii] - - sigx = sx2 - sx ** 2. / nn - if sigx > 0.: - corq = (sxy -sx * sy / nn) ** 2. / (sigx * sigy) - if corq > corqx: - corqx = corq - isho = ish - jsho = jsh - - if (isho != -99) and (corqx > 0.3) and (corqx * sy > 1.) and (sigy / sy >= 0.08): - if (abs(isho) < nsh) and (abs(jsho) < nsh): - IS_tmp[int(j/nthin),int(i/nthin)] = -isho - JS_tmp[int(j/nthin),int(i/nthin)] = -jsho - else: - IS_tmp[int(j/nthin),int(i/nthin)] = 0 - JS_tmp[int(j/nthin),int(i/nthin)] = 0 - - return IS_tmp, JS_tmp - -def within_interval(now_dd, err_dd, do_it): - """ - Check whether a direction value lies within a certain interval or not. - - Parameters: - - now_dd (float): The direction value to be checked. - - err_dd (float): The reference direction value. - - do_it (int): A flag to control printing. - - Returns: - - int: 0 if now_dd is within the interval, 1 otherwise. - """ - - version = 0 - window = 45.0 - - if (now_dd + window) > 360.0 or (now_dd - window) < 0.0: - version = 1 - if (err_dd + window) > 360.0 or (err_dd - window) < 0.0: - version = 1 - - if do_it == 1: - print(f"now_dd: {now_dd:7.2f}, err_dd: {err_dd:7.2f}") - if version == 1: - now_dd = np.fmod(now_dd + 360.0, 360.0) - err_dd = np.fmod(err_dd + 360.0, 360.0) - if do_it == 1: - print(f"now_dd: {now_dd:7.2f}, err_dd: {err_dd:7.2f} (version 1)") - - return 0 if (now_dd > (err_dd - window) and now_dd < (err_dd + window)) else 1 - -def calculate_corrected_motion_vector(NI, NJ, nthin, IS_NOW, JS_NOW, Uerr_, Verr_, IS_ANA, JS_ANA, IS, JS, Uerr_av, Verr_av): - """ - Calculate a corrected motion vector based on input data and error vectors. - - Parameters: - - NI (int): Number of elements in the x-direction. - - NJ (int): Number of elements in the y-direction. - - nthin (int): Step size for iteration. 
- - IS_NOW ((NJ/nthin, NI/nthin,) ndarray): Current error motion vector in the x-direction. - - JS_NOW ((NJ/nthin, NI/nthin,) ndarray): Current error motion vector in the y-direction. - - Uerr_ ((NJ/nthin, NI/nthin,) ndarray): Climatological error motion vector in the x-direction. - - Verr_ ((NJ/nthin, NI/nthin,) ndarray): Climatological error motion vector in the y-direction. - - IS_ANA ((NJ/nthin, NI/nthin,) ndarray): Analysis motion vector in the x-direction. - - JS_ANA ((NJ/nthin, NI/nthin,) ndarray): Analysis motion vector in the y-direction. - - IS ((NJ/nthin, NI/nthin,) ndarray): Output corrected motion vector in the x-direction. - - JS ((NJ/nthin, NI/nthin,) ndarray): Output corrected motion vector in the y-direction. - - Uerr_av ((NJ/nthin, NI/nthin,) ndarray): Averaged error motion vector in the x-direction. - - Verr_av ((NJ/nthin, NI/nthin,) ndarray): Averaged error motion vector in the y-direction. - - Returns: - - Tuple[ndarray, ndarray, ndarray, ndarray]: - - The corrected motion vectors in the x-direction (IS). - - The corrected motion vectors in the y-direction (JS). - - The averaged error motion vectors in the x-direction (Uerr_av). - - The averaged error motion vectors in the y-direction (Verr_av). - """ - print("Now in the works: corrected motion vector.") - # This is the original approach. Do we need this? - # for i in range(0, NJ - 1, nthin): - # for j in range(0, NI - 1, nthin): - for i in range(0, int(NJ / nthin) + 1): - for j in range(0, int(NI / nthin) + 1): - has_averaged = 0 - # why do we need this? - # do_it = 1 if i == 480 and j == 90 else 0 - do_it = 0 - - if IS_NOW[i, j] != -99 and Uerr_[i, j] > -98.: - NOW_dd = (int)(270. - np.arctan2(JS_NOW[i, j], IS_NOW[i, j]) * 180. / np.pi) % 360 - NOW_ff = np.sqrt(np.square(IS_NOW[i, j]) + np.square(JS_NOW[i, j])) - err_dd = (int)(270. - np.arctan2(Verr_[i, j], Uerr_[i, j]) * 180. / np.pi) % 360 - err_ff = np.sqrt(np.square(Uerr_[i, j]) + np.square(Verr_[i, j])) - - if within_interval(float(NOW_dd), float(err_dd), do_it) == 0 and NOW_ff > err_ff * 0.5 and NOW_ff < err_ff * 1.5: - Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j]) - Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j]) - else: - Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j]) * 0.25 + Uerr_[i, j] * 0.75 - Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j]) * 0.25 + Verr_[i, j] * 0.75 - has_averaged = 1 - - else: - Uerr_av[i, j] = float(IS_NOW[i, j] - IS_ANA[i, j]) - Verr_av[i, j] = float(JS_NOW[i, j] - JS_ANA[i, j]) - - if Uerr_av[i, j] > -98.: - if IS_ANA[i, j] == -99: - IS[i, j] = -99 - JS[i, j] = -99 - else: - IS[i, j] = math.fmod(IS_ANA[i, j],99) + math.fmod(int(Uerr_av[i, j]),99) - JS[i, j] = math.fmod(JS_ANA[i, j],99) + math.fmod(int(Verr_av[i, j]),99) - - else: - IS[i, j] = IS_ANA[i, j] - JS[i, j] = JS_ANA[i, j] - - if has_averaged == 0: - if Uerr_av[i, j] < -98.: - w_emv_now = 0. - else: - w_emv_now = 0.25 - - if Uerr_[i, j] < -98.: - w_emv_avg = 0. - else: - w_emv_avg = 0.75 - - if w_emv_avg + w_emv_now >= 0.999: - Uerr_av[i, j] = int(Uerr_av[i, j] * w_emv_now + Uerr_[i, j] * (1. - w_emv_now)) - Verr_av[i, j] = int(Verr_av[i, j] * w_emv_now + Verr_[i, j] * (1. - w_emv_now)) - - return IS, JS, Uerr_av, Verr_av - -@jit(nopython=True) -def interpolate_rmv(ni1, ni2, nj1, nj2, kmax, nsteph, xr, yr, sta, uu, vv, u, v): - """ - Perform distance-weighted interpolation of values at grid points. - - Parameters: - - ni1 (int): Lower bound of the first dimension of the grid. - - ni2 (int): Upper bound of the first dimension of the grid. 
- - nj1 (int): Lower bound of the second dimension of the grid. - - nj2 (int): Upper bound of the second dimension of the grid. - - wgts (ndarray): Weights vector for each interpolation point. - - kmax (int): Maximum value of k for looping. - - nsteph (float): A scaling factor for the interpolated values. - - xr (ndarray): Array containing x-coordinates of the interpolation points. - - yr (ndarray): Array containing y-coordinates of the interpolation points. - - sta (ndarray): Array indicating whether the interpolation point is valid. - - uu (ndarray): Array containing values to be interpolated (component u). - - vv (ndarray): Array containing values to be interpolated (component v). - - u (ndarray): Array to store the interpolated values (component u). - - v (ndarray): Array to store the interpolated values (component v). - - Returns: - - u (ndarray): Array containing the interpolated values (component u). - - v (ndarray): Array containing the interpolated values (component v). - """ - for j in range(nj1, nj2): - for i in range(ni1, ni2): - wgtsum = 0.0 - - u[i-ni1, j-nj1] = 0.0 - v[i-ni1, j-nj1] = 0.0 - - for k in range(0, kmax): - if sta[k]: - rsq = np.sqrt((xr[k]-j)**2 + (yr[k]-i)**2) - if rsq < 1.0: - wgts = 1000000.0 - else: - wgts = 1.0 / rsq**3 - wgtsum += wgts - - for k in range(0, kmax): - if sta[k]: - rsq = np.sqrt((xr[k]-j)**2 + (yr[k]-i)**2) - if rsq < 1.0: - wgts = 1000000.0 - else: - wgts = 1.0 / rsq**3 - wgts = wgts / wgtsum - u[i-ni1, j-nj1] += wgts * uu[k] - v[i-ni1, j-nj1] += wgts * vv[k] - - u[i-ni1, j-nj1] *= nsteph / 3.6 - v[i-ni1, j-nj1] *= nsteph / 3.6 - - return u, v - -def inca_motion_vectors_simplified(PREV, timestep,settings={}): -# def inca_motion_vectors_simplified(ANA, PREV, NWPUAWIND, Uerr_, Verr_, -# initTime, inca_domain, timestep, settings,adv_files): - """This function calculates motion vectors for precipitation nowcasting. - following the same approach as the operation INCA C code. It computes - the motion vector in lower resolution (nthin) for two radar fields and - for the error of the previous nowcasting (+15 min). Then it performs a - correction based on a climatology error to keep trend recent performance. - Then it is cross-checked with the upper air wind field (500hPa and 700hPa) - for consistency before doing the interpolation to the original resolution. - - - Args: - ANA ((N,M,) ndarray): the current precip analysis - PREV ((N,M,O,2,) ndarray): the previous analyses and nowcasts - NWPUAWIND (dict): the upper air wind (U,V) field (500hPa, 600hP and 700hPa) - Uerr_ ((N/nthin, M/nthin,) ndarray): Climatological error motion vector in the x-direction. - Verr_ ((N/nthin, M/nthin,) ndarray): Climatological error motion vector in the y-direction. - initTime (datetime): the initialization of the INCA+ run - inca_domain (domain class): all there is to know about the domain - timestep (int): the accumulation time for precipitation [seconds] - settings (dict): the nowcasting settings - - Returns: - (N,M,2,) ndarray: the motion vectors - (N,M,2,) ndarray: the error motion vectors - """ - - print('Creating INCA motion vectors') - - start_nwc = time.monotonic() - - #Taking the needed parameters from the nowcasting settings dict - nback = settings.get('nback', 3) - PMIN = settings.get('PMIN', 0.05) - nthin = settings.get('nthin', 15) - delmax = settings.get('delmax', 5.) - - # Create factor and apply - factor = timestep / 3600. 
- nsteph = 1/factor - - NI = int((metadata['x2']-metadata['x1'])/metadata['xpixelsize']) - NJ = int((metadata['y2']-metadata['y1'])/metadata['ypixelsize']) - - NX = int(np.ceil(NI / nthin)) - NY = int(np.ceil(NJ / nthin)) - - Uana = np.full((NJ, NI),-99.)[::nthin, ::nthin] - Vana = np.full((NJ, NI),-99.)[::nthin, ::nthin] - - Uana, Vana = compute_motion_numba( - PREV[1,...], PREV[0,...], - NI, NJ, nthin, Uana, Vana) - - boole_ = ((Uana > -99) & (Vana > -99)) - - X_orig = metadata['xpixelsize']*np.arange(NI)+metadata['x1'] - Y_orig = metadata['ypixelsize']*np.arange(NJ)+metadata['y1'] - X, Y = X_orig[::nthin], Y_orig[::nthin] - XX, YY =np.meshgrid(X,Y) - - UU = np.zeros([NJ, NI]) - VV = np.zeros([NJ, NI]) - UU, VV = interpolate_rmv(int(metadata['y1']/metadata['xpixelsize']), int(metadata['y2']/metadata['xpixelsize']), - int(metadata['x1']/metadata['xpixelsize']), int(metadata['x2']/metadata['xpixelsize']), - len(Uana.flatten()), nsteph, XX.flatten()/metadata['xpixelsize'], YY.flatten()/metadata['xpixelsize'], - boole_.flatten(), Uana.flatten(), Vana.flatten(), UU, VV) - - extrapolate = nowcasts.get_method("extrapolation") - R[~np.isfinite(R)] = metadata["zerovalue"] - NWC_field = extrapolate(PREV[1,...], np.append(UU[np.newaxis,...],VV[np.newaxis,...],axis=0), 12) - - #Initialize all the required matrices for following code - Uana = np.full((NJ, NI),-99.)[::nthin, ::nthin] - Vana = np.full((NJ, NI),-99.)[::nthin, ::nthin] - Unow = np.full((NJ, NI),-99.)[::nthin, ::nthin] - Vnow = np.full((NJ, NI),-99.)[::nthin, ::nthin] - IS = np.full((NJ, NI),-99.)[::nthin,::nthin] - JS = np.full((NJ, NI),-99.)[::nthin,::nthin] - Uerr_av = np.full((NJ, NI),-99.)[::nthin,::nthin] - Verr_av = np.full((NJ, NI),-99.)[::nthin,::nthin] - Uerr_ = np.full((NJ, NI),0.)[::nthin,::nthin] - Verr_ = np.full((NJ, NI),0.)[::nthin,::nthin] - - Uana, Vana = compute_motion_numba( - PREV[2,...], PREV[1,...], - NI, NJ, nthin, Uana, Vana) - - Unow, Vnow = compute_motion_numba( - PREV[2,...], NWC_field[0,...], - NI, NJ, nthin, Unow, Vnow) - - IS, JS, Uerr_av, Verr_av = calculate_corrected_motion_vector(NI, NJ, - nthin, Unow, Vnow, Uerr_, Verr_, Uana, Vana, IS, JS, Uerr_av, Verr_av) - - boole_ = ((IS > -99) & (JS > -99)) - - UU = np.zeros([NJ, NI]) - VV = np.zeros([NJ, NI]) - UU, VV = interpolate_rmv(int(metadata['y1']/metadata['xpixelsize']), int(metadata['y2']/metadata['xpixelsize']), - int(metadata['x1']/metadata['xpixelsize']), int(metadata['x2']/metadata['xpixelsize']), - len(Uana.flatten()), nsteph, XX.flatten()/metadata['xpixelsize'], YY.flatten()/metadata['xpixelsize'], - boole_.flatten(), IS.flatten(), JS.flatten(), UU, VV) - - OFZ = np.append(UU[np.newaxis,...],VV[np.newaxis,...],axis=0) - - print("Elapsed time of INCA motion vector: %s [h:mm:ss].", - timedelta(seconds=int(time.monotonic() - start_nwc))) - - return OFZ - - - -################################################################################ -# Read the radar input images -# --------------------------- -# -# First, we will import the sequence of radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. 
- -# Selected case -date = datetime.strptime("201505151630", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the input files from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=9 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -del quality # Not used - -############################################################################### -# Preprocess the data -# ~~~~~~~~~~~~~~~~~~~ - -# Convert to mm/h -R, metadata = conversion.to_rainrate(R, metadata) - -# Store the reference frame -R_ = R[-1, :, :].copy() -R_here = R.copy() - -# Log-transform the data [dBR] -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Nicely print the metadata -pprint(metadata) - -################################################################################ -# INCA TREC method with error motion correction (Haiden et al 2011) -# --------------------------------------------------- -# -# This module implements the anisotropic diffusion method presented in Proesmans -# et al. (1994), a robust optical flow technique which employs the notion of -# inconsitency during the solution of the optical flow equations. - -# oflow_method = motion.get_method("proesmans") -R_here[~np.isfinite(R)] = 0 -V4 = inca_motion_vectors_simplified(R_here[-3:, :, :],timestep*60) - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="INCA motion") -quiver(V4, geodata=metadata, step=25) -plt.show() - - -################################################################################ -# Lucas-Kanade (LK) -# ----------------- -# -# The Lucas-Kanade optical flow method implemented in pysteps is a local -# tracking approach that relies on the OpenCV package. -# Local features are tracked in a sequence of two or more radar images. The -# scheme includes a final interpolation step in order to produce a smooth -# field of motion vectors. - -oflow_method = motion.get_method("LK") -V1 = oflow_method(R[-3:, :, :]) - -# Plot the motion field on top of the reference frame -plot_precip_field(R_, geodata=metadata, title="LK") -quiver(V1, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Variational echo tracking (VET) -# ------------------------------- -# -# This module implements the VET algorithm presented -# by Laroche and Zawadzki (1995) and used in the McGill Algorithm for -# Prediction by Lagrangian Extrapolation (MAPLE) described in -# Germann and Zawadzki (2002). -# The approach essentially consists of a global optimization routine that seeks -# at minimizing a cost function between the displaced and the reference image. 
- -oflow_method = motion.get_method("VET") -V2 = oflow_method(R[-3:, :, :]) - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="VET") -quiver(V2, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Dynamic and adaptive radar tracking of storms (DARTS) -# ----------------------------------------------------- -# -# DARTS uses a spectral approach to optical flow that is based on the discrete -# Fourier transform (DFT) of a temporal sequence of radar fields. -# The level of truncation of the DFT coefficients controls the degree of -# smoothness of the estimated motion field, allowing for an efficient -# motion estimation. DARTS requires a longer sequence of radar fields for -# estimating the motion, here we are going to use all the available 10 fields. - -oflow_method = motion.get_method("DARTS") -R[~np.isfinite(R)] = metadata["zerovalue"] -V3 = oflow_method(R) # needs longer training sequence - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="DARTS") -quiver(V3, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Anisotropic diffusion method (Proesmans et al 1994) -# --------------------------------------------------- -# -# This module implements the anisotropic diffusion method presented in Proesmans -# et al. (1994), a robust optical flow technique which employs the notion of -# inconsitency during the solution of the optical flow equations. - -# oflow_method = motion.get_method("proesmans") -# R[~np.isfinite(R)] = metadata["zerovalue"] -# V4 = oflow_method(R[-2:, :, :]) - -# # Plot the motion field -# plot_precip_field(R_, geodata=metadata, title="Proesmans") -# quiver(V4, geodata=metadata, step=25) -# plt.show() - -# sphinx_gallery_thumbnail_number = 1