Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions docs/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,20 @@
All notable changes to this project will be documented in this file.
We follow the [Semantic Versioning 2.0.0](http://semver.org/) format.

## v4.8.__._ - 2025-09-02 - [PR#1642](https://github.com/NOAA-OWP/inundation-mapping/pull/1642)

Adjust eval_plots.py so it only looks for the config file when in -sp (spatial) mode. Spatial mode is used when running the tool to create FIM_performance files. In that mode, the tool can only be run on OWP servers, as it needs to communicate with internal servers.

Also updated the tools to create GeoPackage (gpkg) files instead of shapefiles. They continue to create CSV files as before. An adjustment for zero-padding HUC numbers, where applicable, was also added.

This update does not affect use of the tool as part of regular alpha testing.

### Changes
- `tools/`
- `eval_plots.py`: as described above including the zero padding HUC fix.
- `eval_plots_stackedbar.py`: zero padding HUC fix.
<br/>

## v4.8.10.3 - 2025-08-29 - [PR#1627](https://github.com/NOAA-OWP/inundation-mapping/pull/1627)

Adds gcsfs dependency to allow retrieval of NWM output from the Google Cloud Service.
Expand Down
71 changes: 36 additions & 35 deletions tools/eval_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -574,6 +574,9 @@ def eval_plots(
# Import metrics csv as DataFrame and initialize all_datasets dictionary
csv_df = pd.read_csv(metrics_csv, dtype={'huc': str})

# Ensure all HUC values are 8 characters long with leading zeros if needed
csv_df['huc'] = csv_df['huc'].str.zfill(8)

# If versions are supplied then filter out
if versions:
# Filter out versions based on supplied version list
Expand Down Expand Up @@ -865,9 +868,11 @@ def eval_plots(
# Join spatial data to metric data
gdf['nws_lid'] = gdf['nws_lid'].str.lower()
joined = gdf.merge(all_ahps_datasets, on='nws_lid')
# Project to VIZ projection and write to file
# Project to VIZ projection and write to file (the csv is for HV, the gpkg is for debugging)
joined = joined.to_crs(VIZ_PROJECTION)
joined.to_file(Path(workspace) / 'fim_performance_points.shp', engine='fiona')
points_file_name = os.path.join(workspace, 'fim_performance_points.gpkg')
joined.to_file(points_file_name, driver="GPKG", engine='fiona')
joined.to_csv(points_file_name.replace(".gpkg", ".csv"))
else:
print(
'NWS/USGS MS datasets not analyzed, no spatial data created.\n'
Expand Down Expand Up @@ -924,43 +929,40 @@ def eval_plots(
wbd_with_metrics.rename(columns={'benchmark_source': 'source'}, inplace=True)
# Project to VIZ projection
wbd_with_metrics = wbd_with_metrics.to_crs(VIZ_PROJECTION)
# Write out to file
wbd_with_metrics.to_file(Path(workspace) / 'fim_performance_polys.shp', engine='fiona')
# Write out to file (the csv is for HV, the gpkg is for debugging)
wbd_with_metrics_file_name = os.path.join(workspace, 'fim_performance_polys.gpkg')
wbd_with_metrics.to_file(wbd_with_metrics_file_name, driver="GPKG", engine='fiona')
wbd_with_metrics.to_csv(wbd_with_metrics_file_name.replace(".gpkg", ".csv"))
else:
print(
'BLE/IFC/RAS2FIM FR datasets not analyzed, no spatial data created.\n'
'To produce spatial data analyze a FR version'
)


def convert_shapes_to_csv(workspace):
# Convert any geopackage in the root level of output_mapping_dir to CSV and rename.
shape_list = glob.glob(os.path.join(workspace, '*.shp'))
for shape in shape_list:
gdf = gpd.read_file(shape)
parent_directory = os.path.split(shape)[0]
file_name = shape.replace('.shp', '.csv')
csv_output_path = os.path.join(parent_directory, file_name)
gdf.to_csv(csv_output_path)


#######################################################################
if __name__ == '__main__':

"""
This script has two main uses. The most common use is part of the "alpha test" system via
This script has two main uses.

The most common use is part of the "alpha test" system generally ran after
synthesize_test_cases.py. This set can be run in any environment.

The second usage is called FIM Performance mode and talks to the WRDS API to get some nwm metadata.
This results in the script needing to be run in the OWP environments with a valid URL to WRDS.
This creates FIM Performance files of fim_performance_points.csv and fim_performance_polys.csv
which are used by HV.
which are used by HV. Adding the -sp (spatial) flag creates the FIM performance output files.

If you include the -sp flag, it needs to talk to WRDS and will need a config file.
That config file is defaulted as part of the -e (env file) arg, which can be overridden.
If you do not include the -sp flag, the config file is not needed.

Usage for FIM Performance mode. The example for output data is based on the filtered hand output
normally used in OWP servers by catfim. Any valid HAND dataset can be used and the filtered
"catfim" output version has all of the files needed by both CatFIM and this script.
fim_version="hand_4_6_1_4" ; python /foss_fim/tools/eval_plots.py \
-m /outputs/${fim_version}_catfim/${fim_version}_metrics.csv \
fim_version="hand_4_6_1_4" ; python /foss_fim/tools/eval_plots.py
-m /outputs/${fim_version}_catfim/${fim_version}_metrics.csv
-w /data/fim_performance/${fim_version} -v ${fim_version} -sp -i

"""
Expand Down Expand Up @@ -1009,7 +1011,9 @@ def convert_shapes_to_csv(workspace):
'-e',
'--env_file',
help='OPTIONAl: Docker mount path to the script environment file.'
' Defaults to /data/config/fim_enviro_values.env.',
' Defaults to /data/config/fim_enviro_values.env.\n'
' The env file is only needed when you are using the -sp argument'
' which generates spatial data generally used for FIM Performance output files.',
default="/data/config/fim_enviro_values.env",
required=False,
)
Expand All @@ -1025,24 +1029,21 @@ def convert_shapes_to_csv(workspace):
sp = args['spatial']
i = args['site_plots']

load_dotenv(args['env_file'])
if sp: # create the spatial files generally used for FIM Performance.
load_dotenv(args['env_file'])

API_BASE_URL = os.getenv('API_BASE_URL')
if API_BASE_URL is None:
raise ValueError(
'API base url not found. '
'Ensure inundation_mapping/tools/ has an .env file with the following info: '
'API_BASE_URL, WBD_LAYER, NWM_FLOWS_MS, '
'USGS_METADATA_URL, USGS_DOWNLOAD_URL'
)
WBD_LAYER = os.getenv("WBD_LAYER")
API_BASE_URL = os.getenv("API_BASE_URL")
API_BASE_URL = os.getenv('API_BASE_URL')
if API_BASE_URL is None:
raise ValueError(
'API base url not found. '
'Ensure inundation_mapping/tools/ has an .env file with the following info: '
'API_BASE_URL, WBD_LAYER, NWM_FLOWS_MS, '
'USGS_METADATA_URL, USGS_DOWNLOAD_URL'
)
WBD_LAYER = os.getenv("WBD_LAYER")
API_BASE_URL = os.getenv("API_BASE_URL")

# Run eval_plots function
print('The following AHPS sites are considered "BAD_SITES": ' + ', '.join(BAD_SITES))
print('The following query is used to filter AHPS: ' + DISCARD_AHPS_QUERY)
eval_plots(metrics_csv=m, workspace=w, versions=v, stats=s, spatial=sp, site_barplots=i)

# Convert output shapefiles to CSV
print("Converting to CSVs...")
convert_shapes_to_csv(w)
2 changes: 2 additions & 0 deletions tools/eval_plots_stackedbar.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
def eval_plot_stack_data_prep(metric_csv, versions=[]):
# Load in FIM 4 CSV and select the proper version metrics
metrics = pd.read_csv(metric_csv, dtype={"huc": str})
# Ensure all HUC values are 8 characters long with leading zeros if needed
metrics['huc'] = metrics['huc'].str.zfill(8)
if versions:
versions = list(versions)
# Check to make sure requested versions are in the metrics file
Expand Down